language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | Textualize__textual | docs/examples/guide/widgets/counter02.py | {
"start": 140,
"end": 590
} | class ____(Static, can_focus=True):
"""A counter that can be incremented and decremented by pressing keys."""
BINDINGS = [
("up,k", "change_count(1)", "Increment"), # (1)!
("down,j", "change_count(-1)", "Decrement"),
]
count = reactive(0)
def render(self) -> RenderResult:
return f"Count: {self.count}"
def action_change_count(self, amount: int) -> None: # (2)!
self.count += amount
| Counter |
python | anthropics__anthropic-sdk-python | src/anthropic/types/citations_delta.py | {
"start": 903,
"end": 997
} | class ____(BaseModel):
citation: Citation
type: Literal["citations_delta"]
| CitationsDelta |
python | walkccc__LeetCode | solutions/303. Range Sum Query - Immutable/303.py | {
"start": 0,
"end": 224
} | class ____:
def __init__(self, nums: list[int]):
self.prefix = list(itertools.accumulate(nums, initial=0))
def sumRange(self, left: int, right: int) -> int:
return self.prefix[right + 1] - self.prefix[left]
| NumArray |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/expressions/struct.py | {
"start": 726,
"end": 5058
} | class ____(Expr):
class Name(IntEnum):
"""Internal and picklable representation of polars' `StructFunction`."""
FieldByName = auto()
RenameFields = auto()
PrefixFields = auto()
SuffixFields = auto()
JsonEncode = auto()
WithFields = auto() # TODO: https://github.com/rapidsai/cudf/issues/19284
MapFieldNames = auto() # TODO: https://github.com/rapidsai/cudf/issues/19285
FieldByIndex = auto()
MultipleFields = (
auto()
) # https://github.com/pola-rs/polars/pull/23022#issuecomment-2933910958
@classmethod
def from_polars(cls, obj: polars._expr_nodes.StructFunction) -> Self:
"""Convert from polars' `StructFunction`."""
try:
function, name = str(obj).split(".", maxsplit=1)
except ValueError:
# Failed to unpack string
function = None
if function != "StructFunction":
raise ValueError("StructFunction required")
return getattr(cls, name)
__slots__ = ("name", "options")
_non_child = ("dtype", "name", "options")
_supported_ops: ClassVar[set[Name]] = {
Name.FieldByName,
Name.RenameFields,
Name.PrefixFields,
Name.SuffixFields,
Name.JsonEncode,
}
def __init__(
self,
dtype: DataType,
name: StructFunction.Name,
options: tuple[Any, ...],
*children: Expr,
) -> None:
self.dtype = dtype
self.options = options
self.name = name
self.children = children
self.is_pointwise = True
if self.name not in self._supported_ops:
raise NotImplementedError(
f"Struct function {self.name}"
) # pragma: no cover
def do_evaluate(
self, df: DataFrame, *, context: ExecutionContext = ExecutionContext.FRAME
) -> Column:
"""Evaluate this expression given a dataframe for context."""
columns = [child.evaluate(df, context=context) for child in self.children]
(column,) = columns
# Type checker doesn't know polars only calls StructFunction with struct types
if self.name == StructFunction.Name.FieldByName:
field_index = next(
(
i
for i, field in enumerate(
cast(pl.Struct, self.children[0].dtype.polars_type).fields
)
if field.name == self.options[0]
),
None,
)
assert field_index is not None
return Column(
column.obj.children()[field_index],
dtype=self.dtype,
)
elif self.name == StructFunction.Name.JsonEncode:
# Once https://github.com/rapidsai/cudf/issues/19338 is implemented,
# we can use do this conversion on host.
buff = StringIO()
target = plc.io.SinkInfo([buff])
table = plc.Table(column.obj.children())
metadata = plc.io.TableWithMetadata(
table,
[
(field.name, [])
for field in cast(
pl.Struct, self.children[0].dtype.polars_type
).fields
],
)
options = (
plc.io.json.JsonWriterOptions.builder(target, table)
.lines(val=True)
.na_rep("null")
.include_nulls(val=True)
.metadata(metadata)
.utf8_escaped(val=False)
.build()
)
plc.io.json.write_json(options, stream=df.stream)
return Column(
plc.Column.from_iterable_of_py(
buff.getvalue().split(), stream=df.stream
),
dtype=self.dtype,
)
elif self.name in {
StructFunction.Name.RenameFields,
StructFunction.Name.PrefixFields,
StructFunction.Name.SuffixFields,
}:
return column
else:
raise NotImplementedError(
f"Struct function {self.name}"
) # pragma: no cover
| StructFunction |
python | joke2k__faker | tests/providers/test_geo.py | {
"start": 3857,
"end": 4324
} | class ____(unittest.TestCase):
"""Tests in addresses in the de_AT locale"""
def setUp(self):
self.fake = Faker("de_AT")
Faker.seed(0)
def test_local_latitude(self):
local_latitude = self.fake.local_latitude()
assert re.match(r"4[6-8]\.\d+", str(local_latitude))
def test_local_longitude(self):
local_longitude = self.fake.local_longitude()
assert re.match(r"1[1-5]\.\d+", str(local_longitude))
| TestDeAT |
python | apache__airflow | providers/sftp/tests/unit/sftp/triggers/test_sftp.py | {
"start": 1110,
"end": 8258
} | class ____:
def test_sftp_trigger_serialization(self):
"""
Asserts that the SFTPTrigger correctly serializes its arguments and classpath.
"""
trigger = SFTPTrigger(path="test/path/", sftp_conn_id="sftp_default", file_pattern="my_test_file")
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.sftp.triggers.sftp.SFTPTrigger"
assert kwargs == {
"path": "test/path/",
"file_pattern": "my_test_file",
"sftp_conn_id": "sftp_default",
"newer_than": None,
"poke_interval": 5.0,
}
@pytest.mark.asyncio
@pytest.mark.parametrize(
"newer_than",
["19700101053001", None],
)
@mock.patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_files_and_attrs_by_pattern")
async def test_sftp_trigger_run_trigger_success_state(self, mock_get_files_by_pattern, newer_than):
"""
Assert that a TriggerEvent with a success status is yielded if a file
matching the pattern is returned by the hook
"""
mock_get_files_by_pattern.return_value = [
SFTPName("some_file", attrs=SFTPAttrs(mtime=1684244898)),
SFTPName("some_other_file"),
]
trigger = SFTPTrigger(
path="test/path/", sftp_conn_id="sftp_default", file_pattern="my_test_file", newer_than=newer_than
)
if newer_than:
expected_event = {"status": "success", "message": "Sensed 1 files: ['some_file']"}
else:
expected_event = {
"status": "success",
"message": "Sensed 2 files: ['some_file', 'some_other_file']",
}
generator = trigger.run()
actual_event = await generator.asend(None)
assert TriggerEvent(expected_event) == actual_event
@pytest.mark.asyncio
@mock.patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_mod_time")
async def test_sftp_success_without_file_pattern(self, mock_mod_time):
"""
Test SFTPTrigger run method by mocking the file path and without file pattern,
assert that a TriggerEvent with a success status is yielded.
"""
mock_mod_time.return_value = "19700101053001"
trigger = SFTPTrigger(path="test/path/test.txt", sftp_conn_id="sftp_default", file_pattern="")
expected_event = {"status": "success", "message": "Sensed file: test/path/test.txt"}
generator = trigger.run()
actual_event = await generator.asend(None)
assert TriggerEvent(expected_event) == actual_event
@pytest.mark.asyncio
@mock.patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_mod_time")
async def test_sftp_success_with_newer_then(self, mock_mod_time):
"""
Test SFTPTrigger run method by mocking the file path, without file pattern, and with newer then datetime
assert that a TriggerEvent with a success status is yielded.
"""
mock_mod_time.return_value = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
trigger = SFTPTrigger(
path="test/path/test.txt", sftp_conn_id="sftp_default", file_pattern="", newer_than=yesterday
)
expected_event = {"status": "success", "message": "Sensed file: test/path/test.txt"}
generator = trigger.run()
actual_event = await generator.asend(None)
assert TriggerEvent(expected_event) == actual_event
@pytest.mark.asyncio
@mock.patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_files_and_attrs_by_pattern")
async def test_sftp_trigger_run_trigger_defer_state(
self,
mock_get_files_by_pattern,
):
"""
Assert that a the task does not complete,
indicating that the task needs to be deferred
"""
mock_get_files_by_pattern.return_value = [SFTPName("my_test_file.txt", attrs=SFTPAttrs(mtime=49129))]
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
trigger = SFTPTrigger(
path="test/path/", sftp_conn_id="sftp_default", file_pattern="my_test_file", newer_than=yesterday
)
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_mod_time")
async def test_sftp_with_newer_then_date_greater(self, mock_mod_time):
"""
Test the Trigger run method by passing full file path, without file pattern and along with newer then datetime.
mock the datetime as greater then the last modified date and make the trigger task in running
state and assert to success
"""
today_time = time.time()
mock_mod_time.return_value = datetime.date.fromtimestamp(today_time).strftime("%Y%m%d%H%M%S")
newer_then_time = datetime.datetime.now() + datetime.timedelta(hours=1)
trigger = SFTPTrigger(
path="test/path/test.txt",
sftp_conn_id="sftp_default",
file_pattern="",
newer_than=newer_then_time,
)
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_files_and_attrs_by_pattern")
async def test_sftp_trigger_run_trigger_failure_state(self, mock_get_files_by_pattern):
"""
Mock the hook to raise other than an AirflowException and assert that a TriggerEvent with a failure status
"""
mock_get_files_by_pattern.side_effect = Exception("An unexpected exception")
trigger = SFTPTrigger(path="test/path/", sftp_conn_id="sftp_default", file_pattern="my_test_file")
expected_event = {"status": "error", "message": "An unexpected exception"}
generator = trigger.run()
actual_event = await generator.asend(None)
assert TriggerEvent(expected_event) == actual_event
@pytest.mark.asyncio
@mock.patch("airflow.providers.sftp.hooks.sftp.SFTPHookAsync.get_files_and_attrs_by_pattern")
async def test_sftp_trigger_run_airflow_exception(self, mock_get_files_by_pattern):
"""
Assert that a the task does not complete if the hook raises an AirflowException,
indicating that the task needs to be deferred
"""
mock_get_files_by_pattern.side_effect = AirflowException("No files at path /test/path/ found...")
trigger = SFTPTrigger(path="/test/path/", sftp_conn_id="sftp_default", file_pattern="my_test_file")
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
asyncio.get_event_loop().stop()
| TestSFTPTrigger |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_length.py | {
"start": 763,
"end": 835
} | class ____:
def __len__(self):
x = 42
return x
| Length2 |
python | nedbat__coveragepy | tests/test_report_common.py | {
"start": 459,
"end": 6255
} | class ____(CoverageTest):
"""Check that reporting implicitly maps paths."""
def make_files(self, data: str, settings: bool = False) -> None:
"""Create the test files we need for line coverage."""
src = """\
if VER == 1:
print("line 2")
if VER == 2:
print("line 4")
if VER == 3:
print("line 6")
"""
self.make_file("src/program.py", src)
self.make_file("ver1/program.py", src)
self.make_file("ver2/program.py", src)
if data == "line":
self.make_data_file(
lines={
abs_file("ver1/program.py"): [1, 2, 3, 5],
abs_file("ver2/program.py"): [1, 3, 4, 5],
},
)
else:
self.make_data_file(
arcs={
abs_file("ver1/program.py"): arcz_to_arcs(".1 12 23 35 5."),
abs_file("ver2/program.py"): arcz_to_arcs(".1 13 34 45 5."),
},
)
if settings:
self.make_file(
".coveragerc",
"""\
[paths]
source =
src
ver1
ver2
""",
)
def test_map_paths_during_line_report_without_setting(self) -> None:
self.make_files(data="line")
cov = coverage.Coverage()
cov.load()
cov.report(show_missing=True)
expected = textwrap.dedent(
os_sep("""\
Name Stmts Miss Cover Missing
-----------------------------------------------
ver1/program.py 6 2 67% 4, 6
ver2/program.py 6 2 67% 2, 6
-----------------------------------------------
TOTAL 12 4 67%
""")
)
assert expected == self.stdout()
def test_map_paths_during_line_report(self) -> None:
self.make_files(data="line", settings=True)
cov = coverage.Coverage()
cov.load()
cov.report(show_missing=True)
expected = textwrap.dedent(
os_sep("""\
Name Stmts Miss Cover Missing
----------------------------------------------
src/program.py 6 1 83% 6
----------------------------------------------
TOTAL 6 1 83%
""")
)
assert expected == self.stdout()
def test_map_paths_during_branch_report_without_setting(self) -> None:
self.make_files(data="arcs")
cov = coverage.Coverage(branch=True)
cov.load()
cov.report(show_missing=True)
expected = textwrap.dedent(
os_sep("""\
Name Stmts Miss Branch BrPart Cover Missing
-------------------------------------------------------------
ver1/program.py 6 2 6 3 58% 1->3, 4, 6
ver2/program.py 6 2 6 3 58% 2, 3->5, 6
-------------------------------------------------------------
TOTAL 12 4 12 6 58%
""")
)
assert expected == self.stdout()
def test_map_paths_during_branch_report(self) -> None:
self.make_files(data="arcs", settings=True)
cov = coverage.Coverage(branch=True)
cov.load()
cov.report(show_missing=True)
expected = textwrap.dedent(
os_sep("""\
Name Stmts Miss Branch BrPart Cover Missing
------------------------------------------------------------
src/program.py 6 1 6 1 83% 6
------------------------------------------------------------
TOTAL 6 1 6 1 83%
""")
)
assert expected == self.stdout()
def test_map_paths_during_annotate(self) -> None:
self.make_files(data="line", settings=True)
cov = coverage.Coverage()
cov.load()
cov.annotate()
self.assert_exists(os_sep("src/program.py,cover"))
self.assert_doesnt_exist(os_sep("ver1/program.py,cover"))
self.assert_doesnt_exist(os_sep("ver2/program.py,cover"))
def test_map_paths_during_html_report(self) -> None:
self.make_files(data="line", settings=True)
cov = coverage.Coverage()
cov.load()
cov.html_report()
contains("htmlcov/index.html", os_sep("src / program.py"))
doesnt_contain("htmlcov/index.html", "ver1", "ver2")
def test_map_paths_during_xml_report(self) -> None:
self.make_files(data="line", settings=True)
cov = coverage.Coverage()
cov.load()
cov.xml_report()
contains("coverage.xml", "src/program.py")
doesnt_contain("coverage.xml", "ver1", "ver2")
def test_map_paths_during_json_report(self) -> None:
self.make_files(data="line", settings=True)
cov = coverage.Coverage()
cov.load()
cov.json_report()
def os_sepj(s: str) -> str:
return os_sep(s).replace("\\", r"\\")
contains("coverage.json", os_sepj("src/program.py"))
doesnt_contain("coverage.json", "ver1", "ver2")
def test_map_paths_during_lcov_report(self) -> None:
self.make_files(data="line", settings=True)
cov = coverage.Coverage()
cov.load()
cov.lcov_report()
contains("coverage.lcov", os_sep("src/program.py"))
doesnt_contain("coverage.lcov", "ver1", "ver2")
| ReportMapsPathsTest |
python | google__pytype | pytype/tools/tool_utils_test.py | {
"start": 775,
"end": 1346
} | class ____(unittest.TestCase):
"""Tests for tool_utils.makedirs_or_die()."""
def test_make(self):
with test_utils.Tempdir() as d:
subdir = path_utils.join(d.path, 'some/path')
tool_utils.makedirs_or_die(subdir, '')
self.assertTrue(path_utils.isdir(subdir))
def test_die(self):
with self.assertRaises(SystemExit):
if sys.platform == 'win32':
tool_utils.makedirs_or_die('C:/invalid:path', '')
else:
tool_utils.makedirs_or_die('/nonexistent/path', '')
if __name__ == '__main__':
unittest.main()
| TestMakeDirsOrDie |
python | getsentry__sentry | tests/sentry/uptime/autodetect/test_ranking.py | {
"start": 4610,
"end": 5269
} | class ____(UptimeTestCase):
def test(self) -> None:
assert get_candidate_projects_for_org(self.organization) == []
url_1 = "https://sentry.io"
url_2 = "https://sentry.sentry.io"
add_base_url_to_rank(self.project, url_1)
assert get_candidate_projects_for_org(self.organization) == [(self.project.id, 1)]
add_base_url_to_rank(self.project, url_2)
project_2 = self.create_project()
add_base_url_to_rank(project_2, url_2)
assert get_candidate_projects_for_org(self.organization) == [
(self.project.id, 2),
(project_2.id, 1),
]
| GetCandidateProjectsForOrgTest |
python | pypa__hatch | tests/backend/builders/test_wheel.py | {
"start": 141078,
"end": 149849
} | class ____:
def test_single_sbom_file(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["tests"] = False
config_file.save()
with temp_dir.as_cwd():
result = hatch("new", "My.App")
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
sbom_file = project_path / "my-sbom.spdx.json"
sbom_file.write_text('{"spdxVersion": "SPDX-2.3"}')
config = {
"project": {"name": "My.App", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "src/my_app/__about__.py"},
"build": {"targets": {"wheel": {"sbom-files": ["my-sbom.spdx.json"]}}},
}
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(artifacts[0]), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_sbom",
"My.App",
metadata_directory=metadata_directory,
sbom_files=[("my-sbom.spdx.json", '{"spdxVersion": "SPDX-2.3"}')],
)
helpers.assert_files(extraction_directory, expected_files)
def test_multiple_sbom_files(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["tests"] = False
config_file.save()
with temp_dir.as_cwd():
result = hatch("new", "My.App")
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
(project_path / "sbom1.spdx.json").write_text('{"spdxVersion": "SPDX-2.3"}')
(project_path / "sbom2.cyclonedx.json").write_text('{"bomFormat": "CycloneDX"}')
config = {
"project": {"name": "My.App", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "src/my_app/__about__.py"},
"build": {"targets": {"wheel": {"sbom-files": ["sbom1.spdx.json", "sbom2.cyclonedx.json"]}}},
}
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(artifacts[0]), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_sbom",
"My.App",
metadata_directory=metadata_directory,
sbom_files=[
("sbom1.spdx.json", '{"spdxVersion": "SPDX-2.3"}'),
("sbom2.cyclonedx.json", '{"bomFormat": "CycloneDX"}'),
],
)
helpers.assert_files(extraction_directory, expected_files)
def test_nested_sbom_file(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["tests"] = False
config_file.save()
with temp_dir.as_cwd():
result = hatch("new", "My.App")
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
sbom_dir = project_path / "sboms"
sbom_dir.mkdir()
(sbom_dir / "vendor.spdx.json").write_text('{"spdxVersion": "SPDX-2.3"}')
config = {
"project": {"name": "My.App", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "src/my_app/__about__.py"},
"build": {"targets": {"wheel": {"sbom-files": ["sboms/vendor.spdx.json"]}}},
}
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(artifacts[0]), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_sbom",
"My.App",
metadata_directory=metadata_directory,
sbom_files=[("vendor.spdx.json", '{"spdxVersion": "SPDX-2.3"}')],
)
helpers.assert_files(extraction_directory, expected_files)
def test_sbom_files_invalid_type(self, isolation):
config = {
"project": {"name": "my-app", "version": "0.0.1"},
"tool": {"hatch": {"build": {"targets": {"wheel": {"sbom-files": "not-a-list"}}}}},
}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Field `tool.hatch.build.targets.wheel.sbom-files` must be an array"):
_ = builder.config.sbom_files
def test_sbom_file_invalid_item(self, isolation):
config = {
"project": {"name": "my-app", "version": "0.0.1"},
"tool": {"hatch": {"build": {"targets": {"wheel": {"sbom-files": [123]}}}}},
}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(
TypeError, match="SBOM file #1 in field `tool.hatch.build.targets.wheel.sbom-files` must be a string"
):
_ = builder.config.sbom_files
def test_sbom_from_build_data(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["tests"] = False
config_file.save()
with temp_dir.as_cwd():
result = hatch("new", "My.App")
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
(project_path / "sbom1.cyclonedx.json").write_text('{"bomFormat": "CycloneDX"}')
(project_path / "sbom2.spdx.json").write_text('{"spdxVersion": "SPDX-2.3"}')
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
build_data["sbom_files"].append("sbom2.spdx.json")
"""
)
)
config = {
"project": {"name": "My.App", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "src/my_app/__about__.py"},
"build": {
"targets": {"wheel": {"sbom-files": ["sbom1.cyclonedx.json"]}},
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
}
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(artifacts[0]), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_sbom",
"My.App",
metadata_directory=metadata_directory,
sbom_files=[
("sbom1.cyclonedx.json", '{"bomFormat": "CycloneDX"}'),
("sbom2.spdx.json", '{"spdxVersion": "SPDX-2.3"}'),
],
)
helpers.assert_files(extraction_directory, expected_files)
| TestSBOMFiles |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_backfills.py | {
"start": 21347,
"end": 26317
} | class ____(TestBackfillEndpoint):
@pytest.mark.parametrize(
("reprocess_behavior", "expected_dates"),
[
(
"none",
[
{"logical_date": "2024-01-01T00:00:00Z"},
{"logical_date": "2024-01-04T00:00:00Z"},
{"logical_date": "2024-01-05T00:00:00Z"},
],
),
(
"failed",
[
{"logical_date": "2024-01-01T00:00:00Z"},
{"logical_date": "2024-01-03T00:00:00Z"}, # Reprocess failed
{"logical_date": "2024-01-04T00:00:00Z"},
{"logical_date": "2024-01-05T00:00:00Z"},
],
),
(
"completed",
[
{"logical_date": "2024-01-01T00:00:00Z"},
{"logical_date": "2024-01-02T00:00:00Z"}, # Reprocess all
{"logical_date": "2024-01-03T00:00:00Z"},
{"logical_date": "2024-01-04T00:00:00Z"},
{"logical_date": "2024-01-05T00:00:00Z"},
],
),
],
)
def test_create_backfill_dry_run(
self, session, dag_maker, test_client, reprocess_behavior, expected_dates
):
with dag_maker(
session=session,
dag_id="TEST_DAG_2",
schedule="0 0 * * *",
start_date=pendulum.parse("2024-01-01"),
) as dag:
EmptyOperator(task_id="mytask")
session.commit()
existing_dagruns = [
{"logical_date": pendulum.parse("2024-01-02"), "state": DagRunState.SUCCESS}, # Completed dag run
{"logical_date": pendulum.parse("2024-01-03"), "state": DagRunState.FAILED}, # Failed dag run
]
for dagrun in existing_dagruns:
session.add(
DagRun(
dag_id=dag.dag_id,
run_id=f"manual__{dagrun['logical_date'].isoformat()}",
logical_date=dagrun["logical_date"],
state=dagrun["state"],
run_type="scheduled",
)
)
session.commit()
from_date = pendulum.parse("2024-01-01")
from_date_iso = to_iso(from_date)
to_date = pendulum.parse("2024-01-05")
to_date_iso = to_iso(to_date)
data = {
"dag_id": dag.dag_id,
"from_date": from_date_iso,
"to_date": to_date_iso,
"max_active_runs": 5,
"run_backwards": False,
"dag_run_conf": {"param1": "val1", "param2": True},
"reprocess_behavior": reprocess_behavior,
}
response = test_client.post(
url="/backfills/dry_run",
json=data,
)
assert response.status_code == 200
response_json = response.json()
assert response_json["backfills"] == expected_dates
@pytest.mark.parametrize(
("repro_act", "repro_exp", "run_backwards", "status_code"),
[
("none", ReprocessBehavior.NONE, False, 422),
("completed", ReprocessBehavior.COMPLETED, False, 200),
("completed", ReprocessBehavior.COMPLETED, True, 422),
],
)
def test_create_backfill_dry_run_with_depends_on_past(
self, repro_act, repro_exp, run_backwards, status_code, session, dag_maker, test_client
):
with dag_maker(session=session, dag_id="TEST_DAG_1", schedule="0 * * * *") as dag:
EmptyOperator(task_id="mytask", depends_on_past=True)
session.query(DagModel).all()
session.commit()
from_date = pendulum.parse("2024-01-01")
from_date_iso = to_iso(from_date)
to_date = pendulum.parse("2024-02-01")
to_date_iso = to_iso(to_date)
max_active_runs = 5
data = {
"dag_id": dag.dag_id,
"from_date": f"{from_date_iso}",
"to_date": f"{to_date_iso}",
"max_active_runs": max_active_runs,
"run_backwards": run_backwards,
"dag_run_conf": {"param1": "val1", "param2": True},
"reprocess_behavior": repro_act,
}
response = test_client.post(
url="/backfills/dry_run",
json=data,
)
assert response.status_code == status_code
if response.status_code != 200:
if run_backwards:
assert (
response.json().get("detail")
== "Backfill cannot be run in reverse when the DAG has tasks where depends_on_past=True."
)
else:
assert (
response.json().get("detail")
== "DAG has tasks for which depends_on_past=True. You must set reprocess behavior to reprocess completed or reprocess failed."
)
| TestCreateBackfillDryRun |
python | great-expectations__great_expectations | tests/metrics/test_metric.py | {
"start": 2001,
"end": 2960
} | class ____:
@pytest.mark.unit
def test_success(self):
expected_config = MetricConfiguration(
metric_name=FULLY_QUALIFIED_METRIC_NAME,
metric_domain_kwargs={
"batch_id": BATCH_ID,
"row_condition": None,
"condition_parser": None,
"column": COLUMN,
},
metric_value_kwargs={
"min_value": 42,
"strict_min": False,
},
)
actual_config = ColumnValuesAbove(
column=COLUMN,
min_value=42,
).config(batch_id=BATCH_ID)
assert actual_config.metric_name == expected_config.metric_name
assert actual_config.metric_domain_kwargs == expected_config.metric_domain_kwargs
assert actual_config.metric_value_kwargs == expected_config.metric_value_kwargs
assert isinstance(actual_config.id, MetricConfigurationID)
| TestMetricConfig |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/callable.py | {
"start": 0,
"end": 277
} | class ____:
"""A callable object that behaves like a function."""
def __call__(self, arg1, arg2, **kwargs):
pass
def method(self, arg1, arg2):
"""docstring of Callable.method()."""
pass
function = Callable()
method = function.method
| Callable |
python | spack__spack | lib/spack/spack/version/common.py | {
"start": 811,
"end": 901
} | class ____(VersionError):
"""Raised for version checksum errors."""
| VersionChecksumError |
python | PyCQA__pylint | tests/functional/m/member/member_checks_hints.py | {
"start": 332,
"end": 670
} | class ____(Parent):
def __init__(self):
super().__init__()
self._similar # [no-member]
self._really_similar # [no-member]
self._paren # [no-member]
# Distance is too big
self._registryyyy # [no-member]
# Nothing close.
self._pretty_sure_this_wont_match # [no-member]
| Child |
python | apache__airflow | airflow-core/src/airflow/cli/cli_parser.py | {
"start": 4577,
"end": 7137
} | class ____(RawTextRichHelpFormatter):
"""
Custom help formatter to display help message.
It resolves lazy help string before printing it using rich.
"""
def add_argument(self, action: Action) -> None:
if isinstance(action.help, lazy_object_proxy.Proxy):
action.help = str(action.help)
return super().add_argument(action)
@cache
def get_parser(dag_parser: bool = False) -> argparse.ArgumentParser:
"""Create and returns command line argument parser."""
parser = DefaultHelpParser(prog="airflow", formatter_class=AirflowHelpFormatter)
subparsers = parser.add_subparsers(dest="subcommand", metavar="GROUP_OR_COMMAND")
subparsers.required = True
command_dict = DAG_CLI_DICT if dag_parser else ALL_COMMANDS_DICT
for _, sub in sorted(command_dict.items()):
_add_command(subparsers, sub)
return parser
def _sort_args(args: Iterable[Arg]) -> Iterable[Arg]:
"""Sort subcommand optional args, keep positional args."""
def get_long_option(arg: Arg):
"""Get long option from Arg.flags."""
return arg.flags[0] if len(arg.flags) == 1 else arg.flags[1]
positional, optional = partition(lambda x: x.flags[0].startswith("-"), args)
yield from positional
yield from sorted(optional, key=lambda x: get_long_option(x).lower())
def _add_command(subparsers: argparse._SubParsersAction, sub: CLICommand) -> None:
if isinstance(sub, ActionCommand) and sub.hide:
sub_proc = subparsers.add_parser(sub.name, epilog=sub.epilog)
else:
sub_proc = subparsers.add_parser(
sub.name, help=sub.help, description=sub.description or sub.help, epilog=sub.epilog
)
sub_proc.formatter_class = LazyRichHelpFormatter
if isinstance(sub, GroupCommand):
_add_group_command(sub, sub_proc)
elif isinstance(sub, ActionCommand):
_add_action_command(sub, sub_proc)
else:
raise AirflowException("Invalid command definition.")
def _add_action_command(sub: ActionCommand, sub_proc: argparse.ArgumentParser) -> None:
for arg in _sort_args(sub.args):
arg.add_to_parser(sub_proc)
sub_proc.set_defaults(func=sub.func)
def _add_group_command(sub: GroupCommand, sub_proc: argparse.ArgumentParser) -> None:
subcommands = sub.subcommands
sub_subparsers = sub_proc.add_subparsers(dest="subcommand", metavar="COMMAND")
sub_subparsers.required = True
for command in sorted(subcommands, key=lambda x: x.name):
_add_command(sub_subparsers, command)
| LazyRichHelpFormatter |
python | kevin1024__vcrpy | vcr/stubs/__init__.py | {
"start": 13610,
"end": 13827
} | class ____(VCRConnection):
"""A Mocked class for HTTP requests"""
_baseclass = HTTPConnection
_protocol = "http"
debuglevel = _baseclass.debuglevel
_http_vsn = _baseclass._http_vsn
| VCRHTTPConnection |
python | kubernetes-client__python | kubernetes/client/models/v1_cluster_role_binding_list.py | {
"start": 383,
"end": 7095
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1ClusterRoleBinding]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1ClusterRoleBindingList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1ClusterRoleBindingList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ClusterRoleBindingList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ClusterRoleBindingList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ClusterRoleBindingList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1ClusterRoleBindingList. # noqa: E501
Items is a list of ClusterRoleBindings # noqa: E501
:return: The items of this V1ClusterRoleBindingList. # noqa: E501
:rtype: list[V1ClusterRoleBinding]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1ClusterRoleBindingList.
Items is a list of ClusterRoleBindings # noqa: E501
:param items: The items of this V1ClusterRoleBindingList. # noqa: E501
:type: list[V1ClusterRoleBinding]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1ClusterRoleBindingList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ClusterRoleBindingList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ClusterRoleBindingList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ClusterRoleBindingList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ClusterRoleBindingList. # noqa: E501
:return: The metadata of this V1ClusterRoleBindingList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ClusterRoleBindingList.
:param metadata: The metadata of this V1ClusterRoleBindingList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ClusterRoleBindingList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ClusterRoleBindingList):
return True
return self.to_dict() != other.to_dict()
| V1ClusterRoleBindingList |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_serialize_as_any.py | {
"start": 235,
"end": 16452
} | class ____(ParentModel):
y: str
ParentModel.__pydantic_core_schema__ = core_schema.model_schema(
ParentModel,
core_schema.model_fields_schema(
{
'x': core_schema.model_field(core_schema.int_schema()),
}
),
ref='ParentModel',
)
ParentModel.__pydantic_validator__ = SchemaValidator(ParentModel.__pydantic_core_schema__)
ParentModel.__pydantic_serializer__ = SchemaSerializer(ParentModel.__pydantic_core_schema__)
ChildModel.__pydantic_core_schema__ = core_schema.model_schema(
ChildModel,
core_schema.model_fields_schema(
{
'x': core_schema.model_field(core_schema.int_schema()),
'y': core_schema.model_field(core_schema.str_schema()),
}
),
)
ChildModel.__pydantic_validator__ = SchemaValidator(ChildModel.__pydantic_core_schema__)
ChildModel.__pydantic_serializer__ = SchemaSerializer(ChildModel.__pydantic_core_schema__)
def test_serialize_as_any_with_models() -> None:
child = ChildModel.__pydantic_validator__.validate_python({'x': 1, 'y': 'hopefully not a secret'})
assert ParentModel.__pydantic_serializer__.to_python(child, serialize_as_any=False) == {'x': 1}
assert ParentModel.__pydantic_serializer__.to_python(child, serialize_as_any=True) == {
'x': 1,
'y': 'hopefully not a secret',
}
def test_serialize_as_any_with_dataclass() -> None:
@dataclass
class Parent:
x: int
class Child(Parent):
y: str
Parent.__pydantic_core_schema__ = core_schema.dataclass_schema(
Parent,
core_schema.dataclass_args_schema(
'Parent',
[
core_schema.dataclass_field(name='x', schema=core_schema.int_schema()),
],
),
['x'],
)
Parent.__pydantic_validator__ = SchemaValidator(Parent.__pydantic_core_schema__)
Parent.__pydantic_serializer__ = SchemaSerializer(Parent.__pydantic_core_schema__)
Child.__pydantic_core_schema__ = core_schema.dataclass_schema(
Child,
core_schema.dataclass_args_schema(
'Child',
[
core_schema.dataclass_field(name='x', schema=core_schema.int_schema()),
core_schema.dataclass_field(name='y', schema=core_schema.str_schema()),
],
),
['x', 'y'],
)
Child.__pydantic_validator__ = SchemaValidator(Child.__pydantic_core_schema__)
Child.__pydantic_serializer__ = SchemaSerializer(Child.__pydantic_core_schema__)
child = Child.__pydantic_validator__.validate_python({'x': 1, 'y': 'hopefully not a secret'})
assert Parent.__pydantic_serializer__.to_python(child, serialize_as_any=False) == {'x': 1}
assert Parent.__pydantic_serializer__.to_python(child, serialize_as_any=True) == {
'x': 1,
'y': 'hopefully not a secret',
}
def test_serialize_as_any_with_typeddict() -> None:
class Parent(TypedDict):
x: int
class Child(Parent):
y: str
Parent.__pydantic_core_schema__ = core_schema.typed_dict_schema(
{
'x': core_schema.typed_dict_field(core_schema.int_schema()),
}
)
Parent.__pydantic_validator__ = SchemaValidator(Parent.__pydantic_core_schema__)
Parent.__pydantic_serializer__ = SchemaSerializer(Parent.__pydantic_core_schema__)
Child.__pydantic_core_schema__ = core_schema.typed_dict_schema(
{
'x': core_schema.typed_dict_field(core_schema.int_schema()),
'y': core_schema.typed_dict_field(core_schema.str_schema()),
}
)
Child.__pydantic_validator__ = SchemaValidator(Child.__pydantic_core_schema__)
Child.__pydantic_serializer__ = SchemaSerializer(Child.__pydantic_core_schema__)
child = Child.__pydantic_validator__.validate_python({'x': 1, 'y': 'hopefully not a secret'})
assert Parent.__pydantic_serializer__.to_python(child, serialize_as_any=False) == {'x': 1}
assert Parent.__pydantic_serializer__.to_python(child, serialize_as_any=True) == {
'x': 1,
'y': 'hopefully not a secret',
}
def test_serialize_as_any_with_unrelated_models() -> None:
class Parent:
x: int
class Other:
y: str
Parent.__pydantic_core_schema__ = core_schema.model_schema(
Parent,
core_schema.model_fields_schema(
{
'x': core_schema.model_field(core_schema.int_schema()),
}
),
)
Parent.__pydantic_validator__ = SchemaValidator(Parent.__pydantic_core_schema__)
Parent.__pydantic_serializer__ = SchemaSerializer(Parent.__pydantic_core_schema__)
Other.__pydantic_core_schema__ = core_schema.model_schema(
Other,
core_schema.model_fields_schema(
{
'y': core_schema.model_field(core_schema.str_schema()),
}
),
config=core_schema.CoreConfig(extra_fields_behavior='allow'),
)
Other.__pydantic_validator__ = SchemaValidator(Other.__pydantic_core_schema__)
Other.__pydantic_serializer__ = SchemaSerializer(Other.__pydantic_core_schema__)
other = Other.__pydantic_validator__.validate_python({'x': 1, 'y': 'hopefully not a secret'})
assert Parent.__pydantic_serializer__.to_python(other, serialize_as_any=False) == {}
# note, without extra='allow', the 'x' field would not be included, as it's not in the schema
assert Parent.__pydantic_serializer__.to_python(other, serialize_as_any=True) == {
'x': 1,
'y': 'hopefully not a secret',
}
def test_serialize_as_any_with_nested_models() -> None:
class Parent:
x: int
class Other(Parent):
y: str
class Outer:
p: Parent
Parent.__pydantic_core_schema__ = core_schema.model_schema(
Parent,
core_schema.model_fields_schema(
{
'x': core_schema.model_field(core_schema.int_schema()),
}
),
ref='Parent',
)
Parent.__pydantic_validator__ = SchemaValidator(Parent.__pydantic_core_schema__)
Parent.__pydantic_serializer__ = SchemaSerializer(Parent.__pydantic_core_schema__)
Other.__pydantic_core_schema__ = core_schema.model_schema(
Other,
core_schema.model_fields_schema(
{
'x': core_schema.model_field(core_schema.int_schema()),
'y': core_schema.model_field(core_schema.str_schema()),
}
),
config=core_schema.CoreConfig(extra_fields_behavior='allow'),
)
Other.__pydantic_validator__ = SchemaValidator(Other.__pydantic_core_schema__)
Other.__pydantic_serializer__ = SchemaSerializer(Other.__pydantic_core_schema__)
Outer.__pydantic_core_schema__ = core_schema.definitions_schema(
core_schema.model_schema(
Outer,
core_schema.model_fields_schema(
{
'p': core_schema.model_field(core_schema.definition_reference_schema('Parent')),
}
),
),
[
Parent.__pydantic_core_schema__,
],
)
Outer.__pydantic_validator__ = SchemaValidator(Outer.__pydantic_core_schema__)
Outer.__pydantic_serializer__ = SchemaSerializer(Outer.__pydantic_core_schema__)
other = Other.__pydantic_validator__.validate_python({'x': 1, 'y': 'hopefully not a secret'})
outer = Outer()
outer.p = other
assert Outer.__pydantic_serializer__.to_python(outer, serialize_as_any=False) == {
'p': {'x': 1},
}
assert Outer.__pydantic_serializer__.to_python(outer, serialize_as_any=True) == {
'p': {
'x': 1,
'y': 'hopefully not a secret',
}
}
assert Outer.__pydantic_serializer__.to_json(outer, serialize_as_any=False) == b'{"p":{"x":1}}'
assert (
Outer.__pydantic_serializer__.to_json(outer, serialize_as_any=True)
== b'{"p":{"x":1,"y":"hopefully not a secret"}}'
)
def test_serialize_with_recursive_models() -> None:
class Node:
next: Optional['Node'] = None
value: int = 42
schema = core_schema.definitions_schema(
core_schema.definition_reference_schema('Node'),
[
core_schema.model_schema(
Node,
core_schema.model_fields_schema(
{
'value': core_schema.model_field(
core_schema.with_default_schema(core_schema.int_schema(), default=42)
),
'next': core_schema.model_field(
core_schema.with_default_schema(
core_schema.nullable_schema(core_schema.definition_reference_schema('Node')),
default=None,
)
),
}
),
ref='Node',
)
],
)
Node.__pydantic_core_schema__ = schema
Node.__pydantic_validator__ = SchemaValidator(Node.__pydantic_core_schema__)
Node.__pydantic_serializer__ = SchemaSerializer(Node.__pydantic_core_schema__)
other = Node.__pydantic_validator__.validate_python({'next': {'value': 4}})
assert Node.__pydantic_serializer__.to_python(other, serialize_as_any=False) == {
'next': {'next': None, 'value': 4},
'value': 42,
}
assert Node.__pydantic_serializer__.to_python(other, serialize_as_any=True) == {
'next': {'next': None, 'value': 4},
'value': 42,
}
def test_serialize_as_any_with_root_model_and_subclasses() -> None:
class RModel:
root: ParentModel
RModel.__pydantic_core_schema__ = core_schema.model_schema(
RModel,
ParentModel.__pydantic_core_schema__,
root_model=True,
)
RModel.__pydantic_validator__ = SchemaValidator(RModel.__pydantic_core_schema__)
RModel.__pydantic_serializer__ = SchemaSerializer(RModel.__pydantic_core_schema__)
value = RModel.__pydantic_validator__.validate_python({'x': 1})
value.root = ChildModel.__pydantic_validator__.validate_python({'x': 1, 'y': 'hopefully not a secret'})
assert RModel.__pydantic_serializer__.to_python(value, serialize_as_any=False) == {'x': 1}
assert RModel.__pydantic_serializer__.to_python(value, serialize_as_any=True) == {
'x': 1,
'y': 'hopefully not a secret',
}
assert RModel.__pydantic_serializer__.to_json(value, serialize_as_any=False) == b'{"x":1}'
assert (
RModel.__pydantic_serializer__.to_json(value, serialize_as_any=True) == b'{"x":1,"y":"hopefully not a secret"}'
)
def test_serialize_with_custom_type_and_subclasses():
class CustomType:
value: ParentModel
CustomType.__pydantic_core_schema__ = core_schema.model_schema(
CustomType,
core_schema.model_fields_schema(
{
'value': core_schema.model_field(ParentModel.__pydantic_core_schema__),
}
),
)
CustomType.__pydantic_validator__ = SchemaValidator(CustomType.__pydantic_core_schema__)
CustomType.__pydantic_serializer__ = SchemaSerializer(CustomType.__pydantic_core_schema__)
value = CustomType.__pydantic_validator__.validate_python({'value': {'x': 1}})
value.value = ChildModel.__pydantic_validator__.validate_python({'x': 1, 'y': 'hopefully not a secret'})
assert CustomType.__pydantic_serializer__.to_python(value, serialize_as_any=False) == {
'value': {'x': 1},
}
assert CustomType.__pydantic_serializer__.to_python(value, serialize_as_any=True) == {
'value': {'x': 1, 'y': 'hopefully not a secret'}
}
assert CustomType.__pydantic_serializer__.to_json(value, serialize_as_any=False) == b'{"value":{"x":1}}'
assert (
CustomType.__pydantic_serializer__.to_json(value, serialize_as_any=True)
== b'{"value":{"x":1,"y":"hopefully not a secret"}}'
)
def test_serialize_as_any_wrap_serializer_applied_once() -> None:
# https://github.com/pydantic/pydantic/issues/11139
class InnerModel:
an_inner_field: int
InnerModel.__pydantic_core_schema__ = core_schema.model_schema(
InnerModel,
core_schema.model_fields_schema({'an_inner_field': core_schema.model_field(core_schema.int_schema())}),
)
InnerModel.__pydantic_validator__ = SchemaValidator(InnerModel.__pydantic_core_schema__)
InnerModel.__pydantic_serializer__ = SchemaSerializer(InnerModel.__pydantic_core_schema__)
class MyModel:
a_field: InnerModel
def a_model_serializer(self, handler, info):
return {k + '_wrapped': v for k, v in handler(self).items()}
MyModel.__pydantic_core_schema__ = core_schema.model_schema(
MyModel,
core_schema.model_fields_schema({'a_field': core_schema.model_field(InnerModel.__pydantic_core_schema__)}),
serialization=core_schema.wrap_serializer_function_ser_schema(
MyModel.a_model_serializer,
info_arg=True,
),
)
MyModel.__pydantic_validator__ = SchemaValidator(MyModel.__pydantic_core_schema__)
MyModel.__pydantic_serializer__ = SchemaSerializer(MyModel.__pydantic_core_schema__)
instance = MyModel.__pydantic_validator__.validate_python({'a_field': {'an_inner_field': 1}})
assert MyModel.__pydantic_serializer__.to_python(instance, serialize_as_any=True) == {
'a_field_wrapped': {'an_inner_field': 1},
}
@pytest.fixture(params=['model', 'dataclass'])
def container_schema_builder(
request: pytest.FixtureRequest,
) -> Callable[[dict[str, core_schema.CoreSchema]], core_schema.CoreSchema]:
if request.param == 'model':
return lambda fields: core_schema.model_schema(
cls=type('Test', (), {}),
schema=core_schema.model_fields_schema(
fields={k: core_schema.model_field(schema=v) for k, v in fields.items()},
),
)
elif request.param == 'dataclass':
return lambda fields: core_schema.dataclass_schema(
cls=dataclass(type('Test', (), {})),
schema=core_schema.dataclass_args_schema(
'Test',
fields=[core_schema.dataclass_field(name=k, schema=v) for k, v in fields.items()],
),
fields=[k for k in fields.keys()],
)
else:
raise ValueError(f'Unknown container type {request.param}')
def test_serialize_as_any_with_field_serializer(container_schema_builder) -> None:
# https://github.com/pydantic/pydantic/issues/12379
schema = container_schema_builder(
{
'value': core_schema.int_schema(
serialization=core_schema.plain_serializer_function_ser_schema(
lambda model, v: v * 2, is_field_serializer=True
)
)
}
)
v = SchemaValidator(schema).validate_python({'value': 123})
cls = type(v)
s = SchemaSerializer(schema)
# necessary to ensure that type inference will pick up the serializer
cls.__pydantic_serializer__ = s
assert s.to_python(v, serialize_as_any=False) == {'value': 246}
assert s.to_python(v, serialize_as_any=True) == {'value': 246}
assert s.to_json(v, serialize_as_any=False) == b'{"value":246}'
assert s.to_json(v, serialize_as_any=True) == b'{"value":246}'
def test_serialize_as_any_with_field_serializer_root_model() -> None:
"""https://github.com/pydantic/pydantic/issues/12379."""
schema = core_schema.model_schema(
type('Test', (), {}),
core_schema.int_schema(
serialization=core_schema.plain_serializer_function_ser_schema(
lambda model, v: v * 2, is_field_serializer=True
)
),
root_model=True,
)
v = SchemaValidator(schema).validate_python(123)
cls = type(v)
s = SchemaSerializer(schema)
# necessary to ensure that type inference will pick up the serializer
cls.__pydantic_serializer__ = s
assert s.to_python(v, serialize_as_any=False) == 246
assert s.to_python(v, serialize_as_any=True) == 246
assert s.to_json(v, serialize_as_any=False) == b'246'
assert s.to_json(v, serialize_as_any=True) == b'246'
| ChildModel |
python | weaviate__weaviate-python-client | weaviate/connect/integrations.py | {
"start": 458,
"end": 787
} | class ____(_IntegrationConfig):
api_key: str = Field(serialization_alias="X-Cohere-Api-Key")
requests_per_minute_embeddings: Optional[int] = Field(
serialization_alias="X-Cohere-Ratelimit-RequestPM-Embedding"
)
base_url: Optional[str] = Field(serialization_alias="X-Cohere-Baseurl")
| _IntegrationConfigCohere |
python | jazzband__django-oauth-toolkit | oauth2_provider/generators.py | {
"start": 348,
"end": 671
} | class ____(BaseHashGenerator):
def hash(self):
"""
Generate a client_id for Basic Authentication scheme without colon char
as in https://rfc-editor.org/rfc/rfc2617.html#section-2
"""
return oauthlib_generate_client_id(length=40, chars=UNICODE_ASCII_CHARACTER_SET)
| ClientIdGenerator |
python | falconry__falcon | tests/test_middleware.py | {
"start": 30021,
"end": 32126
} | class ____(TestMiddleware):
def test_error_composed_before_resp_middleware_called(self, asgi, util):
mw = CaptureResponseMiddleware()
app = util.create_app(asgi, middleware=mw)
app.add_route('/', MiddlewareClassResource())
client = testing.TestClient(app)
response = client.simulate_request(path='/', method='POST')
assert response.status == falcon.HTTP_403
assert mw.resp.status == response.status
composed_body = json.loads(mw.resp.data.decode())
assert composed_body['title'] == response.status
assert not mw.req_succeeded
# NOTE(kgriffs): Sanity-check the other params passed to
# process_response()
assert isinstance(mw.req, falcon.Request)
assert isinstance(mw.resource, MiddlewareClassResource)
def test_http_status_raised_from_error_handler(self, asgi, util):
mw = CaptureResponseMiddleware()
app = util.create_app(asgi, middleware=mw)
app.add_route('/', MiddlewareClassResource())
client = testing.TestClient(app)
# NOTE(kgriffs): Use the old-style error handler signature to
# ensure our shim for that works as expected.
def _http_error_handler(error, req, resp, params):
raise falcon.HTTPStatus(falcon.HTTP_201)
async def _http_error_handler_async(req, resp, error, params):
raise falcon.HTTPStatus(falcon.HTTP_201)
h = _http_error_handler_async if asgi else _http_error_handler
# NOTE(kgriffs): This will take precedence over the default
# handler for facon.HTTPError.
if asgi:
# NOTE(vytas): The ASGI flavour supports no reordering shim.
app.add_error_handler(falcon.HTTPError, h)
else:
with pytest.warns(DeprecatedWarning, match='deprecated signature'):
app.add_error_handler(falcon.HTTPError, h)
response = client.simulate_request(path='/', method='POST')
assert response.status == falcon.HTTP_201
assert mw.resp.status == response.status
| TestErrorHandling |
python | django__django | tests/logging_tests/tests.py | {
"start": 18054,
"end": 18899
} | class ____(AdminScriptTestCase):
"""
Accessing settings in a custom logging handler does not trigger
a circular import error.
"""
def setUp(self):
super().setUp()
log_config = """{
'version': 1,
'handlers': {
'custom_handler': {
'level': 'INFO',
'class': 'logging_tests.logconfig.MyHandler',
}
}
}"""
self.write_settings("settings.py", sdict={"LOGGING": log_config})
def test_circular_dependency(self):
# validate is just an example command to trigger settings configuration
out, err = self.run_manage(["check"])
self.assertNoOutput(err)
self.assertOutput(out, "System check identified no issues (0 silenced).")
def dictConfig(config):
dictConfig.called = True
dictConfig.called = False
| SettingsConfigTest |
python | pytorch__pytorch | torch/utils/_sympy/functions.py | {
"start": 46609,
"end": 47474
} | class ____(sympy.Function):
is_integer = True
@classmethod
def eval(cls, number):
# assert number.is_integer is not True, number
if number is sympy.oo:
return int_oo
if number is -sympy.oo:
return -int_oo
if isinstance(number, sympy.Number):
return sympy.Integer(round(float(number), 0))
# To get float -> int, Python style round semantics.
#
# x = PyFloat_AsDouble(self);
# if (o_ndigits == Py_None) {
# /* single-argument round or with None ndigits:
# * round to nearest integer */
# rounded = round(x);
# if (fabs(x-rounded) == 0.5)
# /* halfway case: round to even */
# rounded = 2.0*round(x/2.0);
# return PyLong_FromDouble(rounded);
# }
# NB: Like Round, this only ever returns floats. ndigits cannot be None
| RoundToInt |
python | mkdocstrings__mkdocstrings | src/mkdocstrings/_internal/handlers/base.py | {
"start": 2729,
"end": 19539
} | class ____:
"""The base handler class.
Inherit from this class to implement a handler.
You will have to implement the `collect` and `render` methods.
You can also implement the `teardown` method,
and override the `update_env` method, to add more filters to the Jinja environment,
making them available in your Jinja templates.
To define a fallback theme, add a `fallback_theme` class-variable.
To add custom CSS, add an `extra_css` variable or create an 'style.css' file beside the templates.
"""
name: ClassVar[str]
"""The handler's name, for example "python"."""
domain: ClassVar[str]
"""The handler's domain, used to register objects in the inventory, for example "py"."""
enable_inventory: ClassVar[bool] = False
"""Whether the inventory creation is enabled."""
fallback_theme: ClassVar[str] = ""
"""Fallback theme to use when a template isn't found in the configured theme."""
extra_css: str = ""
"""Extra CSS."""
def __init__(
self,
*,
theme: str,
custom_templates: str | None,
mdx: Sequence[str | Extension],
mdx_config: Mapping[str, Any],
) -> None:
"""Initialize the object.
If the given theme is not supported (it does not exist), it will look for a `fallback_theme` attribute
in `self` to use as a fallback theme.
Keyword Arguments:
theme (str): The theme to use.
custom_templates (str | None): The path to custom templates.
mdx (list[str | Extension]): A list of Markdown extensions to use.
mdx_config (Mapping[str, Mapping[str, Any]]): Configuration for the Markdown extensions.
"""
self.theme = theme
"""The selected theme."""
self.custom_templates = custom_templates
"""The path to custom templates."""
self.mdx = mdx
"""The Markdown extensions to use."""
self.mdx_config = mdx_config
"""The configuration for the Markdown extensions."""
self._md: Markdown | None = None
self._headings: list[Element] = []
paths = []
# add selected theme templates
themes_dir = self.get_templates_dir(self.name)
paths.append(themes_dir / self.theme)
# add extended theme templates
extended_templates_dirs = self.get_extended_templates_dirs(self.name)
for templates_dir in extended_templates_dirs:
paths.append(templates_dir / self.theme)
# add fallback theme templates
if self.fallback_theme and self.fallback_theme != self.theme:
paths.append(themes_dir / self.fallback_theme)
# add fallback theme of extended templates
for templates_dir in extended_templates_dirs:
paths.append(templates_dir / self.fallback_theme)
for path in paths:
css_path = path / "style.css"
if css_path.is_file():
self.extra_css += "\n" + css_path.read_text(encoding="utf-8")
break
if self.custom_templates is not None:
paths.insert(0, Path(self.custom_templates) / self.name / self.theme)
self.env = Environment(
autoescape=True,
loader=FileSystemLoader(paths),
auto_reload=False, # Editing a template in the middle of a build is not useful.
)
"""The Jinja environment."""
self.env.filters["convert_markdown"] = self.do_convert_markdown
self.env.filters["heading"] = self.do_heading
self.env.filters["any"] = do_any
self.env.globals["log"] = get_template_logger(self.name)
@property
def md(self) -> Markdown:
"""The Markdown instance.
Raises:
RuntimeError: When the Markdown instance is not set yet.
"""
if self._md is None:
raise RuntimeError("Markdown instance not set yet")
return self._md
def get_inventory_urls(self) -> list[tuple[str, dict[str, Any]]]:
"""Return the URLs (and configuration options) of the inventory files to download."""
return []
@classmethod
def load_inventory(
cls,
in_file: BinaryIO, # noqa: ARG003
url: str, # noqa: ARG003
base_url: str | None = None, # noqa: ARG003
**kwargs: Any, # noqa: ARG003
) -> Iterator[tuple[str, str]]:
"""Yield items and their URLs from an inventory file streamed from `in_file`.
Arguments:
in_file: The binary file-like object to read the inventory from.
url: The URL that this file is being streamed from (used to guess `base_url`).
base_url: The URL that this inventory's sub-paths are relative to.
**kwargs: Ignore additional arguments passed from the config.
Yields:
Tuples of (item identifier, item URL).
"""
yield from ()
def get_options(self, local_options: Mapping[str, Any]) -> HandlerOptions:
"""Get combined options.
Override this method to customize how options are combined,
for example by merging the global options with the local options.
By combining options here, you don't have to do it twice in `collect` and `render`.
Arguments:
local_options: The local options.
Returns:
The combined options.
"""
return local_options
def collect(self, identifier: str, options: HandlerOptions) -> CollectorItem:
"""Collect data given an identifier and user configuration.
In the implementation, you typically call a subprocess that returns JSON, and load that JSON again into
a Python dictionary for example, though the implementation is completely free.
Arguments:
identifier: An identifier for which to collect data. For example, in Python,
it would be 'mkdocstrings.handlers' to collect documentation about the handlers module.
It can be anything that you can feed to the tool of your choice.
options: The final configuration options.
Returns:
Anything you want, as long as you can feed it to the handler's `render` method.
"""
raise NotImplementedError
def render(self, data: CollectorItem, options: HandlerOptions, *, locale: str | None = None) -> str:
"""Render a template using provided data and configuration options.
Arguments:
data: The collected data to render.
options: The final configuration options.
locale: The locale to use for translations, if any.
Returns:
The rendered template as HTML.
"""
raise NotImplementedError
def render_backlinks(self, backlinks: Mapping[str, Iterable[Backlink]], *, locale: str | None = None) -> str: # noqa: ARG002
"""Render backlinks.
Parameters:
backlinks: A mapping of identifiers to backlinks.
locale: The locale to use for translations, if any.
Returns:
The rendered backlinks as HTML.
"""
return ""
def teardown(self) -> None:
"""Teardown the handler.
This method should be implemented to, for example, terminate a subprocess
that was started when creating the handler instance.
"""
def get_templates_dir(self, handler: str | None = None) -> Path:
"""Return the path to the handler's templates directory.
Override to customize how the templates directory is found.
Arguments:
handler: The name of the handler to get the templates directory of.
Raises:
ModuleNotFoundError: When no such handler is installed.
FileNotFoundError: When the templates directory cannot be found.
Returns:
The templates directory path.
"""
handler = handler or self.name
try:
import mkdocstrings_handlers # noqa: PLC0415
except ModuleNotFoundError as error:
raise ModuleNotFoundError(f"Handler '{handler}' not found, is it installed?") from error
for path in mkdocstrings_handlers.__path__:
theme_path = Path(path, handler, "templates")
if theme_path.exists():
return theme_path
raise FileNotFoundError(f"Can't find 'templates' folder for handler '{handler}'")
def get_extended_templates_dirs(self, handler: str) -> list[Path]:
"""Load template extensions for the given handler, return their templates directories.
Arguments:
handler: The name of the handler to get the extended templates directory of.
Returns:
The extensions templates directories.
"""
discovered_extensions = entry_points(group=f"mkdocstrings.{handler}.templates")
return [extension.load()() for extension in discovered_extensions]
def get_aliases(self, identifier: str) -> tuple[str, ...]: # noqa: ARG002
"""Return the possible aliases for a given identifier.
Arguments:
identifier: The identifier to get the aliases of.
Returns:
A tuple of strings - aliases.
"""
return ()
@property
def outer_layer(self) -> bool:
"""Whether we're in the outer Markdown conversion layer."""
return _markdown_conversion_layer == 0
def do_convert_markdown(
self,
text: str,
heading_level: int,
html_id: str = "",
*,
strip_paragraph: bool = False,
autoref_hook: AutorefsHookInterface | None = None,
) -> Markup:
"""Render Markdown text; for use inside templates.
Arguments:
text: The text to convert.
heading_level: The base heading level to start all Markdown headings from.
html_id: The HTML id of the element that's considered the parent of this element.
strip_paragraph: Whether to exclude the `<p>` tag from around the whole output.
Returns:
An HTML string.
"""
global _markdown_conversion_layer # noqa: PLW0603
_markdown_conversion_layer += 1
treeprocessors = self.md.treeprocessors
treeprocessors[HeadingShiftingTreeprocessor.name].shift_by = heading_level # type: ignore[attr-defined]
treeprocessors[IdPrependingTreeprocessor.name].id_prefix = html_id and html_id + "--" # type: ignore[attr-defined]
treeprocessors[ParagraphStrippingTreeprocessor.name].strip = strip_paragraph # type: ignore[attr-defined]
if BacklinksTreeProcessor.name in treeprocessors:
treeprocessors[BacklinksTreeProcessor.name].initial_id = html_id # type: ignore[attr-defined]
if autoref_hook and AutorefsInlineProcessor.name in self.md.inlinePatterns:
self.md.inlinePatterns[AutorefsInlineProcessor.name].hook = autoref_hook # type: ignore[attr-defined]
try:
return Markup(self.md.convert(text))
finally:
treeprocessors[HeadingShiftingTreeprocessor.name].shift_by = 0 # type: ignore[attr-defined]
treeprocessors[IdPrependingTreeprocessor.name].id_prefix = "" # type: ignore[attr-defined]
treeprocessors[ParagraphStrippingTreeprocessor.name].strip = False # type: ignore[attr-defined]
if BacklinksTreeProcessor.name in treeprocessors:
treeprocessors[BacklinksTreeProcessor.name].initial_id = None # type: ignore[attr-defined]
if AutorefsInlineProcessor.name in self.md.inlinePatterns:
self.md.inlinePatterns[AutorefsInlineProcessor.name].hook = None # type: ignore[attr-defined]
self.md.reset()
_markdown_conversion_layer -= 1
def do_heading(
self,
content: Markup,
heading_level: int,
*,
role: str | None = None,
hidden: bool = False,
toc_label: str | None = None,
skip_inventory: bool = False,
**attributes: str,
) -> Markup:
"""Render an HTML heading and register it for the table of contents. For use inside templates.
Arguments:
content: The HTML within the heading.
heading_level: The level of heading (e.g. 3 -> `h3`).
role: An optional role for the object bound to this heading.
hidden: If True, only register it for the table of contents, don't render anything.
toc_label: The title to use in the table of contents ('data-toc-label' attribute).
skip_inventory: Flag element to not be registered in the inventory (by setting a `data-skip-inventory` attribute).
**attributes: Any extra HTML attributes of the heading.
Returns:
An HTML string.
"""
# Produce a heading element that will be used later, in `AutoDocProcessor.run`, to:
# - register it in the ToC: right now we're in the inner Markdown conversion layer,
# so we have to bubble up the information to the outer Markdown conversion layer,
# for the ToC extension to pick it up.
# - register it in autorefs: right now we don't know what page is being rendered,
# so we bubble up the information again to where autorefs knows the page,
# and can correctly register the heading anchor (id) to its full URL.
# - register it in the objects inventory: same as for autorefs,
# we don't know the page here, or the handler (and its domain),
# so we bubble up the information to where the mkdocstrings extension knows that.
el = Element(f"h{heading_level}", attributes)
if toc_label is None:
toc_label = content.unescape() if isinstance(content, Markup) else content
el.set("data-toc-label", toc_label)
if skip_inventory:
el.set("data-skip-inventory", "true")
if role:
el.set("data-role", role)
if content:
el.text = str(content).strip()
self._headings.append(el)
if hidden:
return Markup('<a id="{0}"></a>').format(attributes["id"])
# Now produce the actual HTML to be rendered. The goal is to wrap the HTML content into a heading.
# Start with a heading that has just attributes (no text), and add a placeholder into it.
el = Element(f"h{heading_level}", attributes)
el.append(Element("mkdocstrings-placeholder"))
# Tell the inner 'toc' extension to make its additions if configured so.
toc = cast("TocTreeprocessor", self.md.treeprocessors["toc"])
if toc.use_anchors:
toc.add_anchor(el, attributes["id"])
if toc.use_permalinks:
toc.add_permalink(el, attributes["id"])
# The content we received is HTML, so it can't just be inserted into the tree. We had marked the middle
# of the heading with a placeholder that can never occur (text can't directly contain angle brackets).
# Now this HTML wrapper can be "filled" by replacing the placeholder.
html_with_placeholder = tostring(el, encoding="unicode")
assert ( # noqa: S101
html_with_placeholder.count("<mkdocstrings-placeholder />") == 1
), f"Bug in mkdocstrings: failed to replace in {html_with_placeholder!r}"
html = html_with_placeholder.replace("<mkdocstrings-placeholder />", content)
return Markup(html)
def get_headings(self) -> Sequence[Element]:
"""Return and clear the headings gathered so far.
Returns:
A list of HTML elements.
"""
result = list(self._headings)
self._headings.clear()
return result
def update_env(self, config: Any) -> None:
"""Update the Jinja environment."""
def _update_env(self, md: Markdown, *, config: Any | None = None) -> None:
"""Update our handler to point to our configured Markdown instance, grabbing some of the config from `md`."""
extensions: list[str | Extension] = [*self.mdx, MkdocstringsInnerExtension(self._headings)]
new_md = Markdown(extensions=extensions, extension_configs=self.mdx_config)
# MkDocs adds its own (required) extension that's not part of the config. Propagate it.
if "relpath" in md.treeprocessors:
relpath = md.treeprocessors["relpath"]
new_relpath = type(relpath)(relpath.file, relpath.files, relpath.config) # type: ignore[attr-defined,call-arg]
new_md.treeprocessors.register(new_relpath, "relpath", priority=0)
self._md = new_md
self.env.filters["highlight"] = Highlighter(new_md).highlight
self.update_env(config)
| BaseHandler |
python | huggingface__transformers | src/transformers/models/mobilebert/modeling_mobilebert.py | {
"start": 16558,
"end": 16978
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate = MobileBertIntermediate(config)
self.output = FFNOutput(config)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
intermediate_output = self.intermediate(hidden_states)
layer_outputs = self.output(intermediate_output, hidden_states)
return layer_outputs
| FFNLayer |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 96144,
"end": 96837
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, client_id: str, client_secret: str):
"""Airbyte Source for Primetric.
Args:
name (str): The name of the destination.
client_id (str): The Client ID of your Primetric developer application. The Client ID is visible here.
client_secret (str): The Client Secret of your Primetric developer application. You can manage your client's credentials here.
"""
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
super().__init__("Primetric", name)
| PrimetricSource |
python | pyca__cryptography | tests/hazmat/primitives/twofactor/test_totp.py | {
"start": 522,
"end": 5246
} | class ____:
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA1()),
skip_message="Does not support HMAC-SHA1.",
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA1"]
)
def test_generate_sha1(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA1(), 30, backend)
assert totp.generate(time) == totp_value
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA256()),
skip_message="Does not support HMAC-SHA256.",
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA256"]
)
def test_generate_sha256(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA256(), 30, backend)
assert totp.generate(time) == totp_value
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA512()),
skip_message="Does not support HMAC-SHA512.",
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA512"]
)
def test_generate_sha512(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA512(), 30, backend)
assert totp.generate(time) == totp_value
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA1()),
skip_message="Does not support HMAC-SHA1.",
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA1"]
)
def test_verify_sha1(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA1(), 30, backend)
totp.verify(totp_value, time)
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA256()),
skip_message="Does not support HMAC-SHA256.",
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA256"]
)
def test_verify_sha256(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA256(), 30, backend)
totp.verify(totp_value, time)
@pytest.mark.supported(
only_if=lambda backend: backend.hmac_supported(hashes.SHA512()),
skip_message="Does not support HMAC-SHA512.",
)
@pytest.mark.parametrize(
"params", [i for i in vectors if i["mode"] == b"SHA512"]
)
def test_verify_sha512(self, backend, params):
secret = params["secret"]
time = int(params["time"])
totp_value = params["totp"]
totp = TOTP(secret, 8, hashes.SHA512(), 30, backend)
totp.verify(totp_value, time)
def test_invalid_verify(self, backend):
secret = b"12345678901234567890"
time = 59
totp = TOTP(secret, 8, hashes.SHA1(), 30, backend)
with pytest.raises(InvalidToken):
totp.verify(b"12345678", time)
def test_floating_point_time_generate(self, backend):
secret = b"12345678901234567890"
time = 59.1
totp = TOTP(secret, 8, hashes.SHA1(), 30, backend)
assert totp.generate(time) == b"94287082"
def test_get_provisioning_uri(self, backend):
secret = b"12345678901234567890"
totp = TOTP(secret, 6, hashes.SHA1(), 30, backend=backend)
assert totp.get_provisioning_uri("Alice Smith", None) == (
"otpauth://totp/Alice%20Smith?digits=6&secret=GEZDGNBVG"
"Y3TQOJQGEZDGNBVGY3TQOJQ&algorithm=SHA1&period=30"
)
assert totp.get_provisioning_uri("Alice Smith", "World") == (
"otpauth://totp/World:Alice%20Smith?digits=6&secret=GEZ"
"DGNBVGY3TQOJQGEZDGNBVGY3TQOJQ&algorithm=SHA1&issuer=World"
"&period=30"
)
def test_buffer_protocol(self, backend):
key = bytearray(b"a long key with lots of entropy goes here")
totp = TOTP(key, 8, hashes.SHA512(), 30, backend)
time = 60
assert totp.generate(time) == b"53049576"
def test_invalid_time(self, backend):
key = b"12345678901234567890"
totp = TOTP(key, 8, hashes.SHA1(), 30, backend)
with pytest.raises(TypeError):
totp.generate("test") # type: ignore[arg-type]
| TestTOTP |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py | {
"start": 48598,
"end": 48965
} | class ____(rnn_cell_wrapper_impl.DeviceWrapperBase,
_RNNCellWrapperV1):
def __init__(self, *args, **kwargs): # pylint: disable=useless-super-delegation
super(DeviceWrapper, self).__init__(*args, **kwargs)
__init__.__doc__ = rnn_cell_wrapper_impl.DeviceWrapperBase.__init__.__doc__
@tf_export(v1=["nn.rnn_cell.MultiRNNCell"])
| DeviceWrapper |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/events.py | {
"start": 5582,
"end": 6025
} | class ____(FileSystemEvent):
"""File system event representing file creation on the file system."""
event_type = EVENT_TYPE_CREATED
def __init__(self, src_path):
super(FileCreatedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
| FileCreatedEvent |
python | huggingface__transformers | src/transformers/models/sam3/modeling_sam3.py | {
"start": 37974,
"end": 39674
} | class ____(nn.Module):
def __init__(self, in_channels: int, fpn_dim: int, scale_factor: float):
super().__init__()
self.scale_factor = scale_factor
# Build the upsampling/downsampling layers based on scale factor
self.scale_layers = nn.ModuleList()
if scale_factor == 4.0:
self.scale_layers.append(nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2))
self.scale_layers.append(nn.GELU())
self.scale_layers.append(nn.ConvTranspose2d(in_channels // 2, in_channels // 4, kernel_size=2, stride=2))
intermediate_channels = in_channels // 4
elif scale_factor == 2.0:
self.scale_layers.append(nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2))
intermediate_channels = in_channels // 2
elif scale_factor == 1.0:
intermediate_channels = in_channels
elif scale_factor == 0.5:
self.scale_layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
intermediate_channels = in_channels
else:
raise NotImplementedError(f"scale_factor={scale_factor} is not supported yet.")
self.proj1 = nn.Conv2d(in_channels=intermediate_channels, out_channels=fpn_dim, kernel_size=1)
self.proj2 = nn.Conv2d(in_channels=fpn_dim, out_channels=fpn_dim, kernel_size=3, padding=1)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
for layer in self.scale_layers:
hidden_states = layer(hidden_states)
hidden_states = self.proj1(hidden_states)
hidden_states = self.proj2(hidden_states)
return hidden_states
| Sam3FPNLayer |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/metadata.py | {
"start": 3145,
"end": 3575
} | class ____(graphene.ObjectType):
intValue = graphene.Field(
graphene.Int, description="Nullable to allow graceful degrade on > 32 bit numbers"
)
intRepr = graphene.NonNull(
graphene.String,
description="String representation of the int to support greater than 32 bit",
)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "IntMetadataEntry"
| GrapheneIntMetadataEntry |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-nvidia/tests/test_nvidia.py | {
"start": 898,
"end": 11354
} | class ____:
def __init__(self, set_env_key_to: Optional[str] = "", set_fake_key: bool = False):
self.set_env_key_to = set_env_key_to
self.set_fake_key = set_fake_key
def __enter__(self) -> None:
self.api_env_was = os.environ.get("NVIDIA_API_KEY", "")
os.environ["NVIDIA_API_KEY"] = self.set_env_key_to
if self.set_fake_key:
os.environ["NVIDIA_API_KEY"] = "nvai-" + "x" * 9 + "-" + "x" * 54
def __exit__(self, *exc: object) -> None:
if self.api_env_was == "":
del os.environ["NVIDIA_API_KEY"]
else:
os.environ["NVIDIA_API_KEY"] = self.api_env_was
def mock_chat_completion_v1(*args: Any, **kwargs: Any) -> ChatCompletion:
return ChatCompletion(
id="chatcmpl-4162e407-e121-42b4-8590-1c173380be7d",
object="chat.completion",
created=1713474384,
model="mistralai/mistral-7b-instruct-v0.2",
usage=CompletionUsage(
completion_tokens=304, prompt_tokens=11, total_tokens=315
),
choices=[
Choice(
finish_reason="stop",
index=0,
logprobs=ChoiceLogprobs(
content=None,
text_offset=[],
token_logprobs=[0.0, 0.0],
tokens=[],
top_logprobs=[],
),
message=ChatCompletionMessage(
content="Cool Test Message",
role="assistant",
function_call=None,
tool_calls=None,
),
)
],
)
async def mock_async_chat_completion_v1(*args: Any, **kwargs: Any) -> Completion:
return mock_chat_completion_v1(*args, **kwargs)
def mock_chat_completion_stream_v1(
*args: Any, **kwargs: Any
) -> Generator[ChatCompletionChunk, None, None]:
responses = [
ChatCompletionChunk(
id="chatcmpl-998d9b96-0b71-41f5-b910-dd3bc00f38c6",
object="chat.completion.chunk",
created=1713474736,
model="google/gemma-7b",
choices=[
ChunkChoice(
finish_reason="stop",
index=0,
delta=ChoiceDelta(
content="Test",
function_call=None,
role="assistant",
tool_calls=None,
),
)
],
),
ChatCompletionChunk(
id="chatcmpl-998d9b96-0b71-41f5-b910-dd3bc00f38c6",
object="chat.completion.chunk",
created=1713474736,
model="google/gemma-7b",
choices=[
ChunkChoice(
finish_reason="stop",
index=0,
delta=ChoiceDelta(
content="Second Test",
function_call=None,
role="assistant",
tool_calls=None,
),
)
],
),
]
yield from responses
@pytest.fixture()
def known_unknown() -> str:
return "mock-model"
@pytest.fixture()
def mock_local_models(httpx_mock: HTTPXMock):
mock_response = {
"data": [
{
"id": "mock-model",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "mock-model",
},
{
"id": "lora1",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
"root": "mock-model",
},
]
}
httpx_mock.add_response(
url="http://localhost:8000/v1/models",
method="GET",
json=mock_response,
status_code=200,
)
async def mock_async_chat_completion_stream_v1(
*args: Any, **kwargs: Any
) -> AsyncGenerator[Completion, None]:
async def gen() -> AsyncGenerator[Completion, None]:
for response in mock_chat_completion_stream_v1(*args, **kwargs):
yield response
return gen()
@patch("llama_index.llms.openai.base.SyncOpenAI")
def test_chat_model_basic(MockSyncOpenAI: MagicMock) -> None:
with CachedNVIDIApiKeys(set_fake_key=True):
mock_instance = MockSyncOpenAI.return_value
mock_instance.chat.completions.create.return_value = mock_chat_completion_v1()
llm = NVIDIA()
prompt = "test prompt"
message = ChatMessage(role="user", content="test message")
response = llm.complete(prompt)
assert response.text == "Cool Test Message"
chat_response = llm.chat([message])
assert chat_response.message.content == "Cool Test Message"
@patch("llama_index.llms.openai.base.SyncOpenAI")
def test_chat_model_streaming(MockSyncOpenAI: MagicMock) -> None:
with CachedNVIDIApiKeys(set_fake_key=True):
mock_instance = MockSyncOpenAI.return_value
mock_instance.chat.completions.create.return_value = (
mock_chat_completion_stream_v1()
)
llm = NVIDIA()
prompt = "test prompt"
message = ChatMessage(role="user", content="test message")
response_gen = llm.stream_complete(prompt)
responses = list(response_gen)
assert responses[-1].text == "TestSecond Test"
mock_instance.chat.completions.create.return_value = (
mock_chat_completion_stream_v1()
)
chat_response_gen = llm.stream_chat([message])
chat_responses = list(chat_response_gen)
print(chat_responses)
assert chat_responses[-1].message.content == "TestSecond Test"
assert chat_responses[-1].message.role == "assistant"
@pytest.mark.asyncio
@patch("llama_index.llms.openai.base.AsyncOpenAI")
async def test_async_chat_model_basic(MockAsyncOpenAI: MagicMock) -> None:
with CachedNVIDIApiKeys(set_fake_key=True):
mock_instance = MockAsyncOpenAI.return_value
create_fn = AsyncMock()
create_fn.side_effect = mock_async_chat_completion_v1
mock_instance.chat.completions.create = create_fn
llm = NVIDIA()
prompt = "test prompt"
message = ChatMessage(role="user", content="test message")
response = await llm.acomplete(prompt)
assert response.text == "Cool Test Message"
chat_response = await llm.achat([message])
assert chat_response.message.content == "Cool Test Message"
@pytest.mark.asyncio
@patch("llama_index.llms.openai.base.AsyncOpenAI")
async def test_async_streaming_chat_model(MockAsyncOpenAI: MagicMock) -> None:
with CachedNVIDIApiKeys(set_fake_key=True):
mock_instance = MockAsyncOpenAI.return_value
create_fn = AsyncMock()
create_fn.side_effect = mock_async_chat_completion_stream_v1
mock_instance.chat.completions.create = create_fn
llm = NVIDIA()
prompt = "test prompt"
message = ChatMessage(role="user", content="test message")
response_gen = await llm.astream_complete(prompt)
responses = [response async for response in response_gen]
assert responses[-1].text == "TestSecond Test"
chat_response_gen = await llm.astream_chat([message])
chat_responses = [response async for response in chat_response_gen]
assert chat_responses[-1].message.content == "TestSecond Test"
def test_validates_api_key_is_present() -> None:
with CachedNVIDIApiKeys(set_fake_key=True):
assert NVIDIA()
os.environ["NVIDIA_API_KEY"] = ""
assert NVIDIA(api_key="nvai-" + "x" * 9 + "-" + "x" * 54)
def test_metadata() -> None:
assert isinstance(NVIDIA().metadata, LLMMetadata)
def test_default_local_known(mock_local_models, known_unknown: str) -> None:
"""
Test that a model in the model table will be accepted.
"""
# check if default model is getting set
with pytest.warns(UserWarning):
x = NVIDIA(base_url="http://localhost:8000/v1")
assert x.model == known_unknown
def test_default_local_lora(mock_local_models) -> None:
"""
Test that a model in the model table will be accepted.
"""
# find a model that matches the public_class under test
x = NVIDIA(base_url="http://localhost:8000/v1", model="lora1")
assert x.model == "lora1"
def test_local_model_not_found(mock_local_models) -> None:
"""
Test that a model in the model table will be accepted.
"""
err_msg = f"No locally hosted lora3 was found."
with pytest.raises(ValueError) as msg:
x = NVIDIA(base_url="http://localhost:8000/v1", model="lora3")
assert err_msg == str(msg.value)
@patch("llama_index.llms.openai.base.SyncOpenAI")
def test_model_compatible_client_default_model(MockSyncOpenAI: MagicMock) -> None:
with CachedNVIDIApiKeys(set_fake_key=True):
mock_instance = MockSyncOpenAI.return_value
mock_instance.chat.completions.create.return_value = mock_chat_completion_v1()
llm = NVIDIA()
message = ChatMessage(role="user", content="test message")
llm.chat([message])
@patch("llama_index.llms.openai.base.SyncOpenAI")
@pytest.mark.parametrize(
"model",
(
next(iter(NVIDIA_FUNCTION_CALLING_MODELS)),
next(iter(MODEL_TABLE.keys())),
next(iter(COMPLETION_MODELS)),
),
)
def test_model_compatible_client_model(MockSyncOpenAI: MagicMock, model: str) -> None:
with CachedNVIDIApiKeys(set_fake_key=True):
mock_instance = MockSyncOpenAI.return_value
mock_instance.chat.completions.create.return_value = mock_chat_completion_v1()
NVIDIA(api_key="BOGUS", model=model)
def test_model_incompatible_client_model() -> None:
model_name = "x"
err_msg = f"Model {model_name} is unknown, check `available_models`"
with pytest.raises(ValueError) as msg:
NVIDIA(model=model_name)
assert err_msg == str(msg.value)
def test_model_incompatible_client_known_model() -> None:
model_name = "nvidia/embed-qa-4"
warn_msg = (
f"Found {model_name} in available_models, but type is "
"unknown and inference may fail."
)
with pytest.warns(UserWarning) as msg:
NVIDIA(api_key="BOGUS", model=model_name)
assert len(msg) == 1
assert warn_msg in str(msg[0].message)
| CachedNVIDIApiKeys |
python | keras-team__keras | keras/src/backend/tensorflow/export.py | {
"start": 26,
"end": 792
} | class ____:
def _track_layer(self, layer):
# Variables in the lists below are actually part of the trackables
# that get saved, because the lists are created in __init__.
variables = layer.variables
trainable_variables = layer.trainable_variables
non_trainable_variables = layer.non_trainable_variables
self._tf_trackable.variables += variables
self._tf_trackable.trainable_variables += trainable_variables
self._tf_trackable.non_trainable_variables += non_trainable_variables
def add_endpoint(self, name, fn, input_signature=None, **kwargs):
decorated_fn = tf.function(
fn, input_signature=input_signature, autograph=False
)
return decorated_fn
| TFExportArchive |
python | pytorch__pytorch | test/distributed/checkpoint/test_state_dict_stager.py | {
"start": 7015,
"end": 7171
} | class ____:
tensor: torch.Tensor
name: str
values: list[float]
nested: NestedTensorStruct
@dataclasses.dataclass(frozen=True)
| ComplexDataClass |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 37892,
"end": 38154
} | class ____(Request):
""" """
_service = "queues"
_action = "get_default"
_version = "2.9"
_schema = {
"additionalProperties": False,
"definitions": {},
"properties": {},
"type": "object",
}
| GetDefaultRequest |
python | mozilla__bleach | bleach/_vendor/html5lib/filters/whitespace.py | {
"start": 253,
"end": 1214
} | class ____(base.Filter):
"""Collapses whitespace except in pre, textarea, and script elements"""
spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
def __iter__(self):
preserve = 0
for token in base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag" \
and (preserve or token["name"] in self.spacePreserveElements):
preserve += 1
elif type == "EndTag" and preserve:
preserve -= 1
elif not preserve and type == "SpaceCharacters" and token["data"]:
# Test on token["data"] above to not introduce spaces where there were not
token["data"] = " "
elif not preserve and type == "Characters":
token["data"] = collapse_spaces(token["data"])
yield token
def collapse_spaces(text):
return SPACES_REGEX.sub(' ', text)
| Filter |
python | huggingface__transformers | src/transformers/models/marian/modeling_marian.py | {
"start": 5173,
"end": 10873
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
config: Optional[MarianConfig] = None,
layer_idx: Optional[int] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
cache_position: Optional[torch.Tensor] = None,
# TODO: we need a refactor so that the different attention modules can get their specific kwargs
# ATM, we have mixed things encoder, decoder, and encoder-decoder attn
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
# get query proj
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(*kv_input_shape).transpose(1, 2)
value_states = value_states.view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
output_attentions=output_attentions,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->Marian, BART->MARIAN
| MarianAttention |
python | agronholm__apscheduler | src/apscheduler/triggers/cron/fields.py | {
"start": 4774,
"end": 4863
} | class ____(BaseField, extra_compilers=(MonthRangeExpression,)):
__slots__ = ()
| MonthField |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 4022,
"end": 4098
} | class ____(InvalidBaseYamlConfigError):
pass
| InvalidDataContextConfigError |
python | sphinx-doc__sphinx | sphinx/jinja2glue.py | {
"start": 4583,
"end": 8356
} | class ____(TemplateBridge, BaseLoader):
"""Interfaces the rendering environment of jinja2 for use in Sphinx."""
# TemplateBridge interface
def init(
self,
builder: Builder,
theme: Theme | None = None,
dirs: list[str] | None = None,
) -> None:
# create a chain of paths to search
if theme:
# the theme's own dir and its bases' dirs
pathchain = theme.get_theme_dirs()
# the loader dirs: pathchain + the parent directories for all themes
loaderchain = pathchain + [p.parent for p in pathchain]
elif dirs:
pathchain = list(map(_StrPath, dirs))
loaderchain = list(map(_StrPath, dirs))
else:
pathchain = []
loaderchain = []
# prepend explicit template paths
self.templatepathlen = len(builder.config.templates_path)
if builder.config.templates_path:
cfg_templates_path = [
builder.confdir / tp for tp in builder.config.templates_path
]
pathchain[0:0] = cfg_templates_path
loaderchain[0:0] = cfg_templates_path
# store it for use in newest_template_mtime
self.pathchain = pathchain
# make the paths into loaders
self.loaders = [SphinxFileSystemLoader(x) for x in loaderchain]
use_i18n = builder._translator is not None
extensions = ['jinja2.ext.i18n'] if use_i18n else []
self.environment = SandboxedEnvironment(loader=self, extensions=extensions)
self.environment.filters['tobool'] = _tobool
self.environment.filters['toint'] = _toint
self.environment.filters['todim'] = _todim
self.environment.filters['slice_index'] = _slice_index
self.environment.globals['debug'] = pass_context(pformat)
self.environment.globals['warning'] = warning
self.environment.globals['accesskey'] = pass_context(accesskey)
self.environment.globals['idgen'] = idgen
if use_i18n:
# ``install_gettext_translations`` is injected by the ``jinja2.ext.i18n`` extension
self.environment.install_gettext_translations(builder._translator) # type: ignore[attr-defined]
def render(self, template: str, context: dict[str, Any]) -> str: # type: ignore[override]
return self.environment.get_template(template).render(context)
def render_string(self, source: str, context: dict[str, Any]) -> str:
return self.environment.from_string(source).render(context)
def newest_template_mtime(self) -> float:
return self._newest_template_mtime_name()[0]
def newest_template_name(self) -> str:
return self._newest_template_mtime_name()[1]
def _newest_template_mtime_name(self) -> tuple[float, str]:
return max(
(Path(root, sfile).stat().st_mtime_ns / 10**9, sfile)
for dirname in self.pathchain
for root, _dirs, files in os.walk(dirname)
for sfile in files
if sfile.endswith('.html')
)
# Loader interface
def get_source(
self, environment: Environment, template: str
) -> tuple[str, str, Callable[[], bool]]:
loaders = self.loaders
# exclamation mark starts search from theme
if template.startswith('!'):
loaders = loaders[self.templatepathlen :]
template = template[1:]
for loader in loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
msg = f'{template!r} not found in {self.environment.loader.pathchain}' # type: ignore[union-attr]
raise TemplateNotFound(msg)
| BuiltinTemplateLoader |
python | django__django | tests/expressions/tests.py | {
"start": 106138,
"end": 112747
class ____(SimpleTestCase):
    """Tests for output-field resolution of ``CombinedExpression``.

    Each test combines two typed expressions with an arithmetic connector
    and checks either the inferred ``output_field`` or the ``FieldError``
    raised when no type can be inferred.
    """

    def test_resolve_output_field_positive_integer(self):
        # Operators that cannot yield negative results keep the positive type.
        connectors = [
            Combinable.ADD,
            Combinable.MUL,
            Combinable.DIV,
            Combinable.MOD,
            Combinable.POW,
        ]
        for connector in connectors:
            with self.subTest(connector=connector):
                expr = CombinedExpression(
                    Expression(PositiveIntegerField()),
                    connector,
                    Expression(PositiveIntegerField()),
                )
                self.assertIsInstance(expr.output_field, PositiveIntegerField)

    def test_resolve_output_field_number(self):
        # (lhs, rhs, expected combined field) — mixing numeric kinds promotes
        # to the "wider" type regardless of operand order.
        tests = [
            (IntegerField, AutoField, IntegerField),
            (AutoField, IntegerField, IntegerField),
            (IntegerField, DecimalField, DecimalField),
            (DecimalField, IntegerField, DecimalField),
            (IntegerField, FloatField, FloatField),
            (FloatField, IntegerField, FloatField),
        ]
        connectors = [
            Combinable.ADD,
            Combinable.SUB,
            Combinable.MUL,
            Combinable.DIV,
            Combinable.MOD,
        ]
        for lhs, rhs, combined in tests:
            for connector in connectors:
                with self.subTest(
                    lhs=lhs, connector=connector, rhs=rhs, combined=combined
                ):
                    expr = CombinedExpression(
                        Expression(lhs()),
                        connector,
                        Expression(rhs()),
                    )
                    self.assertIsInstance(expr.output_field, combined)

    def test_resolve_output_field_with_null(self):
        # An untyped NULL on either side makes the combined type unresolvable.
        def null():
            return Value(None)

        tests = [
            # Numbers.
            (AutoField, Combinable.ADD, null),
            (DecimalField, Combinable.ADD, null),
            (FloatField, Combinable.ADD, null),
            (IntegerField, Combinable.ADD, null),
            (IntegerField, Combinable.SUB, null),
            (null, Combinable.ADD, IntegerField),
            # Dates.
            (DateField, Combinable.ADD, null),
            (DateTimeField, Combinable.ADD, null),
            (DurationField, Combinable.ADD, null),
            (TimeField, Combinable.ADD, null),
            (TimeField, Combinable.SUB, null),
            (null, Combinable.ADD, DateTimeField),
            (DateField, Combinable.SUB, null),
        ]
        for lhs, connector, rhs in tests:
            msg = (
                f"Cannot infer type of {connector!r} expression involving these types: "
            )
            with self.subTest(lhs=lhs, connector=connector, rhs=rhs):
                expr = CombinedExpression(
                    Expression(lhs()),
                    connector,
                    Expression(rhs()),
                )
                with self.assertRaisesMessage(FieldError, msg):
                    expr.output_field

    def test_resolve_output_field_numbers_with_null(self):
        # A typed Value on one side is enough to infer the combined type even
        # when the other side is a NULL literal.
        test_values = [
            (3.14159, None, FloatField),
            (None, 3.14159, FloatField),
            (None, 42, IntegerField),
            (42, None, IntegerField),
            (None, Decimal("3.14"), DecimalField),
            (Decimal("3.14"), None, DecimalField),
        ]
        connectors = [
            Combinable.ADD,
            Combinable.SUB,
            Combinable.MUL,
            Combinable.DIV,
            Combinable.MOD,
            Combinable.POW,
        ]
        for lhs, rhs, expected_output_field in test_values:
            for connector in connectors:
                with self.subTest(lhs=lhs, connector=connector, rhs=rhs):
                    expr = CombinedExpression(Value(lhs), connector, Value(rhs))
                    self.assertIsInstance(expr.output_field, expected_output_field)

    def test_resolve_output_field_dates(self):
        tests = [
            # Add - same type.
            (DateField, Combinable.ADD, DateField, FieldError),
            (DateTimeField, Combinable.ADD, DateTimeField, FieldError),
            (TimeField, Combinable.ADD, TimeField, FieldError),
            (DurationField, Combinable.ADD, DurationField, DurationField),
            # Add - different type.
            (DateField, Combinable.ADD, DurationField, DateTimeField),
            (DateTimeField, Combinable.ADD, DurationField, DateTimeField),
            (TimeField, Combinable.ADD, DurationField, TimeField),
            (DurationField, Combinable.ADD, DateField, DateTimeField),
            (DurationField, Combinable.ADD, DateTimeField, DateTimeField),
            (DurationField, Combinable.ADD, TimeField, TimeField),
            # Subtract - same type.
            (DateField, Combinable.SUB, DateField, DurationField),
            (DateTimeField, Combinable.SUB, DateTimeField, DurationField),
            (TimeField, Combinable.SUB, TimeField, DurationField),
            (DurationField, Combinable.SUB, DurationField, DurationField),
            # Subtract - different type.
            (DateField, Combinable.SUB, DurationField, DateTimeField),
            (DateTimeField, Combinable.SUB, DurationField, DateTimeField),
            (TimeField, Combinable.SUB, DurationField, TimeField),
            (DurationField, Combinable.SUB, DateField, FieldError),
            (DurationField, Combinable.SUB, DateTimeField, FieldError),
            # Fixed: this entry duplicated the DateTimeField case above; the
            # TimeField case completes the "duration minus date type" matrix.
            (DurationField, Combinable.SUB, TimeField, FieldError),
        ]
        for lhs, connector, rhs, combined in tests:
            msg = (
                f"Cannot infer type of {connector!r} expression involving these types: "
            )
            with self.subTest(lhs=lhs, connector=connector, rhs=rhs, combined=combined):
                expr = CombinedExpression(
                    Expression(lhs()),
                    connector,
                    Expression(rhs()),
                )
                if issubclass(combined, Exception):
                    with self.assertRaisesMessage(combined, msg):
                        expr.output_field
                else:
                    self.assertIsInstance(expr.output_field, combined)

    def test_mixed_char_date_with_annotate(self):
        # Mixing text and date types has no defined combined type; the error
        # must surface at query evaluation time.
        queryset = Experiment.objects.annotate(nonsense=F("name") + F("assigned"))
        msg = (
            "Cannot infer type of '+' expression involving these types: CharField, "
            "DateField. You must set output_field."
        )
        with self.assertRaisesMessage(FieldError, msg):
            list(queryset)
| CombinedExpressionTests |
python | coleifer__peewee | playhouse/postgres_ext.py | {
"start": 12127,
"end": 13101
class ____(Node):
    """Wrap a select query so it runs through a named (server-side) cursor."""

    def __init__(self, query, array_size=None):
        self.query = query
        self.array_size = array_size
        self._cursor_wrapper = None  # Created lazily on first iteration.

    def __sql__(self, ctx):
        # SQL generation is delegated entirely to the wrapped query.
        return self.query.__sql__(ctx)

    def __iter__(self):
        if self._cursor_wrapper is None:
            self._execute(self.query._database)
        return iter(self._cursor_wrapper.iterator())

    def _execute(self, database):
        # Idempotent: re-use the existing cursor wrapper when present.
        if self._cursor_wrapper is not None:
            return self._cursor_wrapper
        named_cursor = database.execute(
            self.query, named_cursor=True, array_size=self.array_size)
        self._cursor_wrapper = self.query._get_cursor_wrapper(named_cursor)
        return self._cursor_wrapper
def ServerSide(query, database=None, array_size=None):
    """Iterate over *query* rows using a server-side (named) cursor.

    :param query: a peewee select query.
    :param database: database to fall back to when not given; defaults to
        the query's bound database.
    :param array_size: number of rows fetched per round-trip.
    """
    if database is None:
        database = query._database
    # NOTE(review): ``database`` is resolved but never passed on —
    # ServerSideQuery executes against ``query._database`` directly.
    # Confirm whether the parameter should be forwarded or deprecated.
    server_side_query = ServerSideQuery(query, array_size=array_size)
    # Delegate iteration instead of re-yielding row by row.
    yield from server_side_query
| ServerSideQuery |
python | apache__airflow | providers/atlassian/jira/src/airflow/providers/atlassian/jira/hooks/jira.py | {
"start": 1254,
"end": 3793
class ____(BaseHook):
    """
    Jira interaction hook, a Wrapper around Atlassian Jira Python SDK.

    :param jira_conn_id: reference to a pre-defined Jira Connection
    :param proxies: Proxies to make the Jira REST API call. Optional
    :param api_root: root for the api requests. Optional
    :param api_version: Jira api version to use. Optional
    """

    default_conn_name = "jira_default"
    conn_type = "jira"
    conn_name_attr = "jira_conn_id"
    hook_name = "JIRA"

    def __init__(
        self,
        jira_conn_id: str = default_conn_name,
        proxies: Any | None = None,
        api_root: str = "rest/api",
        api_version: str | int = "2",
    ) -> None:
        super().__init__()
        self.jira_conn_id = jira_conn_id
        self.proxies = proxies
        self.api_root = api_root
        self.api_version = api_version
        self.client: Jira | None = None
        # Eagerly build the client so bad connection config surfaces at
        # construction time rather than on first API call.
        self.get_conn()

    def get_conn(self) -> Jira:
        """Return the cached Jira client, creating it on first call."""
        if not self.client:
            self.log.debug("Creating Jira client for conn_id: %s", self.jira_conn_id)

            verify = True
            if not self.jira_conn_id:
                raise AirflowException("Failed to create jira client. no jira_conn_id provided")

            conn = self.get_connection(self.jira_conn_id)
            if conn.extra is not None:
                extra_options = conn.extra_dejson
                # "verify" in the connection extras toggles SSL verification.
                verify = extra_options.get("verify", verify)
            # only required attributes are taken for now,
            # more can be added ex: timeout, cloud, session
            self.client = Jira(
                url=cast("str", conn.host),
                username=conn.login,
                password=conn.password,
                verify_ssl=verify,
                proxies=self.proxies,
                api_version=self.api_version,
                api_root=self.api_root,
            )
        return self.client

    @classmethod
    def get_connection_form_widgets(cls) -> dict[str, Any]:
        """Return connection widgets to add to Atlassian Jira Connection form."""
        from flask_babel import lazy_gettext
        from wtforms import BooleanField

        return {
            "verify": BooleanField(lazy_gettext("Verify SSL"), default=True),
        }

    @classmethod
    def get_ui_field_behaviour(cls) -> dict[str, Any]:
        """Return custom UI field behaviour for Atlassian Jira Connection."""
        return {
            "hidden_fields": ["schema", "extra"],
            "relabeling": {},
        }
| JiraHook |
python | crytic__slither | slither/core/declarations/custom_error.py | {
"start": 337,
"end": 3308
class ____(SourceMapping):
    """A user-defined Solidity ``error`` declaration."""

    def __init__(self, compilation_unit: "SlitherCompilationUnit") -> None:
        super().__init__()
        self._name: str = ""
        self._parameters: List[LocalVariable] = []
        self._compilation_unit = compilation_unit
        # Both signatures are computed lazily by set_solidity_sig().
        self._solidity_signature: Optional[str] = None
        self._full_name: Optional[str] = None
        self._pattern = "error"

    @property
    def name(self) -> str:
        return self._name

    @name.setter
    def name(self, new_name: str) -> None:
        self._name = new_name

    @property
    def parameters(self) -> List[LocalVariable]:
        return self._parameters

    def add_parameters(self, p: "LocalVariable") -> None:
        self._parameters.append(p)

    @property
    def compilation_unit(self) -> "SlitherCompilationUnit":
        return self._compilation_unit

    # region Signature
    ###################################################################################
    ###################################################################################

    @staticmethod
    def _convert_type_for_solidity_signature(t: Optional[Type]) -> str:
        # Contract-like types are rendered as "address" in the ABI signature.
        return "address" if is_underlying_type_address(t) else str(t)

    @property
    def solidity_signature(self) -> str:
        """
        Return a signature following the Solidity Standard
        Contract and converted into address

        :return: the solidity signature
        """
        # Ideally this should be an assert
        # But due to a logic limitation in the solc parsing (find_variable)
        # We need to raise an error if the custom error sig was not yet built
        # (set_solidity_sig was not called before find_variable)
        if self._solidity_signature is None:
            raise ValueError("Custom Error not yet built")
        return self._solidity_signature  # type: ignore

    def set_solidity_sig(self) -> None:
        """
        Function to be called once all the parameters have been set

        Returns:

        """
        parameter_types = [p.type for p in self.parameters if p.type]
        joined = ",".join(str(t) for t in parameter_types)
        self._full_name = f"{self.name}({joined})"
        converted = ",".join(
            self._convert_type_for_solidity_signature(t) for t in parameter_types
        )
        self._solidity_signature = f"{self.name}({converted})"

    @property
    def full_name(self) -> Optional[str]:
        """
        Return the error signature without
        converting contract into address

        :return: the error signature
        """
        if self._full_name is None:
            raise ValueError("Custom Error not yet built")
        return self._full_name

    # endregion
    ###################################################################################
    ###################################################################################

    def __str__(self) -> str:
        return f"revert {self.solidity_signature}"
| CustomError |
python | RaRe-Technologies__gensim | gensim/test/test_segmentation.py | {
"start": 413,
"end": 2076
class ____(unittest.TestCase):
    """Unit tests for the three topic-segmentation strategies."""

    def setUp(self):
        # Three toy topics, each a triple of word ids.
        self.topics = [
            array([9, 4, 6]),
            array([9, 10, 7]),
            array([5, 2, 7])
        ]

    def test_s_one_pre(self):
        """Test s_one_pre segmentation."""
        result = segmentation.s_one_pre(self.topics)
        # Each later word paired with every earlier word in its topic.
        wanted = [
            [(4, 9), (6, 9), (6, 4)],
            [(10, 9), (7, 9), (7, 10)],
            [(2, 5), (7, 5), (7, 2)]
        ]
        self.assertTrue(np.allclose(result, wanted))

    def test_s_one_one(self):
        """Test s_one_one segmentation."""
        result = segmentation.s_one_one(self.topics)
        # All ordered pairs of distinct words within each topic.
        wanted = [
            [(9, 4), (9, 6), (4, 9), (4, 6), (6, 9), (6, 4)],
            [(9, 10), (9, 7), (10, 9), (10, 7), (7, 9), (7, 10)],
            [(5, 2), (5, 7), (2, 5), (2, 7), (7, 5), (7, 2)]
        ]
        self.assertTrue(np.allclose(result, wanted))

    def test_s_one_set(self):
        """Test s_one_set segmentation."""
        result = segmentation.s_one_set(self.topics)
        # Each word paired with its whole topic.
        wanted = [
            [(9, array([9, 4, 6])), (4, array([9, 4, 6])), (6, array([9, 4, 6]))],
            [(9, array([9, 10, 7])), (10, array([9, 10, 7])), (7, array([9, 10, 7]))],
            [(5, array([5, 2, 7])), (2, array([5, 2, 7])), (7, array([5, 2, 7]))]
        ]
        for result_row, wanted_row in zip(result, wanted):
            for (got_word, got_topic), (exp_word, exp_topic) in zip(result_row, wanted_row):
                self.assertEqual(got_word, exp_word)
                self.assertTrue(np.allclose(got_topic, exp_topic))
if __name__ == '__main__':
    # Quiet the root logger so test output shows only warnings and errors.
    logging.root.setLevel(logging.WARNING)
    unittest.main()
| TestSegmentation |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_single.py | {
"start": 83527,
"end": 90921
class ____(
    AssertsCompiledSQL, fixtures.DeclarativeMappedTest
):
    """test new polymorphic_abstract feature added as of #9060"""

    __dialect__ = "default"

    @classmethod
    def setup_classes(cls):
        # Hierarchy: Employee (polymorphic base, discriminator "type")
        #   -> Executive, Technologist: polymorphic_abstract=True (no identity)
        #      -> Manager/Principal and Engineer/SysAdmin: concrete identities.
        Base = cls.DeclarativeBasic

        class Company(Base):
            __tablename__ = "company"
            id = Column(Integer, primary_key=True)

            executives: Mapped[List[Executive]] = relationship()
            technologists: Mapped[List[Technologist]] = relationship()

        class Employee(Base):
            __tablename__ = "employee"
            id: Mapped[int] = mapped_column(primary_key=True)
            company_id: Mapped[int] = mapped_column(ForeignKey("company.id"))
            name: Mapped[str]
            type: Mapped[str]

            __mapper_args__ = {
                "polymorphic_on": "type",
            }

        class Executive(Employee):
            """An executive of the company"""

            executive_background: Mapped[str] = mapped_column(nullable=True)

            __mapper_args__ = {"polymorphic_abstract": True}

        class Technologist(Employee):
            """An employee who works with technology"""

            competencies: Mapped[str] = mapped_column(nullable=True)

            __mapper_args__ = {"polymorphic_abstract": True}

        class Manager(Executive):
            """a manager"""

            __mapper_args__ = {"polymorphic_identity": "manager"}

        class Principal(Executive):
            """a principal of the company"""

            __mapper_args__ = {"polymorphic_identity": "principal"}

        class Engineer(Technologist):
            """an engineer"""

            __mapper_args__ = {"polymorphic_identity": "engineer"}

        class SysAdmin(Technologist):
            """a systems administrator"""

            __mapper_args__ = {"polymorphic_identity": "sysadmin"}

    def test_select_against_abstract(self):
        # Selecting an abstract class filters on the union of its concrete
        # subclasses' discriminator values (engineer, sysadmin).
        Technologist = self.classes.Technologist

        self.assert_compile(
            select(Technologist).where(
                Technologist.competencies.like("%Java%")
            ),
            "SELECT employee.id, employee.company_id, employee.name, "
            "employee.type, employee.competencies FROM employee "
            "WHERE employee.competencies LIKE :competencies_1 "
            "AND employee.type IN (:type_1_1, :type_1_2)",
            checkparams={
                "competencies_1": "%Java%",
                "type_1_1": "engineer",
                "type_1_2": "sysadmin",
            },
            render_postcompile=True,
        )

    def test_relationship_join(self):
        # The discriminator IN filter is folded into the relationship's
        # ON clause rather than the WHERE clause.
        Technologist = self.classes.Technologist
        Company = self.classes.Company

        self.assert_compile(
            select(Company)
            .join(Company.technologists)
            .where(Technologist.competencies.like("%Java%")),
            "SELECT company.id FROM company JOIN employee "
            "ON company.id = employee.company_id AND employee.type "
            "IN (:type_1_1, :type_1_2) WHERE employee.competencies "
            "LIKE :competencies_1",
            checkparams={
                "competencies_1": "%Java%",
                "type_1_1": "engineer",
                "type_1_2": "sysadmin",
            },
            render_postcompile=True,
        )

    @testing.fixture
    def data_fixture(self, connection):
        # One company with one engineer plus a manager and a principal.
        Company = self.classes.Company
        Engineer = self.classes.Engineer
        Manager = self.classes.Manager
        Principal = self.classes.Principal

        with Session(connection) as sess:
            sess.add(
                Company(
                    technologists=[
                        Engineer(name="e1", competencies="Java programming")
                    ],
                    executives=[
                        Manager(name="m1", executive_background="eb1"),
                        Principal(name="p1", executive_background="eb2"),
                    ],
                )
            )
            sess.flush()

            yield sess

    def test_relationship_join_w_eagerload(self, data_fixture):
        # selectinload of an abstract-targeted relationship also constrains
        # its second SELECT by the concrete identities (manager, principal).
        Company = self.classes.Company
        Technologist = self.classes.Technologist

        session = data_fixture

        with self.sql_execution_asserter() as asserter:
            session.scalars(
                select(Company)
                .join(Company.technologists)
                .where(Technologist.competencies.ilike("%java%"))
                .options(selectinload(Company.executives))
            ).all()

        asserter.assert_(
            CompiledSQL(
                "SELECT company.id FROM company JOIN employee ON "
                "company.id = employee.company_id AND employee.type "
                "IN (__[POSTCOMPILE_type_1]) WHERE "
                "lower(employee.competencies) LIKE lower(:competencies_1)",
                [
                    {
                        "type_1": ["engineer", "sysadmin"],
                        "competencies_1": "%java%",
                    }
                ],
            ),
            CompiledSQL(
                "SELECT employee.company_id, "
                "employee.id, employee.name, "
                "employee.type, "
                "employee.executive_background "
                "FROM employee WHERE employee.company_id "
                "IN (__[POSTCOMPILE_primary_keys]) "
                "AND employee.type IN (__[POSTCOMPILE_type_1])",
                [
                    {
                        "primary_keys": [mock.ANY],
                        "type_1": ["manager", "principal"],
                    }
                ],
            ),
        )

    @testing.variation("given_type", ["none", "invalid", "valid"])
    def test_no_instantiate(self, given_type):
        # Abstract classes refuse instantiation even with a valid subtype
        # discriminator value supplied explicitly.
        Technologist = self.classes.Technologist

        with expect_raises_message(
            exc.InvalidRequestError,
            r"Can't instantiate class for Mapper\[Technologist\(employee\)\]; "
            r"mapper is marked polymorphic_abstract=True",
        ):
            if given_type.none:
                Technologist()
            elif given_type.invalid:
                Technologist(type="madeup")
            elif given_type.valid:
                Technologist(type="engineer")
            else:
                given_type.fail()

    def test_not_supported_wo_poly_inheriting(self, decl_base):
        # polymorphic_abstract requires a polymorphic_on discriminator
        # somewhere in the hierarchy — inheriting case.
        class MyClass(decl_base):
            __tablename__ = "my_table"
            id: Mapped[int] = mapped_column(primary_key=True)

        with expect_raises_message(
            exc.InvalidRequestError,
            "The Mapper.polymorphic_abstract parameter may only be used "
            "on a mapper hierarchy which includes the Mapper.polymorphic_on",
        ):

            class Nope(MyClass):
                __mapper_args__ = {"polymorphic_abstract": True}

    def test_not_supported_wo_poly_base(self, decl_base):
        # Same restriction for a standalone base class.
        with expect_raises_message(
            exc.InvalidRequestError,
            "The Mapper.polymorphic_abstract parameter may only be used "
            "on a mapper hierarchy which includes the Mapper.polymorphic_on",
        ):

            class Nope(decl_base):
                __tablename__ = "my_table"
                id: Mapped[int] = mapped_column(primary_key=True)
                __mapper_args__ = {"polymorphic_abstract": True}
| AbstractPolymorphicTest |
python | coleifer__peewee | setup.py | {
"start": 3934,
"end": 7502
class ____(build_ext):
    """build_ext subclass that converts compiler failures into BuildFailure.

    This lets the caller catch one exception type and fall back to a
    pure-Python install when the C extensions cannot be compiled.
    """

    def run(self):
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            raise BuildFailure()

    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except (CCompilerError, DistutilsExecError, DistutilsPlatformError):
            raise BuildFailure()
def _do_setup(c_extensions, sqlite_extensions):
    """Run setup(), optionally with the Cython/SQLite extension modules.

    Both flags must be true for extensions to be built; either one being
    false produces a pure-Python distribution.
    """
    if c_extensions and sqlite_extensions:
        # Only add modules if the required source files are present. This is to
        # work-around python 3.11 and pip being janky.
        if sys.version_info < (3, 11, 0):
            ext_modules = [sqlite_ext_module, sqlite_udf_module]
        else:
            ext_modules = []
            for m in (sqlite_ext_module, sqlite_udf_module):
                if all(os.path.exists(src) for src in m.sources):
                    ext_modules.append(m)
                else:
                    print('could not find sources for module: %s!' % m.sources)
                    print('try adding "cython" to your local pyproject.toml')
    else:
        ext_modules = None
    # Read the version straight out of peewee.py so it is defined once.
    with open('peewee.py', 'rt') as fh:
        version, = [l for l in fh.readlines() if l.startswith('__version__')]
    version, = re.search(r'\'([\d\.]+)\'', version).groups()
    setup(
        name='peewee',
        version=version,
        description='a little orm',
        long_description=readme,
        author='Charles Leifer',
        author_email='coleifer@gmail.com',
        url='https://github.com/coleifer/peewee/',
        packages=['playhouse'],
        py_modules=['peewee', 'pwiz'],
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: MIT License',
            'Operating System :: OS Independent',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Programming Language :: Python :: 3.10',
            'Programming Language :: Python :: 3.11',
            'Programming Language :: Python :: 3.12',
            'Programming Language :: Python :: 3.13',
            #'Programming Language :: Python :: 3.14',
            #'Programming Language :: Python :: 3.15',
            #'Programming Language :: Python :: 999.99',
            'Topic :: Database',
            'Topic :: Software Development :: Libraries :: Python Modules',
        ],
        license='MIT License',
        platforms=['any'],
        project_urls={
            'Documentation': 'http://docs.peewee-orm.com',
            'Source': 'https://github.com/coleifer/peewee'},
        scripts=['pwiz.py'],
        zip_safe=False,
        cmdclass={'build_ext': _PeeweeBuildExt},
        ext_modules=cythonize(ext_modules))
# Try a build with C extensions first; on compiler failure, retry as a
# pure-Python install so `pip install peewee` never hard-fails on a
# missing toolchain.
if extension_support:
    try:
        _do_setup(extension_support, sqlite_extension_support)
    except BuildFailure:
        print('#' * 75)
        print('Error compiling C extensions, C extensions will not be built.')
        print('#' * 75)
        _do_setup(False, False)
else:
    _do_setup(False, False)
| _PeeweeBuildExt |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 12837,
"end": 13133
class ____(Integer):
    """A type for bigger ``int`` integers.

    Typically generates a ``BIGINT`` in DDL, and otherwise acts like
    a normal :class:`.Integer` on the Python side.

    """

    # Dispatch key: compilers route this type to visit_big_integer().
    __visit_name__ = "big_integer"
# Numeric TypeVar shared by the numeric types below: Decimal or float.
_N = TypeVar("_N", bound=Union[decimal.Decimal, float])
| BigInteger |
python | google__jax | jax/_src/pjit.py | {
"start": 66270,
"end": 132772
class ____:
    # Hashable summary of one jit argument, used as a dispatch-cache key:
    # abstract value plus sharding/layout/commitment metadata.
    # NOTE(review): likely decorated as a dataclass above this chunk —
    # field order must not change; confirm against the full file.
    aval: Any
    sharding: Any
    format: Any
    committed: bool
    is_np_array: bool

    replace = replace  # type: ignore

    @property
    def shape(self):
        return self.aval.shape

    @property
    def ndim(self):
        return self.aval.ndim
@util.cache(max_size=4096, trace_context_in_key=False)
def create_meta_ty(aval, arg_sharding, arg_format, arg_committed, is_np_array):
  # Cached constructor: identical argument metadata shares one MetaTy
  # instance, keeping downstream cache-key comparisons cheap.
  return MetaTy(aval, arg_sharding, arg_format, arg_committed, is_np_array)
def convert_to_metaty(arg):
  """Summarize a jit argument as a (cached) MetaTy."""
  # TODO(yashkatariya): Remove this Tracer special case after
  # getattr(Tracer, 'sharding') is fast.
  if isinstance(arg, core.Tracer):
    # Tracers carry no concrete sharding/layout; treat as committed.
    return create_meta_ty(arg.aval, None, None, True, False)
  aval = core.shaped_abstractify(arg)
  arg_sharding = getattr(arg, 'sharding', None)
  arg_format = getattr(arg, 'format', None)
  arg_committed = getattr(arg, '_committed', True)
  is_np_array = isinstance(arg, np.ndarray)
  return create_meta_ty(aval, arg_sharding, arg_format, arg_committed,
                        is_np_array)
def _pjit_call_impl_python(
    *args,
    jaxpr: core.ClosedJaxpr,
    in_shardings, out_shardings, in_layouts, out_layouts,
    donated_invars, ctx_mesh, name, keep_unused, inline,
    compiler_options_kvs):
  """Python slow path for a jit call: lower, compile, and execute.

  Returns ``(outputs, compiled, pgle_profiler, const_args)`` so the caller
  can build C++ fastpath data from the compiled executable.
  """
  util.test_event("jit_cpp_cache_miss")
  pgle_compile_options, pgle_profiler = {}, None
  if config.enable_pgle.value and config.pgle_profiling_runs.value > 0:
    compilation_target_key = jaxpr
    pgle_profiler = _pgle_profiler_dict.get(compilation_target_key)
    if pgle_profiler is None:
      pgle_profiler = profiler.PGLEProfiler(
          config.pgle_profiling_runs.value,
          config.pgle_aggregation_percentile.value)
      _pgle_profiler_dict[compilation_target_key] = pgle_profiler

    # The method below will return FDO profile when module was profiled
    # config.jax_pgle_profiling_runs amount of times, otherwise the result will
    # be None.
    fdo_profile = pgle_profiler.consume_fdo_profile()
    if fdo_profile is not None:
      pgle_compile_options['fdo_profile'] = fdo_profile

  compiler_options_kvs = compiler_options_kvs + tuple(pgle_compile_options.items())
  # Passing mutable PGLE profile here since it should be extracted by JAXPR to
  # initialize the fdo_profile compile option.
  arg_types = map(convert_to_metaty, args)
  computation = _resolve_and_lower(
      arg_types, jaxpr=jaxpr, in_shardings=in_shardings,
      out_shardings=out_shardings, in_layouts=in_layouts,
      out_layouts=out_layouts, donated_invars=donated_invars,
      ctx_mesh=ctx_mesh, name=name, keep_unused=keep_unused,
      inline=inline, lowering_platforms=None,
      lowering_parameters=mlir.LoweringParameters(),
      pgle_profiler=pgle_profiler,
      compiler_options_kvs=compiler_options_kvs,
  )
  compiled = computation.compile()

  # This check is expensive so only do it if enable_checks is on.
  if compiled._auto_spmd_lowering and config.enable_checks.value:
    pxla.check_array_xla_sharding_layout_match(
        args, compiled._in_shardings, compiled._in_layouts,  # type: ignore
        jaxpr.jaxpr._debug_info, compiled._kept_var_idx)

  if config.distributed_debug.value:
    # Defensively only perform fingerprint logic if debug logging is enabled
    # NOTE(skyewm): I didn't benchmark this
    fingerprint = None
    if hasattr(compiled.runtime_executable(), "fingerprint"):
      fingerprint = compiled.runtime_executable().fingerprint
    if fingerprint is not None:
      fingerprint = fingerprint.hex()
    distributed_debug_log(("Running pjit'd function", name),
                          ("in_shardings", in_shardings),
                          ("out_shardings", out_shardings),
                          ("in_layouts", in_layouts),
                          ("out_layouts", out_layouts),
                          ("abstract args", map(core.abstractify, args)),
                          ("fingerprint", fingerprint))
  return (compiled.unsafe_call(*computation.const_args, *args),
          compiled, pgle_profiler, computation.const_args)
@weakref_lru_cache
def _get_jaxpr_as_fun(jaxpr, in_shardings, out_shardings, in_layouts,
                      out_layouts, donated_invars, ctx_mesh, name,
                      keep_unused, inline, compiler_options_kvs):
  """Return a callable that evaluates *jaxpr* without pinning it alive."""
  # The input jaxpr to `_get_jaxpr_as_fun` is under a weakref_lru_cache so
  # returning `core.jaxpr_as_fun(jaxpr)` directly creates a strong reference to
  # the jaxpr defeating the purpose of weakref_lru_cache. So return a function
  # that closes over a weakrefed jaxpr and gets called inside that function.
  # This way there won't be a strong reference to the jaxpr from the output
  # function.
  jaxpr = weakref.ref(jaxpr)
  return lambda *args: core.jaxpr_as_fun(jaxpr())(*args)  # pylint: disable=unnecessary-lambda
def _pjit_call_impl(*args, jaxpr: core.ClosedJaxpr,
                    in_shardings, out_shardings, in_layouts, out_layouts,
                    donated_invars, ctx_mesh, name, keep_unused, inline,
                    compiler_options_kvs):
  """Impl rule for jit_p: dispatch through the C++ fastpath with a Python
  cache-miss fallback that compiles and records fastpath data."""

  def call_impl_cache_miss(*args_, **kwargs_):
    # args_ do not include the const args
    # See https://docs.jax.dev/en/latest/internals/constants.html.
    # TODO(necula): remove num_const_args when fixing the C++ path
    out_flat, compiled, pgle_profiler, const_args = _pjit_call_impl_python(
        *args, jaxpr=jaxpr, in_shardings=in_shardings,
        out_shardings=out_shardings, in_layouts=in_layouts,
        out_layouts=out_layouts, donated_invars=donated_invars,
        ctx_mesh=ctx_mesh, name=name, keep_unused=keep_unused,
        inline=inline, compiler_options_kvs=compiler_options_kvs)
    fastpath_data = _get_fastpath_data(
        compiled, tree_structure(out_flat), args, out_flat,
        jaxpr.effects, jaxpr.consts, None, pgle_profiler,
        const_args)
    return out_flat, fastpath_data, _need_to_rebuild_with_fdo(pgle_profiler)

  f = _get_jaxpr_as_fun(
      jaxpr, in_shardings, out_shardings, in_layouts, out_layouts,
      donated_invars, ctx_mesh, name, keep_unused, inline,
      compiler_options_kvs)
  donated_argnums = tuple(i for i, d in enumerate(donated_invars) if d)
  # Cache key mirrors the jit-time global cache key structure.
  cache_key = pxla.JitGlobalCppCacheKeys(
      donate_argnums=donated_argnums, donate_argnames=None,
      device=None, backend=None,
      in_shardings_treedef=None, in_shardings_leaves=in_shardings,
      out_shardings_treedef=None, out_shardings_leaves=out_shardings,
      in_layouts_treedef=None, in_layouts_leaves=in_layouts,
      out_layouts_treedef=None, out_layouts_leaves=out_layouts)
  return xc._xla.pjit(
      name, f, call_impl_cache_miss, [], [], cache_key,
      tree_util.dispatch_registry, pxla.cc_shard_arg,
      _get_cpp_global_cache(cache_key.contains_explicit_attributes))(*args)

jit_p.def_impl(_pjit_call_impl)
# This cache is important for python dispatch performance.
@weakref_lru_cache
def _pjit_lower(
    jaxpr: core.ClosedJaxpr,
    in_shardings,
    out_shardings,
    in_layouts: pxla.MaybeLayout,
    out_layouts: pxla.MaybeLayout,
    donated_invars,
    ctx_mesh,
    name: str,
    keep_unused: bool,
    inline: bool,
    compiler_options_kvs: tuple[tuple[str, Any], ...],
    *,
    lowering_platforms: tuple[str, ...] | None,
    lowering_parameters: mlir.LoweringParameters,
    pgle_profiler: profiler.PGLEProfiler | None) -> pxla.MeshComputation:
  """Lower a jit'd jaxpr to a MeshComputation (cached by weak jaxpr ref)."""
  return pxla.lower_sharding_computation(
      jaxpr, 'jit', name, in_shardings, out_shardings,
      in_layouts, out_layouts, tuple(donated_invars),
      keep_unused=keep_unused, context_mesh=ctx_mesh,
      compiler_options_kvs=compiler_options_kvs,
      lowering_platforms=lowering_platforms,
      lowering_parameters=lowering_parameters,
      pgle_profiler=pgle_profiler)
def pjit_staging_rule(trace, source_info, *args, **params):
  """Staging rule for jit_p: inline when possible, otherwise emit an eqn.

  Handles three cases: inlinable calls (no shardings/layouts requested),
  dynamic-shapes staging with output forwarding, and mutable-const hoisting.
  """
  if params["compiler_options_kvs"]:
    raise ValueError(
        '`compiler_options` can only be passed to top-level `jax.jit`. Got'
        f' compiler_options={dict(params["compiler_options_kvs"])} specified on'
        f' a nested jit with name: {params["name"]} and source info:'
        f' {source_info_util.summarize(source_info)}')
  # If we're inlining, no need to compute forwarding information; the inlined
  # computation will in effect forward things.
  if (params["inline"] and
      all(isinstance(i, UnspecifiedValue) for i in params["in_shardings"]) and
      all(isinstance(o, UnspecifiedValue) for o in params["out_shardings"]) and
      all(i is None for i in params["in_layouts"]) and
      all(o is None for o in params["out_layouts"])):
    jaxpr = params["jaxpr"]
    if config.dynamic_shapes.value:
      # Inline jaxpr doesn't handle dynamic shapes when inlining. If dynamic
      # shapes are enabled, use eval_jaxpr, which uses the tracing machinery,
      # but redundantly performs abstract evaluation again.
      with core.set_current_trace(trace):
        out = core.eval_jaxpr(jaxpr.jaxpr, jaxpr.consts, *args,
                              propagate_source_info=False)
    else:
      out = pe.inline_jaxpr_into_trace(
          trace, source_info, jaxpr.jaxpr, jaxpr.consts, *args)
    return [trace.to_jaxpr_tracer(x, source_info) for x in out]

  jaxpr = params['jaxpr']
  if config.dynamic_shapes.value:
    # Prune forwarded outputs and reconstruct them from the inputs below.
    jaxpr, in_fwd, out_shardings, out_layouts = _pjit_forwarding(
        jaxpr, params['out_shardings'], params['out_layouts'])
    params = dict(params, jaxpr=jaxpr, out_shardings=out_shardings,
                  out_layouts=out_layouts)
    outvars = map(trace.frame.newvar, _out_type(jaxpr))
    eqn = core.new_jaxpr_eqn(
        [arg.var for arg in args], outvars, jit_p, params,
        jaxpr.effects, source_info)
    trace.frame.add_eqn(eqn)
    out_tracers = [pe.DynamicJaxprTracer(trace, v.aval, v, source_info)
                   for v in outvars]
    # Splice forwarded inputs back into the output list in order.
    out_tracers_ = iter(out_tracers)
    out_tracers = [args[f] if type(f) is int else next(out_tracers_)
                   for f in in_fwd]
    assert next(out_tracers_, None) is None
  elif any(isinstance(c, core.Ref) for c in jaxpr.consts):
    # Mutable consts become explicit inputs of the staged call.
    jaxpr, consts = pxla._move_mutable_consts(jaxpr)
    consts = [trace.new_const(c, source_info) for c in consts]
    in_shardings = (*params['in_shardings'],) + (UNSPECIFIED,) * len(consts)
    in_layouts = (*params['in_layouts'],) + (None,) * len(consts)
    donated_invars = (*params['donated_invars'],) + (False,) * len(consts)
    new_params = dict(params, jaxpr=jaxpr, in_shardings=in_shardings,
                      in_layouts=in_layouts, donated_invars=donated_invars)
    out_tracers = trace.default_process_primitive(
        jit_p, (*args, *consts), new_params, source_info=source_info)
  else:
    out_tracers = trace.default_process_primitive(
        jit_p, args, params, source_info=source_info)
  return out_tracers

pe.custom_staging_rules[jit_p] = pjit_staging_rule
def _pjit_forwarding(jaxpr, out_shardings, out_layouts):
  """Compute input->output forwarding and prune forwarded outputs.

  Returns ``(pruned_jaxpr, in_fwd, out_shardings, out_layouts)`` where
  ``in_fwd[i]`` is the input index forwarded to output ``i``, or None.
  Outputs with a requested sharding or layout are never forwarded.
  """
  in_fwd: list[int | None] = pe._jaxpr_forwarding(jaxpr.jaxpr)
  in_fwd = [fwd if isinstance(os, UnspecifiedValue) and ol is None else None
            for fwd, os, ol in zip(in_fwd, out_shardings, out_layouts)]
  keep = [f is None for f in in_fwd]
  jaxpr = pe.prune_closed_jaxpr_outputs(jaxpr, keep)
  out_shardings = tuple(o for o, k in zip(out_shardings, keep) if k)
  out_layouts = tuple(o for o, k in zip(out_layouts, keep) if k)
  return jaxpr, in_fwd, out_shardings, out_layouts
def pjit_forwarding_rule(eqn):
  """Forwarding rule for jit_p equations (active only with dynamic shapes)."""
  if not config.dynamic_shapes.value:
    # No forwarding: keep the equation untouched.
    return [None] * len(eqn.outvars), eqn
  jaxpr, in_fwd, out_shardings, out_layouts = _pjit_forwarding(
      eqn.params['jaxpr'], eqn.params['out_shardings'], eqn.params['out_layouts'])
  new_outvars = [v for v, f in zip(eqn.outvars, in_fwd) if f is None]
  new_params = dict(eqn.params, jaxpr=jaxpr, out_shardings=out_shardings,
                    out_layouts=out_layouts)
  new_eqn = eqn.replace(params=new_params, outvars=new_outvars)
  return in_fwd, new_eqn
# TODO(mattjj): Remove pjit_forwarding_rule and also in staging rule.
pe.forwarding_rules[jit_p] = pjit_forwarding_rule
# TODO(mattjj): remove/trivialize this when jaxprs have type annotation on them,
# since it's actually not possible in general to infer the type from the term
def _out_type(jaxpr: core.ClosedJaxpr) -> list[core.AbstractValue]:
  """Compute output avals, rewriting dynamic-shape dims to In/OutDBIdx."""
  out = []
  in_idx = {v: i for i, v in enumerate(jaxpr.jaxpr.invars)}
  out_idx = {x: i for i, x in enumerate(jaxpr.jaxpr.invars)
             if type(x) is core.Var}
  for x in jaxpr.jaxpr.outvars:
    aval = x.aval
    if type(aval) is core.DShapedArray:
      # Replace dimension variables with de Bruijn-style indices into the
      # input (InDBIdx) or output (OutDBIdx) lists.
      shape = [core.InDBIdx(in_idx[d]) if d in in_idx else
               core.OutDBIdx(out_idx[d]) if d in out_idx else
               d for d in x.aval.shape]
      aval = aval.update(shape=tuple(shape))
    out.append(aval)
  return out
def _pjit_typecheck(ctx_factory, *in_atoms, jaxpr, **params):
  # Type-check a jit call like any call primitive, exposing the inner
  # jaxpr under the call_jaxpr param name _check_call expects.
  return core._check_call(ctx_factory, jit_p, in_atoms,
                          dict(params, call_jaxpr=jaxpr.jaxpr))
core.custom_typechecks[jit_p] = _pjit_typecheck
def _pjit_abstract_eval(*args, jaxpr, out_shardings, **_):
  # Output avals come straight from the jaxpr; effects are recomputed when
  # the jaxpr closes over constvars (their effects may differ per equation).
  effs = core.eqn_effects(jaxpr) if jaxpr.constvars else jaxpr.effects
  return jaxpr.out_avals, effs
jit_p.def_effectful_abstract_eval(_pjit_abstract_eval)
def _pjit_cached_lower_jaxpr_to_fun(ctx: mlir.LoweringRuleContext,
                                    name: str, jaxpr: core.ClosedJaxpr,
                                    num_const_args: int, in_avals,
                                    effects, in_shardings,
                                    out_shardings, in_layouts, out_layouts,
                                    api_name):
  """Lower `jaxpr` to an MLIR function, cached per module context.

  The cache key compares shardings semantically (SemanticallyEqualShardings)
  along with layouts, effects and device count, so equivalent nested jits in
  one module share a single lowered function.
  """
  assert len(in_avals) == num_const_args + len(jaxpr.in_avals)
  assert len(in_avals) == len(in_shardings)
  assert len(in_avals) == len(in_layouts)
  mod_ctx = ctx.module_context
  axis_ctx = ctx.module_context.axis_context
  # Device count participates in the cache key; it differs per axis-context
  # flavor and is left as None for other context kinds.
  num_devices = None
  if isinstance(axis_ctx, sharding_impls.ShardingContext):
    num_devices = axis_ctx.num_devices
  elif isinstance(axis_ctx, sharding_impls.SPMDAxisContext):
    num_devices = axis_ctx.mesh.size
  key = (jit_p, name, jaxpr, effects, num_devices,
         pxla.SemanticallyEqualShardings(in_shardings, in_avals),  # pytype: disable=wrong-arg-types
         pxla.SemanticallyEqualShardings(out_shardings, jaxpr.out_avals),  # pytype: disable=wrong-arg-types
         in_layouts, out_layouts, api_name)
  func = mod_ctx.cached_primitive_lowerings.get(key, None)
  if func is None:
    # UnspecifiedValue shardings are passed through as None (no annotation).
    arg_shardings = [None if isinstance(i, UnspecifiedValue) else i for i in in_shardings]
    result_shardings = [None if isinstance(o, UnspecifiedValue) else o for o in out_shardings]
    # TODO(b/228598865): non-top-level functions cannot have shardings set
    # directly on the inputs or outputs because they are lost during MLIR->HLO
    # conversion. using_sharding_annotation=False means we add an identity
    # operation instead.
    num_callbacks = len(mod_ctx.host_callbacks)
    func = mlir.lower_jaxpr_to_fun(
        mod_ctx, name, jaxpr, effects,
        num_const_args=num_const_args, in_avals=in_avals,
        arg_shardings=arg_shardings, result_shardings=result_shardings,
        use_sharding_annotations=False,
        arg_layouts=in_layouts, result_layouts=out_layouts)
    # If this Jaxpr includes callbacks, we can't cache the lowering because
    # on TPU every callback must have a globally unique channel, but the
    # channel gets assigned during lowering.
    has_callbacks = len(mod_ctx.host_callbacks) > num_callbacks
    if not has_callbacks or "tpu" not in mod_ctx.platforms:
      mod_ctx.cached_primitive_lowerings[key] = func
  return func
def _pjit_lowering(ctx: mlir.LoweringRuleContext, *args, name: str,
                   jaxpr: core.ClosedJaxpr, in_shardings,
                   out_shardings, in_layouts, out_layouts, donated_invars,
                   ctx_mesh, keep_unused, inline, compiler_options_kvs):
  """MLIR lowering rule for jit_p.

  Emits a func.call to the (cached) lowered inner function. Effect tokens and
  hoisted jaxpr constants are prepended to the call operands, and the token
  results are threaded back into the lowering context.
  """
  effects = list(ctx.tokens_in.effects())
  output_types = map(mlir.aval_to_ir_type, ctx.avals_out)
  # One leading token result per effect, ahead of the real outputs.
  output_types = [mlir.token_type()] * len(effects) + output_types
  flat_output_types = mlir.flatten_ir_types(output_types)
  # Hoisted constants become extra leading inputs; extend shardings/layouts
  # to cover them.
  const_args_and_avals = core.jaxpr_const_args(jaxpr.jaxpr)
  const_args, const_arg_avals = util.unzip2(const_args_and_avals)
  in_avals = (*const_arg_avals, *jaxpr.in_avals)
  ca_shardings = const_args_shardings(const_args)
  in_shardings = ca_shardings + in_shardings  # type: ignore
  ca_layouts = const_args_layouts(const_args, const_arg_avals, ca_shardings)
  in_layouts = ca_layouts + in_layouts  # type: ignore
  func = _pjit_cached_lower_jaxpr_to_fun(
      ctx, name, jaxpr, len(const_args), in_avals,
      tuple(effects), in_shardings,
      out_shardings, in_layouts, out_layouts,
      api_name='jit')
  tokens_in = [ctx.tokens_in.get(eff) for eff in effects]
  hoisted_const_values = [
      mlir.ir_constant(c, const_lowering=ctx.const_lowering, aval=aval)
      for c, aval in const_args_and_avals
  ]
  # Operand order: dim vars, effect tokens, hoisted consts, then real args.
  args = (*ctx.dim_var_values, *tokens_in, *hoisted_const_values, *args)
  with mlir.source_info_to_location(
      ctx.module_context, None,
      ctx.name_stack.extend(util.wrap_name('jit', name)), ctx.traceback):
    call = func_dialect.CallOp(
        flat_output_types, ir.FlatSymbolRefAttr.get(func.name.value),
        mlir.flatten_ir_values(args))
    mlir.wrap_compute_type_in_place(ctx, call)
  out_nodes = mlir.unflatten_ir_values_like_types(call.results, output_types)
  tokens, out_nodes = split_list(out_nodes, [len(effects)])
  tokens_out = ctx.tokens_in.update_tokens(mlir.TokenSet(zip(effects, tokens)))
  ctx.set_tokens_out(tokens_out)
  return out_nodes
# TODO(phawkins): this is marked uncacheable because it has its own cache and
# because the cache breaks jaxpr metadata like source locations. We should fix
# the metadata problem and consolidate the caches.
mlir.register_lowering(jit_p, _pjit_lowering, cacheable=False)
def const_args_shardings(const_args: Sequence[ArrayLike]) -> Sequence[PjitSharding]:
  """Resolve shardings for hoisted const args, starting from all-unspecified."""
  unspecified = (sharding_impls.UNSPECIFIED,) * len(const_args)
  return _resolve_in_shardings(const_args, unspecified)
def const_args_layouts(
    const_args: Sequence[ArrayLike],
    avals: Sequence[core.AbstractValue],
    shardings: Sequence[PjitSharding]
) -> Sequence[Layout | AutoLayout | None]:
  """Resolve layouts for hoisted const args, starting from all-None layouts."""
  no_layouts = (None,) * len(const_args)
  return _resolve_in_layouts(const_args, no_layouts, shardings, avals)
def _pjit_batcher(axis_data, vals_in,
                  dims_in: tuple[int, ...],
                  jaxpr: core.ClosedJaxpr,
                  in_shardings, out_shardings, in_layouts, out_layouts,
                  donated_invars, ctx_mesh, name, keep_unused, inline,
                  compiler_options_kvs):
  """Batching (vmap) rule for jit_p: batch the inner jaxpr and insert the new
  batch dimension into each batched input/output sharding."""
  segment_lens, dims_in = batching.indirectify_ragged_axes(dims_in)
  new_jaxpr, axes_out = batching.batch_jaxpr2(jaxpr, axis_data, dims_in)
  # TODO(axch): prepend with Nones (?) to account for new segment_lens inputs
  # Inputs/outputs without a batch dim keep their shardings unchanged.
  in_shardings = tuple(
      _pjit_batcher_for_sharding(i, axis_in, axis_data.spmd_name, ctx_mesh,
                                 aval.ndim)
      if axis_in is not None else i
      for axis_in, i, aval in zip(dims_in, in_shardings, new_jaxpr.in_avals))
  out_shardings = tuple(
      _pjit_batcher_for_sharding(o, axis_out, axis_data.spmd_name, ctx_mesh,
                                 aval.ndim)
      if axis_out is not None else o
      for axis_out, o, aval in zip(axes_out, out_shardings, new_jaxpr.out_avals))
  # TODO(yashkatariya): Figure out layouts should change under vmap.
  if not (all(l is None for l in in_layouts) and
          all(l is None for l in out_layouts)):
    raise NotImplementedError(
        'Concrete layouts are not supported for vmap(jit).')
  vals_out = jit_p.bind(
      *vals_in,
      jaxpr=new_jaxpr,
      in_shardings=in_shardings,
      out_shardings=out_shardings,
      in_layouts=in_layouts,
      out_layouts=out_layouts,
      donated_invars=donated_invars,
      ctx_mesh=ctx_mesh,
      name=name,
      keep_unused=keep_unused,
      inline=inline,
      compiler_options_kvs=compiler_options_kvs)
  resolved_axes_out = batching.resolve_ragged_axes_against_inputs_outputs(
      vals_in, vals_out, axes_out)
  return vals_out, resolved_axes_out
batching.fancy_primitive_batchers[jit_p] = _pjit_batcher
batching.ragged_prop_rules[jit_p] = batching.ragged_mask_no_op_rule
def _pjit_batcher_for_sharding(
    s, dim: int | batching.RaggedAxis, spmd_axis_name: tuple[str, ...] | None,
    mesh, ndim: int):
  """Insert the vmapped batch dimension `dim` into sharding `s`.

  Without spmd_axis_name the new dim is left unsharded (UNCONSTRAINED for
  abstract-mesh NamedShardings, tile size 1 in the HLO proto otherwise).
  With spmd_axis_name the new dim is mapped onto those mesh axes.
  """
  if isinstance(s, UnspecifiedValue):
    return s
  hlo_s = s._to_xla_hlo_sharding(ndim)
  if spmd_axis_name is None:
    if sharding_impls.is_hlo_sharding_replicated(hlo_s):
      return s
    if isinstance(s, NamedSharding) and isinstance(s.mesh, AbstractMesh):
      return NamedSharding(
          s.mesh, pxla.batch_spec(s.spec, dim, PartitionSpec.UNCONSTRAINED))
    # Concrete mesh: splice a size-1 tile dimension into the HLO sharding
    # proto at position `dim`, then rebuild a sharding of the original flavor.
    new_op = hlo_s.to_proto().clone()
    tad = list(new_op.tile_assignment_dimensions)
    tad.insert(dim, 1)  # type: ignore
    new_op.tile_assignment_dimensions = tad
    new_gs = GSPMDSharding(s._internal_device_list, new_op)
    return pxla._get_out_sharding_from_orig_sharding([new_gs], [None], s, None)[0]
  else:
    if isinstance(s, NamedSharding) and isinstance(s.mesh, AbstractMesh):
      return NamedSharding(
          s.mesh, pxla.batch_spec(s.spec, dim, spmd_axis_name))
    if isinstance(s, NamedSharding):
      mesh = s.mesh
    if mesh.empty:
      raise ValueError(
          'If you are using spmd_axis_name parameter of jax.vmap,'
          ' please make sure to run your jitted function inside the mesh'
          ' context manager. Only `jax.lax.with_sharding_constraint` with'
          ' `jax.sharding.NamedSharding` as an input can be transformed with'
          ' spmd_axis_name batching rules outside of an explicit mesh context'
          f' manager scope{s!r}')
    spec = parse_flatten_op_sharding(hlo_s, mesh)[0]
    return NamedSharding(
        mesh, pxla.batch_spec(spec, dim, spmd_axis_name))
def _pjit_jvp(primals_in, tangents_in,
              jaxpr, in_shardings, out_shardings, in_layouts, out_layouts,
              donated_invars, ctx_mesh, name, keep_unused, inline,
              compiler_options_kvs):
  """JVP rule for jit_p: jvp the inner jaxpr and bind one jit over primals
  plus nonzero tangents, duplicating per-input/per-output params to match."""
  is_nz_tangents_in = [type(t) is not ad.Zero for t in tangents_in]
  jaxpr_jvp, is_nz_tangents_out = ad.jvp_jaxpr(
      jaxpr, is_nz_tangents_in, instantiate=False)
  # Keep only entries of `l` whose corresponding tangent is nonzero.
  def _filter_zeros(is_nz_l, l):
    return (x for nz, x in zip(is_nz_l, l) if nz)
  _filter_zeros_in = partial(_filter_zeros, is_nz_tangents_in)
  _filter_zeros_out = partial(_filter_zeros, is_nz_tangents_out)
  outputs = jit_p.bind(
      *primals_in, *_filter_zeros_in(tangents_in),
      jaxpr=jaxpr_jvp,
      in_shardings=(*in_shardings, *_filter_zeros_in(in_shardings)),
      out_shardings=(*out_shardings, *_filter_zeros_out(out_shardings)),
      in_layouts=(*in_layouts, *_filter_zeros_in(in_layouts)),
      out_layouts=(*out_layouts, *_filter_zeros_out(out_layouts)),
      donated_invars=(*donated_invars, *_filter_zeros_in(donated_invars)),
      ctx_mesh=ctx_mesh,
      name=name,
      keep_unused=keep_unused,
      inline=inline,
      compiler_options_kvs=compiler_options_kvs)
  primals_out, tangents_out = split_list(outputs, [len(jaxpr.jaxpr.outvars)])
  assert len(primals_out) == len(jaxpr.jaxpr.outvars)
  # Re-interleave symbolic zeros for the outputs whose tangents were dropped.
  tangents_out_it = iter(tangents_out)
  return primals_out, [next(tangents_out_it) if nz else ad.Zero(aval)
                       for nz, aval in zip(is_nz_tangents_out, jaxpr.out_avals)]
ad.primitive_jvps[jit_p] = _pjit_jvp
def _pjit_linearize(nzs, *primals_in, jaxpr, in_shardings, out_shardings,
                    in_layouts, out_layouts, donated_invars, ctx_mesh, name,
                    keep_unused, inline, compiler_options_kvs):
  """Linearization rule for jit_p.

  Splits the jaxpr into a primal jaxpr (which also emits residuals) and a
  tangent jaxpr, prunes residuals that are forwarded from inputs or duplicate
  primal outputs, binds the primal jit, and returns a `tangent_fun` closure
  that binds the tangent jit over residuals and nonzero tangents.
  """
  primal_jaxpr, num_residuals_out, nzs_out, in_fwd_res, tangent_jaxpr = \
      ad.linearize_jaxpr(jaxpr, nzs)
  num_residuals_in = len(in_fwd_res)
  num_primals_out = len(primal_jaxpr.out_avals) - num_residuals_out
  # Residual inputs to the tangent jit get default shardings/layouts and are
  # never donated.
  res_shardings_in = (UNSPECIFIED,) * num_residuals_in
  res_layouts_in = (None,) * num_residuals_in
  res_donated = (False,) * num_residuals_in
  primal_out_shardings = tuple(out_shardings) + (UNSPECIFIED,) * num_residuals_out
  primal_out_layouts = tuple(out_layouts) + (None,) * num_residuals_out
  # Short-circuit idiom: check_jaxpr only runs when enable_checks is set.
  config.enable_checks.value and core.check_jaxpr(primal_jaxpr.jaxpr)
  config.enable_checks.value and core.check_jaxpr(tangent_jaxpr.jaxpr)
  def keep_where(l, should_keep):
    return tuple(x for x, keep in zip(l, should_keep) if keep)
  # Input-to-output forwarding.
  in_fwd = pe._jaxpr_forwarding(primal_jaxpr.jaxpr)
  in_fwd_primal, in_fwd_res_ = split_list(in_fwd, [num_primals_out])
  assert all(f is None for f in in_fwd_res_)
  # Only forward primal outputs that carry no user-specified sharding/layout.
  in_fwd = [
      fwd if isinstance(os, UnspecifiedValue) and ol is None else None
      for os, ol, fwd in zip(out_shardings, out_layouts, in_fwd_primal)
  ] + in_fwd_res_
  del in_fwd_res_, in_fwd_primal
  keep = [f is None for f in in_fwd]
  primal_jaxpr = pe.prune_closed_jaxpr_outputs(primal_jaxpr, keep)
  primal_out_shardings = keep_where(primal_out_shardings, keep)
  primal_out_layouts = keep_where(primal_out_layouts, keep)
  _, kept_res = split_list(keep, [num_primals_out])
  num_kept_residuals = sum(kept_res)
  del keep, kept_res, num_primals_out
  # Output-to-output forwarding.
  num_primals_out = len(primal_jaxpr.out_avals) - num_kept_residuals
  out_vars, res_vars = split_list(primal_jaxpr.jaxpr.outvars, [num_primals_out])
  idx_map = {id(v): i for i, v in enumerate(out_vars)}
  out_fwd = [None] * num_primals_out + [idx_map.get(id(v)) for v in res_vars]
  keep = [f is None for f in out_fwd]
  primal_jaxpr = pe.prune_closed_jaxpr_outputs(primal_jaxpr, keep)
  primal_out_shardings = keep_where(primal_out_shardings, keep)
  primal_out_layouts = keep_where(primal_out_layouts, keep)
  del keep
  tangent_avals_out = [a.to_tangent_aval() for a in jaxpr.out_avals]
  def tangent_fun(residuals, *tangents):
    # Bind the tangent jit over residuals plus nonzero tangents; reinsert
    # symbolic zeros for outputs whose tangents are structurally zero.
    tangents_nz = _filter_zeros(nzs, tangents)
    nz_tangents_out = jit_p.bind(
        *residuals, *tangents_nz, jaxpr=tangent_jaxpr,
        in_shardings=res_shardings_in + _filter_zeros(nzs, in_shardings),
        out_shardings=_filter_zeros(nzs_out, out_shardings),
        in_layouts=res_layouts_in + _filter_zeros(nzs, in_layouts),
        out_layouts=_filter_zeros(nzs_out, out_layouts),
        donated_invars=res_donated + _filter_zeros(nzs, donated_invars),
        ctx_mesh=ctx_mesh,
        name=name,
        keep_unused=keep_unused,
        inline=inline,
        compiler_options_kvs=compiler_options_kvs)
    nz_tangents_out_ = iter(nz_tangents_out)
    tangents_out = [next(nz_tangents_out_) if nz else ad.Zero(aval)
                    for (aval, nz) in zip(tangent_avals_out, nzs_out)]
    return tangents_out
  def _filter_zeros(is_nz_l, l):
    return tuple(x for nz, x in zip(is_nz_l, l) if nz)
  assert len(in_shardings) == len(primal_jaxpr.in_avals)
  ans = jit_p.bind(*primals_in, jaxpr=primal_jaxpr,
                   in_shardings=in_shardings,
                   out_shardings=primal_out_shardings,
                   in_layouts=in_layouts,
                   out_layouts=primal_out_layouts,
                   donated_invars=donated_invars,
                   ctx_mesh=ctx_mesh,
                   name=name,
                   keep_unused=keep_unused,
                   inline=inline,
                   compiler_options_kvs=compiler_options_kvs)
  # Re-materialize the pruned outputs from their forwarding sources.
  ans = subs_list(out_fwd, ans, ans)
  ans = subs_list(in_fwd, primals_in, ans)
  primal_ans, residuals_ans = split_list(ans, [len(ans) - num_residuals_out])
  residuals_ans = subs_list(in_fwd_res, [*jaxpr.consts, *primals_in], residuals_ans)
  return primal_ans, nzs_out, residuals_ans, tangent_fun
ad.primitive_linearizations[jit_p] = _pjit_linearize
def _pjit_partial_eval(trace: pe.JaxprTrace,
                       *in_tracers,
                       jaxpr: core.ClosedJaxpr, in_shardings, out_shardings,
                       in_layouts, out_layouts, donated_invars, ctx_mesh,
                       name, keep_unused, inline, compiler_options_kvs):
  """Partial-evaluation rule for jit_p.

  Splits the jaxpr into a `known` part (executed now via jit_p.bind, its
  residuals captured as constants) and an `unknown` part (staged out as a new
  jit_p equation). Residuals that merely forward inputs or duplicate known
  outputs are pruned from the known jaxpr and substituted back afterwards.
  """
  in_pvals = [t.pval for t in in_tracers]
  known_ins = tuple(pv.is_known() for pv in in_pvals)
  unknown_ins = tuple(not k for k in known_ins)
  known_jaxpr, unknown_jaxpr, unknown_outs, res_out_avals, in_fwd_res = \
      pe.partial_eval_jaxpr_nounits_fwd(jaxpr, unknown_ins, instantiate=False)
  unknown_outs = tuple(unknown_outs)  # type: ignore[assignment]
  known_outs = tuple(not uk for uk in unknown_outs)
  # out_shardings and out_layouts for residual values output by known_jaxpr
  def keep_where(l, should_keep):
    return tuple(x for x, keep in zip(l, should_keep) if keep)
  known_out_shardings = (keep_where(out_shardings, known_outs)
                         + (UNSPECIFIED,) * len(res_out_avals))
  known_out_layouts = (keep_where(out_layouts, known_outs)
                       + (None,) * len(res_out_avals))
  # Input-to-output forwarding: compute which outputs are just forwarded inputs.
  num_out_primals = len(known_jaxpr.out_avals) - len(res_out_avals)
  in_fwd: list[int | None] = pe._jaxpr_forwarding(known_jaxpr.jaxpr)
  in_fwd_primal, in_fwd_res_ = split_list(in_fwd, [num_out_primals])
  assert all(f is None for f in in_fwd_res_)
  # Only forward primal outputs with no user-specified sharding/layout.
  in_fwd = [
      fwd if isinstance(os, UnspecifiedValue) and ol is None else None
      for os, ol, fwd in zip(
          keep_where(out_shardings, known_outs),
          keep_where(out_layouts, known_outs), in_fwd_primal)
  ] + in_fwd_res_
  del in_fwd_primal, in_fwd_res_
  # Prune jaxpr outputs and out_shardings by removing the input-forwards.
  keep = [f is None for f in in_fwd]
  known_jaxpr = pe.prune_closed_jaxpr_outputs(known_jaxpr, keep)
  known_out_shardings = keep_where(known_out_shardings, keep)
  known_out_layouts = keep_where(known_out_layouts, keep)
  # Update num_out_primals to reflect pruning.
  kept_primals, kept_res = split_list(keep, [num_out_primals])
  num_out_primals = sum(kept_primals)
  del keep, kept_primals, kept_res
  # Output-to-output forwarding: compute which residuals are just primal outputs
  out_vars, res_vars = split_list(known_jaxpr.jaxpr.outvars, [num_out_primals])
  idx_map = {id(v): i for i, v in enumerate(out_vars)}
  out_fwd = [None] * num_out_primals + [idx_map.get(id(v)) for v in res_vars]
  # Prune jaxpr outputs and out_shardings by removing forwarded residuals.
  keep = [f is None for f in out_fwd]
  known_jaxpr = pe.prune_closed_jaxpr_outputs(known_jaxpr, keep)
  known_out_shardings = keep_where(known_out_shardings, keep)
  known_out_layouts = keep_where(known_out_layouts, keep)
  del keep
  known_params = dict(
      jaxpr=known_jaxpr, in_shardings=keep_where(in_shardings, known_ins),
      out_shardings=known_out_shardings,
      in_layouts=keep_where(in_layouts, known_ins),
      out_layouts=known_out_layouts,
      donated_invars=keep_where(donated_invars, known_ins),
      ctx_mesh=ctx_mesh,
      name=name, keep_unused=keep_unused, inline=inline,
      compiler_options_kvs=compiler_options_kvs)
  assert len(known_params['out_shardings']) == len(known_params['jaxpr'].out_avals)
  assert len(known_params['out_layouts']) == len(known_params['jaxpr'].out_avals)
  # Bind known things to pjit_p.
  known_inputs = [pv.get_known() for pv in in_pvals if pv.is_known()]
  all_known_outs = jit_p.bind(*known_inputs, **known_params)
  # Add back in the output fwds.
  all_known_outs = subs_list(out_fwd, all_known_outs, all_known_outs)
  # Add back in the input fwds.
  all_known_outs = subs_list(in_fwd, known_inputs, all_known_outs)
  known_out_vals, residual_vals = \
      split_list(all_known_outs, [len(all_known_outs) - len(res_out_avals)])
  residual_vals_ = iter(residual_vals)
  residual_vals = [next(residual_vals_) if f is None
                   else [*jaxpr.consts, *known_inputs][f] for f in in_fwd_res]
  assert next(residual_vals_, None) is None
  residual_tracers = map(trace.new_instantiated_const, residual_vals)
  # The convention of partial_eval_jaxpr_nounits is to place residual binders at
  # the front of the jaxpr produced, so we move them to the back since both the
  # jaxpr equation built below and the pjit transpose rule assume a
  # residual-inputs-last convention.
  unknown_jaxpr = pe.move_binders_to_back(
      unknown_jaxpr, [True] * len(residual_vals) + [False] * sum(unknown_ins))
  # Set up staged-out 'unknown' eqn
  unknown_in_shardings = (keep_where(in_shardings, unknown_ins)
                          + (UNSPECIFIED,) * len(residual_tracers))
  unknown_in_layouts = (keep_where(in_layouts, unknown_ins)
                        + (None,) * len(residual_tracers))
  unknown_donated_invars = (keep_where(donated_invars, unknown_ins)
                            + (False,) * len(residual_tracers))
  unknown_params = dict(
      jaxpr=unknown_jaxpr,
      in_shardings=unknown_in_shardings,
      in_layouts=unknown_in_layouts,
      out_shardings=keep_where(out_shardings, unknown_outs),
      out_layouts=keep_where(out_layouts, unknown_outs),
      donated_invars=unknown_donated_invars,
      ctx_mesh=ctx_mesh,
      name=name,
      keep_unused=keep_unused,
      inline=inline,
      compiler_options_kvs=compiler_options_kvs)
  unknown_tracers_in = [t for t in in_tracers if not t.pval.is_known()]
  unknown_out_avals = unknown_jaxpr.out_avals
  unknown_tracers_out = [
      pe.JaxprTracer(trace, pe.PartialVal.unknown(aval), None)
      for aval in unknown_out_avals
  ]
  unknown_tracers_in = [*unknown_tracers_in, *residual_tracers]
  eqn = pe.new_eqn_recipe(trace, unknown_tracers_in,
                          unknown_tracers_out,
                          jit_p,
                          unknown_params,
                          unknown_jaxpr.effects,
                          source_info_util.current())
  for t in unknown_tracers_out: t.recipe = eqn
  if effects.partial_eval_kept_effects.filter_in(unknown_jaxpr.effects):
    trace.effect_handles.append(pe.EffectHandle(unknown_tracers_in, eqn))  # type: ignore
  return merge_lists(unknown_outs, known_out_vals, unknown_tracers_out)
pe.custom_partial_eval_rules[jit_p] = _pjit_partial_eval
def _pjit_partial_eval_custom_params_updater(
    unks_in: Sequence[bool], inst_in: Sequence[bool],
    kept_outs_known: Sequence[bool], kept_outs_staged: Sequence[bool],
    num_res_out: int, num_res_in: int, params_known: dict, params_staged: dict
  ) -> tuple[dict, dict]:
  """Adjust per-input/per-output jit params for the known/staged jaxpr split.

  The known jaxpr drops unknown inputs and gains num_res_out residual outputs
  (unspecified sharding, no layout); the staged jaxpr gains num_res_in leading
  residual inputs (unspecified sharding, no layout, never donated).
  """
  # prune inputs to jaxpr_known according to unks_in
  donated_invars_known, _ = pe.partition_list(unks_in, params_known['donated_invars'])
  in_shardings_known, _ = pe.partition_list(unks_in, params_known['in_shardings'])
  _, out_shardings_known = pe.partition_list(kept_outs_known, params_known['out_shardings'])
  in_layouts_known, _ = pe.partition_list(unks_in, params_known['in_layouts'])
  _, out_layouts_known = pe.partition_list(kept_outs_known, params_known['out_layouts'])
  new_params_known = dict(params_known,
                          in_shardings=tuple(in_shardings_known),
                          out_shardings=(*out_shardings_known,
                                         *[UNSPECIFIED] * num_res_out),
                          in_layouts=tuple(in_layouts_known),
                          out_layouts=(*out_layouts_known, *[None] * num_res_out),
                          donated_invars=tuple(donated_invars_known))
  assert len(new_params_known['in_shardings']) == len(params_known['jaxpr'].in_avals)
  assert len(new_params_known['out_shardings']) == len(params_known['jaxpr'].out_avals)
  assert len(new_params_known['in_layouts']) == len(params_known['jaxpr'].in_avals)
  assert len(new_params_known['out_layouts']) == len(params_known['jaxpr'].out_avals)
  # added num_res new inputs to jaxpr_staged, and pruning according to inst_in
  _, donated_invars_staged = pe.partition_list(inst_in, params_staged['donated_invars'])
  donated_invars_staged = [False] * num_res_in + donated_invars_staged
  _, in_shardings_staged = pe.partition_list(inst_in, params_staged['in_shardings'])
  in_shardings_staged = [*[UNSPECIFIED] * num_res_in, *in_shardings_staged]
  _, out_shardings_staged = pe.partition_list(kept_outs_staged, params_staged['out_shardings'])
  _, in_layouts_staged = pe.partition_list(inst_in, params_staged['in_layouts'])
  in_layouts_staged = [*[None] * num_res_in, *in_layouts_staged]
  _, out_layouts_staged = pe.partition_list(kept_outs_staged, params_staged['out_layouts'])
  new_params_staged = dict(params_staged,
                           in_shardings=tuple(in_shardings_staged),
                           out_shardings=tuple(out_shardings_staged),
                           in_layouts=tuple(in_layouts_staged),
                           out_layouts=tuple(out_layouts_staged),
                           donated_invars=tuple(donated_invars_staged))
  assert len(new_params_staged['in_shardings']) == len(params_staged['jaxpr'].in_avals)
  assert len(new_params_staged['out_shardings']) == len(params_staged['jaxpr'].out_avals)
  assert len(new_params_staged['in_layouts']) == len(params_staged['jaxpr'].in_avals)
  assert len(new_params_staged['out_layouts']) == len(params_staged['jaxpr'].out_avals)
  return new_params_known, new_params_staged
pe.partial_eval_jaxpr_custom_rules[jit_p] = \
    partial(pe.closed_call_partial_eval_custom_rule, 'jaxpr',
            _pjit_partial_eval_custom_params_updater)
@lu.cache
def _pjit_transpose_trace(fun: lu.WrappedFun,
                          in_avals: Sequence[core.AbstractValue]):
  """Trace the transposed computation to a ClosedJaxpr (cached on fun/avals)."""
  transpose_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(fun, in_avals)
  transpose_jaxpr = core.ClosedJaxpr(transpose_jaxpr, consts)
  return transpose_jaxpr
def _pjit_transpose(cts_in, *primals_in,
                    jaxpr: core.ClosedJaxpr,
                    in_shardings, out_shardings, in_layouts, out_layouts,
                    donated_invars, ctx_mesh, name, keep_unused, inline,
                    compiler_options_kvs):
  """Transpose rule for jit_p: run the backward pass as its own jit.

  Shardings/layouts for the transposed call are derived by pruning entries
  for undefined primals (inputs) and symbolic-zero cotangents (outputs).
  On internal NaN errors, falls back to a de-optimized backward pass so the
  user sees where the NaN actually arises.
  """
  # Drop entries of xs whose companion in maybe_zeros has type `ty`.
  def prune_type(ty, xs, maybe_zeros):
    return tuple(x for x, mz in zip(xs, maybe_zeros) if type(mz) is not ty)
  dbg = jaxpr.jaxpr.debug_info.with_unknown_names()
  body = lu.wrap_init(ad.closed_backward_pass, debug_info=dbg)
  body = lu.hashable_partial(body, jaxpr, False)
  primals_and_nz_cts_in, in_treedef = tree_flatten((primals_in, cts_in))
  body, cts_out_treedef_thunk = flatten_fun_nokwargs(body, in_treedef)
  transpose_in_shardings = (
    *prune_type(ad.UndefinedPrimal, in_shardings, primals_in),
    *prune_type(ad.Zero, out_shardings, cts_in)
  )
  transpose_in_layouts = (
    *prune_type(ad.UndefinedPrimal, in_layouts, primals_in),
    *prune_type(ad.Zero, out_layouts, cts_in)
  )
  global_cts_in_avals = tuple(
      core.AvalQDD(a, cur_qdd(x)) if (a := typeof(x)).has_qdd else a
      for x in primals_and_nz_cts_in)
  transpose_jaxpr = _pjit_transpose_trace(body, global_cts_in_avals)
  cts_out_treedef = cts_out_treedef_thunk()
  transpose_out_shardings = prune_type(
      ad.Zero,
      in_shardings,
      tree_unflatten(cts_out_treedef, [object()] * cts_out_treedef.num_leaves))
  transpose_out_layouts = prune_type(
      ad.Zero,
      in_layouts,
      tree_unflatten(cts_out_treedef, [object()] * cts_out_treedef.num_leaves))
  try:
    nz_cts_out = jit_p.bind(
        *primals_and_nz_cts_in,
        jaxpr=transpose_jaxpr,
        in_shardings=transpose_in_shardings,
        out_shardings=transpose_out_shardings,
        in_layouts=transpose_in_layouts,
        out_layouts=transpose_out_layouts,
        donated_invars=(False,) * len(primals_and_nz_cts_in),
        ctx_mesh=ctx_mesh,
        name=name,
        keep_unused=keep_unused,
        inline=inline,
        compiler_options_kvs=compiler_options_kvs)
  except api_util.InternalFloatingPointError as e:
    print("Invalid nan value encountered in the backward pass of a jax.jit "
          "function. Calling the de-optimized backward pass.")
    try:
      _ = ad.closed_backward_pass(jaxpr, None, primals_in, cts_in)
    except (FloatingPointError, ZeroDivisionError) as e2:
      raise e2 from None  # great
    else:
      # If control reaches this line, we got a NaN on the output of `compiled`
      # but not `fun.call_wrapped` on the same arguments. Let's tell the user.
      api_util._raise_no_nan_in_deoptimized(e)
  return tree_unflatten(cts_out_treedef, nz_cts_out)
ad.primitive_transposes[jit_p] = _pjit_transpose
def _pjit_transpose_fancy(
    cts_in, *args, jaxpr, in_shardings, out_shardings, in_layouts,
    out_layouts, donated_invars, ctx_mesh, name, keep_unused, inline,
    compiler_options_kvs):
  """Accumulator-based ("fancy") transpose rule for jit_p.

  Projects ValAccum accumulators out of `args`, runs the transposed jaxpr as
  a jit, then accumulates the resulting cotangents back into the ValAccums
  (side-effecting; no return value). Shardings/layouts are pruned to match:
  ValAccum inputs and Zero cotangents carry no entries into the transposed
  call, and only AbstractValue slots of the output tree produce cotangents.
  """
  primals_ctrefs, specs = ad.project_accums(args)
  in_flat, in_tree = tree_flatten((primals_ctrefs, cts_in))
  in_avals = [core.AvalQDD(a, cur_qdd(x)) if (a := typeof(x)).has_qdd  # type: ignore
              else a for x in in_flat]
  trans_jaxpr, out_tree = _transpose_jaxpr_fancy(jaxpr, in_tree, (*in_avals,), specs)
  trans_in_shardings = (
      [s for x, s in zip(args, in_shardings) if not isinstance(x,ad.ValAccum)] +
      [s for x, s in zip(cts_in, out_shardings) if not isinstance(x, ad.Zero)])
  trans_in_layouts = (
      [l for x, l in zip(args, in_layouts) if not isinstance(x, ad.ValAccum)] +
      [l for x, l in zip(cts_in, out_layouts) if not isinstance(x, ad.Zero)])
  cts_out_ = tree_unflatten(out_tree, trans_jaxpr.out_avals)
  trans_out_shardings = tuple(s for x, s in zip(cts_out_, in_shardings)
                              if isinstance(x, core.AbstractValue))
  trans_out_layouts = tuple(l for x, l in zip(cts_out_, in_layouts )
                            if isinstance(x, core.AbstractValue))
  try:
    cts_out = jit_p.bind(
        *in_flat, jaxpr=trans_jaxpr, in_shardings=tuple(trans_in_shardings),
        in_layouts=tuple(trans_in_layouts), out_shardings=trans_out_shardings,
        out_layouts=trans_out_layouts, donated_invars=(False,) * len(in_flat),
        ctx_mesh=ctx_mesh, name=name, keep_unused=keep_unused, inline=inline,
        compiler_options_kvs=compiler_options_kvs)
  except api_util.InternalFloatingPointError as e:
    print("Invalid nan value encountered in the backward pass of a jax.jit "
          "function. Calling the de-optimized backward pass.")
    try:
      ad.backward_pass3(jaxpr.jaxpr, False, jaxpr.consts, args, cts_in)
    except (FloatingPointError, ZeroDivisionError) as e2:
      raise e2 from None  # great
    else:
      # If control reaches this line, we got a NaN on the output of `compiled`
      # but not `fun.call_wrapped` on the same arguments. Let's tell the user.
      api_util._raise_no_nan_in_deoptimized(e)
  for x, ct in zip(args, tree_unflatten(out_tree, cts_out)):
    if isinstance(x, ad.ValAccum): x.accum(ct)
@weakref_lru_cache
def _transpose_jaxpr_fancy(jaxpr, in_tree, in_avals, specs):
  """Trace the accumulator-based backward pass to a ClosedJaxpr, cached.

  Returns (closed_jaxpr, out_tree); out_tree is smuggled out of the traced
  function via a mutable cell since tracing only captures return values.
  """
  # A bare lambda used purely as an attribute holder (mutable cell).
  cell = lambda: None
  def transposed(*in_flat):
    primals_ctrefs, cts_in = tree_unflatten(in_tree, in_flat)
    args = ad.unproject_accums(specs, primals_ctrefs)
    ad.backward_pass3(jaxpr.jaxpr, False, jaxpr.consts, args, cts_in)
    cts_out = [x.freeze() if isinstance(x, ad.ValAccum) else None for x in args]
    cts_out, cell.out_tree = tree_flatten(cts_out)  # type: ignore
    return cts_out
  dbg = jaxpr.jaxpr.debug_info.with_unknown_names()
  trans_jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(
      lu.wrap_init(transposed, debug_info=dbg), in_avals)
  return core.ClosedJaxpr(trans_jaxpr, consts), cell.out_tree  # type: ignore
ad.fancy_transposes[jit_p] = _pjit_transpose_fancy
@weakref_lru_cache
def _dce_jaxpr_pjit(
    jaxpr: core.ClosedJaxpr, used_outputs: tuple[bool, ...]
) -> tuple[core.ClosedJaxpr, list[bool]]:
  """DCE the inner jaxpr given which outputs are used; cached by weakref."""
  new_jaxpr, used_inputs = pe.dce_jaxpr(jaxpr.jaxpr, used_outputs)
  return core.ClosedJaxpr(new_jaxpr, jaxpr.consts), used_inputs
def dce_jaxpr_pjit_rule(used_outputs: list[bool], eqn: core.JaxprEqn
                        ) -> tuple[list[bool], core.JaxprEqn | None]:
  """DCE rule for jit_p equations: prune unused outputs (and the inputs that
  thereby become unused), shrinking the per-input/per-output params to match.
  Equations with effects are kept even when no outputs are used."""
  if not any(used_outputs) and not pe.has_effects(eqn):
    return [False] * len(eqn.invars), None
  dced_jaxpr, used_inputs = _dce_jaxpr_pjit(
      eqn.params['jaxpr'], tuple(used_outputs))
  def keep_where(xs, keeps):
    return tuple(x for x, keep in zip(xs, keeps) if keep)
  eqn_params = eqn.params
  new_params = dict(
      eqn_params,
      jaxpr=dced_jaxpr,
      in_shardings=keep_where(eqn_params["in_shardings"], used_inputs),
      out_shardings=keep_where(eqn_params["out_shardings"], used_outputs),
      in_layouts=keep_where(eqn_params["in_layouts"], used_inputs),
      out_layouts=keep_where(eqn_params["out_layouts"], used_outputs),
      donated_invars=keep_where(eqn_params["donated_invars"], used_inputs),
  )
  if not any(used_inputs) and not any(used_outputs) and not dced_jaxpr.effects:
    # Nothing left to compute and no effects: drop the equation entirely.
    return used_inputs, None
  else:
    new_effs = core.eqn_effects(dced_jaxpr)
    new_eqn = core.new_jaxpr_eqn(
        [v for v, used in zip(eqn.invars, used_inputs) if used],
        [v for v, used in zip(eqn.outvars, used_outputs) if used],
        eqn.primitive, new_params, new_effs, eqn.source_info, eqn.ctx)
    return used_inputs, new_eqn
pe.dce_rules[jit_p] = dce_jaxpr_pjit_rule
def _pjit_pp_rule(eqn: core.JaxprEqn,
                  context: core.JaxprPpContext,
                  settings: core.JaxprPpSettings) -> core.pp.Doc:
  """Pretty-printing rule for jit_p equations: elide params that still hold
  their default/trivial values and surface `name=` first for readability."""
  params = dict(eqn.params)
  del params['inline']
  if not any(params['donated_invars']):
    del params['donated_invars']
  if all(isinstance(s, UnspecifiedValue) for s in params['in_shardings']):
    del params['in_shardings']
  if all(isinstance(s, UnspecifiedValue) for s in params['out_shardings']):
    del params['out_shardings']
  if all(l is None for l in params['in_layouts']):
    del params['in_layouts']
  if all(l is None for l in params['out_layouts']):
    del params['out_layouts']
  if not params['keep_unused']:
    del params['keep_unused']
  if params['ctx_mesh'].empty:
    del params['ctx_mesh']
  if not params['compiler_options_kvs']:
    del params['compiler_options_kvs']
  # Reuse outer variable names inside the inner jaxpr when it isn't shared.
  if params['jaxpr'].jaxpr not in context.shared_jaxprs:
    context.suggest_same_var_names(params['jaxpr'].jaxpr.invars, eqn.invars)
    context.suggest_same_var_names(params['jaxpr'].jaxpr.outvars, eqn.outvars)
  # Move name= to the front to make the resulting equation easier to scan.
  del params["name"]
  return core._pp_eqn(eqn, context, settings, params=["name"] + sorted(params))
core.pp_eqn_rules[jit_p] = _pjit_pp_rule
# -------------------- with_sharding_constraint --------------------
def check_shardings_are_auto(s: Sharding) -> None:
  """Validate that a NamedSharding's spec mentions only Auto mesh axes.

  Non-NamedSharding inputs pass through unchecked. Raises ValueError when any
  referenced mesh axis is not of AxisType.Auto.
  """
  if not isinstance(s, NamedSharding):
    return
  mesh = s.mesh.abstract_mesh
  for axes in s.spec:
    if axes is None or axes is PartitionSpec.UNCONSTRAINED:
      continue
    axis_names = axes if isinstance(axes, tuple) else (axes,)
    for axis_name in axis_names:
      if mesh._name_to_type[axis_name] != mesh_lib.AxisType.Auto:
        raise ValueError(
            'The spec of NamedSharding passed to with_sharding_constraint can'
            f' only refer to Auto axes of the mesh. Got spec={s.spec} and'
            f' mesh={mesh}. You probably meant to use `reshard` API?')
def assert_shardings_equal(x_aval, user_sharding: NamedSharding):
  """Assert that x_aval's sharding spec matches the user-provided spec.

  UNCONSTRAINED entries in the user spec are skipped. Raises AssertionError
  on the first mismatching axis entry.
  """
  actual_spec = x_aval.sharding.spec
  expected_spec = user_sharding.spec._normalized_spec_for_aval(x_aval.ndim)
  if config.remove_size_one_mesh_axis_from_type.value:
    expected_spec = core.remove_size_one_mesh_axis(
        expected_spec, user_sharding.mesh)
  for actual, expected in zip(actual_spec, expected_spec):
    if expected is not PartitionSpec.UNCONSTRAINED and actual != expected:
      raise AssertionError(
          '`with_sharding_constraint` acts as an assert when all axes of'
          f' mesh are of type `Explicit`. The array sharding: {actual_spec} did'
          f' not match the sharding provided: {expected_spec}. Please use'
          ' `jax.sharding.reshard` to shard your input to the sharding you'
          ' want.')
def with_sharding_constraint(x, shardings):
  """Mechanism to constrain the sharding of an Array inside a jitted computation
  This is a strict constraint for the GSPMD partitioner and not a hint. For examples
  of how to use this function, see `Distributed arrays and automatic parallelization`_.
  Inside of a jitted computation, with_sharding_constraint makes it possible to
  constrain intermediate values to an uneven sharding. However, if such an
  unevenly sharded value is output by the jitted computation, it will come out
  as fully replicated, no matter the sharding annotation given.
  Args:
    x: PyTree of jax.Arrays which will have their shardings constrained
    shardings: PyTree of sharding specifications. Valid values are the same as for
      the ``in_shardings`` argument of :func:`jax.experimental.pjit`.
  Returns:
    x_with_shardings: PyTree of jax.Arrays with specified sharding constraints.
  .. _Distributed arrays and automatic parallelization: https://docs.jax.dev/en/latest/notebooks/Distributed_arrays_and_automatic_parallelization.html
  """
  x_flat, tree = tree_flatten(x)
  x_avals_flat = [core.shaped_abstractify(x) for x in x_flat]
  # `shardings` may mix layouts with shardings; separate the two pytrees.
  layouts, shardings = _split_layout_and_sharding(shardings)
  user_shardings = prepare_axis_resources(
      shardings, "shardings", allow_unconstrained_dims=True)
  del shardings
  # Broadcast the (possibly prefix) pytrees of shardings/layouts over `x`.
  user_shardings_flat = tuple(
      flatten_axes("with_sharding_constraint shardings", tree, user_shardings))
  del user_shardings
  user_layouts_flat = tuple(
      flatten_axes("with_sharding_constraint layouts", tree, layouts))
  del layouts
  # Resolve the mesh the constraint is interpreted against: the current
  # abstract mesh when any mesh context is active, otherwise the legacy
  # thread-local physical mesh.
  if not mesh_lib.get_concrete_mesh().empty:
    context_mesh = mesh_lib.get_abstract_mesh()
  elif not mesh_lib.get_abstract_mesh().empty:
    context_mesh = mesh_lib.get_abstract_mesh()
  else:
    context_mesh = mesh_lib.thread_resources.env.physical_mesh
  shardings_flat = [_create_sharding_for_array(context_mesh, a, 'shardings',
                                               'with_sharding_constraint')
                    for a in user_shardings_flat]
  # Unspecified/AUTO are meaningless as constraints; reject them eagerly.
  for s, u in zip(shardings_flat, user_shardings_flat):
    if isinstance(s, (UnspecifiedValue, AUTO)):
      raise ValueError(
          f'One of with_sharding_constraint arguments got sharding {u} which is'
          ' not allowed. Please only pass `jax.sharding.Sharding` instances.')
  del user_shardings_flat
  # TODO(bartchr): remove `unconstrained_dims` after migrating to Shardy. It's
  # already part of the shardings.
  unconstrained_dims = [get_unconstrained_dims(s)
                        if isinstance(s, NamedSharding) else frozenset()
                        for s in shardings_flat]
  pjit_check_aval_sharding(
      shardings_flat, x_avals_flat, ("",) * len(shardings_flat),
      "with_sharding_constraint arguments",
      allow_uneven_sharding=True, allow_partial_manual=True)
  check_aval_layout_compatibility(user_layouts_flat, x_avals_flat,
                                  ("",) * len(user_layouts_flat),
                                  "with_sharding_constraint arguments")
  outs = []
  for xf, x_aval, s, l, ud in zip(x_flat, x_avals_flat, shardings_flat,
                                  user_layouts_flat, unconstrained_dims):
    if (mesh_lib.get_abstract_mesh().are_all_axes_explicit and l is None and
        isinstance(s, NamedSharding)):
      # With all mesh axes Explicit, the aval already tracks its sharding, so
      # the constraint degenerates to an assertion against the existing spec.
      assert_shardings_equal(x_aval, s)
      outs.append(xf)
    else:
      check_shardings_are_auto(s)
      outs.append(sharding_constraint_p.bind(
          xf, sharding=s, layout=l, context_mesh=context_mesh,
          unconstrained_dims=ud))
  return tree_unflatten(tree, outs)
def _identity_fn(x): return x
def _sharding_constraint_impl(x, sharding, layout, context_mesh,
                              unconstrained_dims):
  # Eager-mode implementation: apply the constraint by running a jitted
  # identity function with the requested out_shardings (and layout, if any).
  if (isinstance(sharding, NamedSharding) and
      isinstance(sharding.mesh, AbstractMesh)):
    # The target sharding names only an abstract mesh; bind it to a concrete
    # mesh before any data can actually be moved.
    if (not context_mesh.empty and isinstance(context_mesh, AbstractMesh) and
        not hasattr(x, 'sharding')):
      # No input sharding to borrow a mesh from: use the concrete mesh
      # backing the current mesh context.
      concrete_mesh = mesh_lib.get_concrete_mesh()
      assert not concrete_mesh.empty
      sharding = NamedSharding(concrete_mesh, sharding.spec)
    else:
      aval = core.shaped_abstractify(x)
      if not hasattr(x, 'sharding'):
        raise ValueError(
            'Target sharding contains a `jax.sharding.AbstractMesh` which'
            ' requires the input passed should be a `jax.Array`. Got'
            f' {type(x)} with shape {aval.str_short()}')
      if not isinstance(x.sharding, NamedSharding):
        raise TypeError(
            'The sharding on the input must be a `NamedSharding` since the'
            ' target sharding has an `AbstractMesh` in it. Got sharding type'
            f' {type(x.sharding)} for shape {aval.str_short()}')
      if x.sharding.mesh.shape_tuple != sharding.mesh.shape_tuple:
        raise ValueError(
            f'Mesh shape of the input {x.sharding.mesh.shape_tuple} does not'
            ' match the mesh shape of the target sharding'
            f' {sharding.mesh.shape_tuple} for shape {aval.str_short()}')
      # Borrow the input's concrete mesh while keeping the requested spec.
      sharding = NamedSharding(x.sharding.mesh, sharding.spec)
  if layout is None:
    # Run a jit here to raise good errors when device assignment don't match.
    return api.jit(_identity_fn, out_shardings=sharding)(x)
  else:
    return api.jit(_identity_fn, out_shardings=Format(layout, sharding))(x)
sharding_constraint_p = core.Primitive("sharding_constraint")
sharding_constraint_p.def_impl(_sharding_constraint_impl)
# The constraint is linear: transposition applies the same constraint
# (identical params) to the cotangent.
ad.deflinear2(sharding_constraint_p,
              lambda ct, _, **params: (sharding_constraint_p.bind(ct, **params),))
def _sharding_constraint_abstract_eval(
    x_aval, *, sharding, layout, context_mesh, unconstrained_dims):
  """Abstract eval: keep the input's sharding, retargeted to the constraint's
  abstract mesh for NamedShardings; otherwise drop type-level sharding."""
  if not isinstance(sharding, NamedSharding):
    return x_aval.update(sharding=None)
  retargeted = x_aval.sharding.update(mesh=sharding.mesh.abstract_mesh)
  return x_aval.update(sharding=retargeted)
sharding_constraint_p.def_abstract_eval(_sharding_constraint_abstract_eval)
def _sharding_constraint_hlo_lowering(ctx, x_node, *, sharding, layout,
                                      context_mesh, unconstrained_dims):
  # Lowering rule: wrap the value in a sharding (and optionally layout) op.
  in_aval, = ctx.avals_in
  out_aval, = ctx.avals_out
  axis_ctx = ctx.module_context.axis_context
  if (isinstance(sharding, NamedSharding) and
      any(o is not None for o in out_aval.sharding.spec)):
    # The output aval already carries sharding on some dims; merge the user
    # spec on top of the aval spec, dimension by dimension.
    spec = sharding.spec._normalized_spec_for_aval(in_aval.ndim)
    new_spec = []
    for user_spec, aval_spec in zip(spec, out_aval.sharding.spec):
      if aval_spec is None:
        # Aval has nothing on this dim: take the user's entry as-is.
        new_spec.append(user_spec)
      else:
        aval_spec = aval_spec if isinstance(aval_spec, tuple) else (aval_spec,)
        if user_spec is PartitionSpec.UNCONSTRAINED:
          # Cannot combine an UNCONSTRAINED user entry with an aval-sharded
          # dim.
          raise NotImplementedError
        if user_spec is None:
          new_spec.append(aval_spec)
        elif isinstance(user_spec, tuple):
          # Concatenate: aval axes first, then the user's axes.
          new_spec.append(aval_spec + user_spec)
        else:
          new_spec.append(aval_spec + (user_spec,))
    sharding = sharding.update(spec=new_spec)
  if dtypes.issubdtype(in_aval.dtype, dtypes.extended):
    # Extended dtypes lower in terms of their physical representation.
    in_aval = core.physical_aval(in_aval)
  if (isinstance(axis_ctx, sharding_impls.SPMDAxisContext) and
      axis_ctx.manual_axes):
    sharding = mlir.add_manual_axes(axis_ctx, sharding, in_aval.ndim)
  # Convert to the partitioner-specific attribute/proto representation.
  if config.use_shardy_partitioner.value:
    sharding = sharding._to_sdy_sharding(in_aval.ndim)
  else:
    sharding = sharding._to_xla_hlo_sharding(in_aval.ndim).to_proto()
  out = mlir.wrap_with_sharding_op(
      ctx, x_node, out_aval, sharding, unspecified_dims=unconstrained_dims)
  if layout is not None:
    out = mlir.wrap_with_layout_op(ctx, out, out_aval, layout, in_aval)
  return [out]
mlir.register_lowering(sharding_constraint_p,
                       _sharding_constraint_hlo_lowering)
def _sharding_constraint_batcher(
    axis_data, vals_in, dims_in, sharding, layout, context_mesh,
    unconstrained_dims):
  # Batching (vmap) rule for sharding_constraint.
  if axis_data.spmd_name is not None and isinstance(sharding, NamedSharding):
    # A vmap spmd_axis_name may not also be used inside the user's spec.
    used = {n for ns in sharding.spec
            for n in (ns if isinstance(ns, tuple) else (ns,))}
    if set(axis_data.spmd_name) & used:
      raise ValueError(f"vmap spmd_axis_name {axis_data.spmd_name} cannot appear in "
                       "with_sharding_constraint spec, but got spec "
                       f"{sharding.spec}")
  x, = vals_in
  d, = dims_in
  # Shift recorded unconstrained dims to account for the inserted batch dim.
  unconstrained_dims = {ud + (d <= ud) for ud in unconstrained_dims}
  if axis_data.spmd_name is None:
    # Without an spmd axis name, the batch dim itself is unconstrained.
    unconstrained_dims.add(d)
  vmapped_sharding = _pjit_batcher_for_sharding(
      sharding, d, axis_data.spmd_name, context_mesh, x.ndim)
  if unconstrained_dims and isinstance(vmapped_sharding, NamedSharding):
    # Pad the spec to full rank, then mark the unconstrained dims explicitly.
    new_spec = list(vmapped_sharding.spec) + [None] * (x.ndim - len(vmapped_sharding.spec))
    for u in unconstrained_dims:
      new_spec[u] = PartitionSpec.UNCONSTRAINED
    vmapped_sharding = NamedSharding(
        vmapped_sharding.mesh, PartitionSpec(*new_spec))
  vmapped_layout = (get_layout_for_vmap(d, layout) if layout is not None else
                    layout)
  y = sharding_constraint_p.bind(
      x,
      sharding=vmapped_sharding,
      layout=vmapped_layout,
      context_mesh=context_mesh,
      unconstrained_dims=frozenset(unconstrained_dims))
  return y, d
batching.fancy_primitive_batchers[sharding_constraint_p] = _sharding_constraint_batcher
batching.skippable_batchers[sharding_constraint_p] = lambda _: ()
# -------------------- reshard ------------------------------------
def reshard(xs, out_shardings):
  """Reshards every array in the pytree ``xs`` to the matching sharding in
  ``out_shardings``.

  Each target sharding must canonicalize to a non-None sharding with a
  nonempty mesh; otherwise a ValueError is raised.
  """
  x_flat, treedef = tree_flatten(xs)
  shardings_flat = flatten_axis_resources(
      "reshard out_shardings", treedef, out_shardings, tupled_args=True)
  x_avals_flat = [core.shaped_abstractify(x) for x in x_flat]
  out_flat = []
  for x, x_aval, s in safe_zip(x_flat, x_avals_flat, shardings_flat):
    # check_mesh_consistency=False: skip the context-mesh consistency check
    # when canonicalizing the target sharding.
    ds = canonicalize_sharding(s, 'reshard', check_mesh_consistency=False)
    if ds is None:
      raise ValueError(
          'Reshard should only be used with out_shardings which are non-None '
          f'and have a nonempty mesh. Got sharding {s}.'
      )
    # Expand the spec to the array's full rank before binding the primitive.
    ds = ds.update(spec=ds.spec._normalized_spec_for_aval(x_aval.ndim))  # pytype: disable=attribute-error
    out_flat.append(reshard_p.bind(x, dst_sharding=ds))
  return tree_unflatten(treedef, out_flat)
reshard_p = core.Primitive('reshard')
# NOTE(review): presumably set because `reshard` canonicalizes dst_sharding
# itself before binding — confirm against core's canonicalization machinery.
reshard_p.skip_canonicalization = True
def _reshard_abstract_eval(aval, dst_sharding):
  """Abstract eval: the output aval is the input with `dst_sharding` applied
  (returned unchanged when the sharding already matches)."""
  assert isinstance(aval, core.ShapedArray)
  if aval.sharding != dst_sharding:
    return aval.update(sharding=dst_sharding)
  return aval
reshard_p.def_abstract_eval(_reshard_abstract_eval)
def _reshard_impl(x, dst_sharding):
  # Eager mode: route through the standard primitive dispatch path.
  return dispatch.apply_primitive(reshard_p, x, dst_sharding=dst_sharding)
reshard_p.def_impl(_reshard_impl)
def _reshard_transpose_rule(ct, x, dst_sharding):
  # reshard is linear: transpose by resharding the cotangent back to the
  # primal input's cotangent sharding, inside that sharding's mesh context.
  assert ad.is_undefined_primal(x)
  out_sharding = x.aval.to_cotangent_aval().sharding
  with mesh_lib.use_abstract_mesh(out_sharding.mesh):
    x_bar = reshard_p.bind(ct, dst_sharding=out_sharding)
  return [x_bar]
ad.deflinear2(reshard_p, _reshard_transpose_rule)
def _reshard_transpose_fancy(ct, x, dst_sharding):
  # Same as _reshard_transpose_rule, but accumulates the cotangent into the
  # GradAccum instead of returning it.
  assert isinstance(x, ad.GradAccum)
  out_sharding = x.aval.to_cotangent_aval().sharding
  with mesh_lib.use_abstract_mesh(out_sharding.mesh):
    x_bar = reshard_p.bind(ct, dst_sharding=out_sharding)
  x.accum(x_bar)
ad.fancy_transposes[reshard_p] = _reshard_transpose_fancy
def _reshard_hlo_lowering(ctx, x_node, *, dst_sharding):
  """Lowering rule: attach the destination sharding to the lowered value."""
  aval_in, = ctx.avals_in
  aval_out, = ctx.avals_out
  if dtypes.issubdtype(aval_in.dtype, dtypes.extended):
    # Extended dtypes lower in terms of their physical representation.
    aval_in = core.physical_aval(aval_in)
  if config.use_shardy_partitioner.value:
    proto = dst_sharding._to_sdy_sharding(aval_in.ndim)
  else:
    proto = dst_sharding._to_xla_hlo_sharding(aval_in.ndim).to_proto()
  return [mlir.lower_with_sharding_in_types(ctx, x_node, aval_out, proto)]
mlir.register_lowering(reshard_p, _reshard_hlo_lowering)
def _reshard_batcher(axis_data, vals_in, dims_in, dst_sharding):
  """Batching rule: reshard to a sharding extended over the mapped dim."""
  operand, = vals_in
  bdim, = dims_in
  batched_sharding = batching.get_sharding_for_vmap(
      axis_data, dst_sharding, bdim)
  out = reshard_p.bind(operand, dst_sharding=batched_sharding)
  return out, bdim
batching.fancy_primitive_batchers[reshard_p] = _reshard_batcher
batching.skippable_batchers[reshard_p] = lambda _: ()
# -------------------- auto and user mode -------------------------
def _get_new_mesh(axes: str | tuple[str, ...] | None,
                  axis_type: mesh_lib.AxisType, name: str, shardings=None):
  # Computes the mesh for entering an auto_axes/explicit_axes region: picks a
  # base mesh (current context or derived from `shardings`) and retypes
  # `axes` to `axis_type`.  Returns (new_mesh, base_mesh, axes_tuple).
  cur_mesh = mesh_lib.get_abstract_mesh()
  # All NamedShardings passed in must agree on a single mesh.
  flat_shardings, _ = tree_flatten(shardings)
  sharding_mesh = mesh_lib.empty_abstract_mesh
  for i in flat_shardings:
    if isinstance(i, NamedSharding):
      if not sharding_mesh.empty and sharding_mesh != i.mesh.abstract_mesh:
        raise ValueError(
            f'Shardings passed to {name} should have the same mesh. Got one'
            f' mesh {sharding_mesh} and another {i.mesh}')
      sharding_mesh = i.mesh.abstract_mesh
  if sharding_mesh.empty and cur_mesh.empty:
    # No mesh available from either source.
    raise ValueError(
        f'Context mesh {cur_mesh} cannot be empty. Please use'
        ' `jax.set_mesh` API to enter into a mesh context when using'
        f' `{name}` API.')
  if not sharding_mesh.empty and not cur_mesh.empty:
    # Both available: they must agree; the context mesh is used.
    if sharding_mesh != cur_mesh:
      raise ValueError(
          f'Context mesh {cur_mesh} must match the mesh passed to shardings'
          f' {sharding_mesh}. Recommended approach is to use'
          ' `jax.set_mesh` context manager.')
    mesh_to_use = cur_mesh
  elif sharding_mesh.empty and not cur_mesh.empty:
    mesh_to_use = cur_mesh
  else:
    assert not sharding_mesh.empty and cur_mesh.empty
    mesh_to_use = sharding_mesh
  if axes is None:
    # Default: retype every axis of the mesh.
    axes = mesh_to_use.axis_names
  if not isinstance(axes, tuple):
    axes = (axes,)
  for a in axes:
    if (mesh_to_use._name_to_type[a] == mesh_lib.AxisType.Manual and
        axis_type in {mesh_lib.AxisType.Auto, mesh_lib.AxisType.Explicit}):
      raise NotImplementedError(
          'Going from `Manual` AxisType to `Auto` or `Explicit` AxisType is not'
          ' allowed. Please file a bug at https://github.com/jax-ml/jax/issues'
          ' with your use case')
  return (mesh_to_use.update_axis_types({a: axis_type for a in axes}),
          mesh_to_use, axes)
def auto_axes(f=None, /, *, axes: str | tuple[str, ...] | None = None,
              out_sharding=None):
  """Decorator that runs `f` with the given mesh axes switched to Auto.

  Usable both bare (``auto_axes(f)``) and with arguments
  (``@auto_axes(axes=..., out_sharding=...)``).
  """
  if f is not None:
    return _auto_axes(f, axes_=axes, out_sharding=out_sharding)
  def wrap(g):
    return _auto_axes(g, axes_=axes, out_sharding=out_sharding)
  return wrap
def _auto_axes(fun, *, axes_, out_sharding):
  @wraps(fun)
  def decorator(*args, **kwargs):
    # out_sharding may be supplied either at decoration time or per-call.
    if out_sharding is None:
      if "out_sharding" in kwargs:
        _out_sharding = kwargs.pop("out_sharding")
      else:
        raise TypeError("Missing required keyword argument: 'out_sharding'")
    else:
      _out_sharding = out_sharding
    new_mesh, prev_mesh, axes = _get_new_mesh(
        axes_, mesh_lib.AxisType.Auto, 'auto_axes', shardings=_out_sharding)
    if set(prev_mesh.auto_axes) == set(axes):
      # Requested axes are already Auto: nothing to switch, call through.
      return fun(*args, **kwargs)
    with mesh_lib.use_abstract_mesh(new_mesh):
      # Rewrite the inputs' specs for the retyped mesh (see
      # core.modify_spec_for_auto_manual), then reshard into the new context.
      in_specs = tree_map(lambda a: core.modify_spec_for_auto_manual(
          core.get_aval(a).sharding.spec, new_mesh), args)
      args = reshard(args, in_specs)
      out = fun(*args, **kwargs)
    # Back in the caller's mesh: reshard outputs to the requested shardings.
    return reshard(out, _out_sharding)
  return decorator
def explicit_axes(f=None, /, *, axes: str | tuple[str, ...] | None = None,
                  in_sharding=None):
  """Decorator that runs `f` with the given mesh axes switched to Explicit.

  Usable both bare (``explicit_axes(f)``) and with arguments
  (``@explicit_axes(axes=..., in_sharding=...)``).
  """
  if f is not None:
    return _explicit_axes(f, axes=axes, in_sharding=in_sharding)
  def wrap(g):
    return _explicit_axes(g, axes=axes, in_sharding=in_sharding)
  return wrap
def _explicit_axes(fun, *, axes, in_sharding):
  @wraps(fun)
  def decorator(*args, **kwargs):
    # in_sharding may be supplied either at decoration time or per-call.
    if in_sharding is None:
      if "in_sharding" in kwargs:
        _in_sharding = kwargs.pop("in_sharding")
      else:
        raise TypeError("Missing required keyword argument: 'in_sharding'")
    else:
      _in_sharding = in_sharding
    new_mesh, _, _ = _get_new_mesh(axes, mesh_lib.AxisType.Explicit,
                                   'explicit_axes')
    with mesh_lib.use_abstract_mesh(new_mesh):
      args = reshard(args, _in_sharding)
      out = fun(*args, **kwargs)
    # Back in the caller's mesh: rewrite the outputs' specs for that mesh
    # (see core.modify_spec_for_auto_manual) and reshard accordingly.
    out_specs = tree_map(lambda o: core.modify_spec_for_auto_manual(
        core.get_aval(o).sharding.spec, mesh_lib.get_abstract_mesh()), out)
    return reshard(out, out_specs)
  return decorator
# -------------------- with_layout_constraint --------------------
def with_layout_constraint(x, layouts):
  """Constrains the memory layout of each array in the pytree ``x`` to the
  matching entry in ``layouts``.

  Every layout must be a `Layout` instance; raises ValueError otherwise.
  """
  x_flat, tree = tree_flatten(x)
  x_avals_flat = [core.shaped_abstractify(x) for x in x_flat]
  # Broadcast the (possibly prefix) pytree of layouts over the input tree.
  layouts_flat = tuple(flatten_axes("with_layout_constraint layouts", tree,
                                    layouts))
  if any(not isinstance(l, Layout) for l in layouts_flat):
    raise ValueError(
        'layouts passed to `with_layout_constraint` must be of type'
        f' `Layout`. Got {[type(l) for l in layouts_flat]}')
  check_aval_layout_compatibility(
      layouts_flat, x_avals_flat, ("",) * len(layouts_flat),
      "with_layout_constraint arguments")
  outs = [layout_constraint_p.bind(xf, layout=l)
          for xf, l in zip(x_flat, layouts_flat)]
  return tree_unflatten(tree, outs)
layout_constraint_p = core.Primitive('layout_constraint')
# A layout constraint does not change the abstract value.
layout_constraint_p.def_abstract_eval(lambda x, **_: x)
# Linear: transposition applies the same layout constraint to the cotangent.
ad.deflinear2(layout_constraint_p,
              lambda ct, _, **params: (layout_constraint_p.bind(ct, **params),))
def _layout_constraint_impl(x, *, layout):
  # Eager mode requires a concrete jax.Array (xc.ArrayImpl), since only those
  # expose a `.format` to compare against.
  if not isinstance(x, xc.ArrayImpl):
    raise ValueError(
        'with_layout_constraint in eager mode can only be applied to'
        f' jax.Arrays. Got {type(x)}')
  if x.format.layout == layout:  # type: ignore
    # Already in the requested layout: no work to do.
    return x
  # Relayout via a jitted identity with an explicit output Format.
  return api.jit(_identity_fn, out_shardings=Format(layout, x.sharding))(x)
layout_constraint_p.def_impl(_layout_constraint_impl)
def _layout_constraint_hlo_lowering(ctx, x_node, *, layout):
  """Lowering rule: wrap the lowered value in a layout op."""
  in_aval, = ctx.avals_in
  result_aval, = ctx.avals_out
  wrapped = mlir.wrap_with_layout_op(ctx, x_node, result_aval, layout, in_aval)
  return [wrapped]
mlir.register_lowering(layout_constraint_p,
                       _layout_constraint_hlo_lowering)
def _layout_constraint_batcher(axis_data, vals_in, dims_in, layout):
  """Batching rule: extend the constrained layout over the mapped dim."""
  operand, = vals_in
  bdim, = dims_in
  batched_layout = get_layout_for_vmap(bdim, layout)
  out = layout_constraint_p.bind(operand, layout=batched_layout)
  return out, bdim
batching.fancy_primitive_batchers[layout_constraint_p] = _layout_constraint_batcher
batching.skippable_batchers[layout_constraint_p] = lambda _: ()
# -------------------- helpers --------------------
def get_unconstrained_dims(sharding: NamedSharding):
  """Returns the set of dims whose spec entry is PartitionSpec.UNCONSTRAINED."""
  assert sharding.spec is not None
  unconstrained = set()
  for dim, axes in enumerate(sharding.spec):
    if axes is PartitionSpec.UNCONSTRAINED:
      unconstrained.add(dim)
  return frozenset(unconstrained)
| MetaTy |
python | tiangolo__fastapi | docs_src/body_nested_models/tutorial008_py39.py | {
"start": 87,
"end": 248
} | class ____(BaseModel):
url: HttpUrl
name: str
@app.post("/images/multiple/")
async def create_multiple_images(images: list[Image]):
return images
| Image |
python | huggingface__transformers | src/transformers/models/llava_onevision/modular_llava_onevision.py | {
"start": 9214,
"end": 9307
} | class ____(LlavaNextVideoCausalLMOutputWithPast):
pass
| LlavaOnevisionCausalLMOutputWithPast |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_merge_range02.py | {
"start": 315,
"end": 894
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("merge_range02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format = workbook.add_format({"align": "center"})
worksheet.merge_range(1, 1, 5, 3, "Foo", cell_format)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | euske__pdfminer | pdfminer/psparser.py | {
"start": 17277,
"end": 20135
} | class ____(unittest.TestCase):
TESTDATA = br'''%!PS
begin end
" @ #
/a/BCD /Some_Name /foo#5f#xbaa
0 +1 -2 .5 1.234
(abc) () (abc ( def ) ghi)
(def\040\0\0404ghi) (bach\\slask) (foo\nbaa)
(this % is not a comment.)
(foo
baa)
(foo\
baa)
<> <20> < 40 4020 >
<abcd00
12345>
func/a/b{(c)do*}def
[ 1 (z) ! ]
<< /foo (bar) >>
'''
TOKENS = [
(5, KWD(b'begin')), (11, KWD(b'end')), (16, KWD(b'"')), (19, KWD(b'@')),
(21, KWD(b'#')), (23, LIT('a')), (25, LIT('BCD')), (30, LIT('Some_Name')),
(41, LIT('foo_xbaa')), (54, 0), (56, 1), (59, -2), (62, 0.5),
(65, 1.234), (71, b'abc'), (77, b''), (80, b'abc ( def ) ghi'),
(98, b'def \x00 4ghi'), (118, b'bach\\slask'), (132, b'foo\nbaa'),
(143, b'this % is not a comment.'), (170, b'foo\nbaa'), (180, b'foobaa'),
(191, b''), (194, b' '), (199, b'@@ '), (211, b'\xab\xcd\x00\x124\x05'),
(226, KWD(b'func')), (230, LIT('a')), (232, LIT('b')),
(234, KWD(b'{')), (235, b'c'), (238, KWD(b'do*')), (241, KWD(b'}')),
(242, KWD(b'def')), (246, KWD(b'[')), (248, 1), (250, b'z'), (254, KWD(b'!')),
(256, KWD(b']')), (258, KWD(b'<<')), (261, LIT('foo')), (266, b'bar'),
(272, KWD(b'>>'))
]
OBJS = [
(23, LIT('a')), (25, LIT('BCD')), (30, LIT('Some_Name')),
(41, LIT('foo_xbaa')), (54, 0), (56, 1), (59, -2), (62, 0.5),
(65, 1.234), (71, b'abc'), (77, b''), (80, b'abc ( def ) ghi'),
(98, b'def \x00 4ghi'), (118, b'bach\\slask'), (132, b'foo\nbaa'),
(143, b'this % is not a comment.'), (170, b'foo\nbaa'), (180, b'foobaa'),
(191, b''), (194, b' '), (199, b'@@ '), (211, b'\xab\xcd\x00\x124\x05'),
(230, LIT('a')), (232, LIT('b')), (234, [b'c']), (246, [1, b'z']),
(258, {'foo': b'bar'}),
]
def get_tokens(self, s):
from io import BytesIO
class MyParser(PSBaseParser):
def flush(self):
self.add_results(*self.popall())
parser = MyParser(BytesIO(s))
r = []
try:
while 1:
r.append(parser.nexttoken())
except PSEOF:
pass
return r
def get_objects(self, s):
from io import BytesIO
class MyParser(PSStackParser):
def flush(self):
self.add_results(*self.popall())
parser = MyParser(BytesIO(s))
r = []
try:
while 1:
r.append(parser.nextobject())
except PSEOF:
pass
return r
def test_1(self):
tokens = self.get_tokens(self.TESTDATA)
print(tokens)
self.assertEqual(tokens, self.TOKENS)
return
def test_2(self):
objs = self.get_objects(self.TESTDATA)
print(objs)
self.assertEqual(objs, self.OBJS)
return
if __name__ == '__main__':
unittest.main()
| TestPSBaseParser |
python | django__django | tests/urlpatterns/test_resolvers.py | {
"start": 863,
"end": 1322
} | class ____(SimpleTestCase):
@override_settings(ROOT_URLCONF="urlpatterns.path_urls")
def test_resolver_cache_default__root_urlconf(self):
# resolver for a default URLconf (passing no argument) and for the
# settings.ROOT_URLCONF is the same cached object.
self.assertIs(get_resolver(), get_resolver("urlpatterns.path_urls"))
self.assertIsNot(get_resolver(), get_resolver("urlpatterns.path_dynamic_urls"))
| ResolverCacheTests |
python | google__pytype | pytype/tests/test_basic1.py | {
"start": 10826,
"end": 11837
} | class ____(test_base.BaseTest):
"""Loop tests."""
def test_for(self):
self.Check("""
for i in range(10):
print(i)
print("done")
""")
def test_break(self):
self.Check("""
for i in range(10):
print(i)
if i == 7:
break
print("done")
""")
def test_continue(self):
# fun fact: this doesn't use CONTINUE_LOOP
self.Check("""
for i in range(10):
if i % 3 == 0:
continue
print(i)
print("done")
""")
def test_continue_in_try_except(self):
self.Check("""
for i in range(10):
try:
if i % 3 == 0:
continue
print(i)
except ValueError:
pass
print("done")
""")
def test_continue_in_try_finally(self):
self.Check("""
for i in range(10):
try:
if i % 3 == 0:
continue
print(i)
finally:
print(".")
print("done")
""")
| TestLoops |
python | ansible__ansible | lib/ansible/modules/user.py | {
"start": 116784,
"end": 123945
} | class ____(BusyBox):
platform = 'Linux'
distribution = 'Buildroot'
def main():
ssh_defaults = dict(
bits=0,
type='rsa',
passphrase=None,
comment='ansible-generated on %s' % socket.gethostname()
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True, aliases=['user']),
uid=dict(type='int'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
groups=dict(type='list', elements='str'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='path'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
password_expire_max=dict(type='int', no_log=False),
password_expire_min=dict(type='int', no_log=False),
password_expire_warn=dict(type='int', no_log=False),
# following options are specific to macOS
hidden=dict(type='bool'),
# following options are specific to selinux
seuser=dict(type='str'),
# following options are specific to userdel
force=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
# following options are specific to useradd
create_home=dict(type='bool', default=True, aliases=['createhome']),
skeleton=dict(type='str'),
system=dict(type='bool', default=False),
# following options are specific to usermod
move_home=dict(type='bool', default=False),
append=dict(type='bool', default=False),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
ssh_key_type=dict(type='str', default=ssh_defaults['type']),
ssh_key_file=dict(type='path'),
ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
ssh_key_passphrase=dict(type='str', no_log=True),
update_password=dict(type='str', default='always', choices=['always', 'on_create'], no_log=False),
expires=dict(type='float'),
password_lock=dict(type='bool', no_log=False),
local=dict(type='bool'),
profile=dict(type='str'),
authorization=dict(type='str'),
role=dict(type='str'),
umask=dict(type='str'),
password_expire_account_disable=dict(type='int', no_log=False),
uid_min=dict(type='int'),
uid_max=dict(type='int'),
),
supports_check_mode=True,
)
user = User(module)
user.check_password_encrypted()
module.debug('User instantiated - platform %s' % user.platform)
if user.distribution:
module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
# Check to see if the provided home path contains parent directories
# that do not exist.
path_needs_parents = False
if user.home and user.create_home:
parent = os.path.dirname(user.home)
if not os.path.isdir(parent):
path_needs_parents = True
(rc, out, err) = user.create_user()
# If the home path had parent directories that needed to be created,
# make sure file permissions are correct in the created home directory.
if path_needs_parents:
info = user.user_info()
if info is not False:
user.chown_homedir(info[2], info[3], user.home)
if module.check_mode:
result['system'] = user.name
else:
result['system'] = user.system
result['create_home'] = user.create_home
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists() and user.state == 'present':
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.create_home:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
# deal with ssh key
if user.sshkeygen:
# generate ssh key (note: this function is check mode aware)
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
(rc, out, err) = user.set_password_expire()
if rc is None:
pass # target state reached, nothing to do
else:
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
else:
result['changed'] = True
module.exit_json(**result)
# import module snippets
if __name__ == '__main__':
main()
| Buildroot |
python | pypa__pip | src/pip/_internal/req/__init__.py | {
"start": 578,
"end": 3041
} | class ____:
name: str
def _validate_requirements(
requirements: list[InstallRequirement],
) -> Generator[tuple[str, InstallRequirement], None, None]:
for req in requirements:
assert req.name, f"invalid to-be-installed requirement: {req}"
yield req.name, req
def install_given_reqs(
requirements: list[InstallRequirement],
root: str | None,
home: str | None,
prefix: str | None,
warn_script_location: bool,
use_user_site: bool,
pycompile: bool,
progress_bar: BarType,
) -> list[InstallationResult]:
"""
Install everything in the given list.
(to be called after having downloaded and unpacked the packages)
"""
to_install = collections.OrderedDict(_validate_requirements(requirements))
if to_install:
logger.info(
"Installing collected packages: %s",
", ".join(to_install.keys()),
)
installed = []
show_progress = logger.isEnabledFor(logging.INFO) and len(to_install) > 1
items = iter(to_install.values())
if show_progress:
renderer = get_install_progress_renderer(
bar_type=progress_bar, total=len(to_install)
)
items = renderer(items)
with indent_log():
for requirement in items:
req_name = requirement.name
assert req_name is not None
if requirement.should_reinstall:
logger.info("Attempting uninstall: %s", req_name)
with indent_log():
uninstalled_pathset = requirement.uninstall(auto_confirm=True)
else:
uninstalled_pathset = None
try:
requirement.install(
root=root,
home=home,
prefix=prefix,
warn_script_location=warn_script_location,
use_user_site=use_user_site,
pycompile=pycompile,
)
except Exception:
# if install did not succeed, rollback previous uninstall
if uninstalled_pathset and not requirement.install_succeeded:
uninstalled_pathset.rollback()
raise
else:
if uninstalled_pathset and requirement.install_succeeded:
uninstalled_pathset.commit()
installed.append(InstallationResult(req_name))
return installed
| InstallationResult |
python | pytest-dev__pytest | src/_pytest/fixtures.py | {
"start": 26931,
"end": 30117
} | class ____(FixtureRequest):
"""The type of the ``request`` fixture in a fixture function requested
(transitively) by a test function."""
def __init__(
self,
request: FixtureRequest,
scope: Scope,
param: Any,
param_index: int,
fixturedef: FixtureDef[object],
*,
_ispytest: bool = False,
) -> None:
super().__init__(
pyfuncitem=request._pyfuncitem,
fixturename=fixturedef.argname,
fixture_defs=request._fixture_defs,
arg2fixturedefs=request._arg2fixturedefs,
_ispytest=_ispytest,
)
self._parent_request: Final[FixtureRequest] = request
self._scope_field: Final = scope
self._fixturedef: Final[FixtureDef[object]] = fixturedef
if param is not NOTSET:
self.param = param
self.param_index: Final = param_index
def __repr__(self) -> str:
return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>"
@property
def _scope(self) -> Scope:
return self._scope_field
@property
def node(self):
scope = self._scope
if scope is Scope.Function:
# This might also be a non-function Item despite its attribute name.
node: nodes.Node | None = self._pyfuncitem
elif scope is Scope.Package:
node = get_scope_package(self._pyfuncitem, self._fixturedef)
else:
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope is Scope.Class:
# Fallback to function item itself.
node = self._pyfuncitem
assert node, (
f'Could not obtain a node for scope "{scope}" for function {self._pyfuncitem!r}'
)
return node
def _check_scope(
self,
requested_fixturedef: FixtureDef[object],
requested_scope: Scope,
) -> None:
if self._scope > requested_scope:
# Try to report something helpful.
argname = requested_fixturedef.argname
fixture_stack = "\n".join(
self._format_fixturedef_line(fixturedef)
for fixturedef in self._get_fixturestack()
)
requested_fixture = self._format_fixturedef_line(requested_fixturedef)
fail(
f"ScopeMismatch: You tried to access the {requested_scope.value} scoped "
f"fixture {argname} with a {self._scope.value} scoped request object. "
f"Requesting fixture stack:\n{fixture_stack}\n"
f"Requested fixture:\n{requested_fixture}",
pytrace=False,
)
def _format_fixturedef_line(self, fixturedef: FixtureDef[object]) -> str:
factory = fixturedef.func
path, lineno = getfslineno(factory)
if isinstance(path, Path):
path = bestrelpath(self._pyfuncitem.session.path, path)
sig = signature(factory)
return f"{path}:{lineno + 1}: def {factory.__name__}{sig}"
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
self._fixturedef.addfinalizer(finalizer)
@final
| SubRequest |
python | dask__dask | dask/dataframe/dask_expr/_repartition.py | {
"start": 13623,
"end": 14601
} | class ____(Repartition):
_parameters = ["frame", "freq"]
def _divisions(self):
freq = _map_freq_to_period_start(self.freq)
try:
start = self.frame.divisions[0].ceil(freq)
except ValueError:
start = self.frame.divisions[0]
divisions = methods.tolist(
pd.date_range(start=start, end=self.frame.divisions[-1], freq=freq)
)
if not len(divisions):
divisions = [self.frame.divisions[0], self.frame.divisions[-1]]
else:
divisions.append(self.frame.divisions[-1])
if divisions[0] != self.frame.divisions[0]:
divisions = [self.frame.divisions[0]] + divisions
return divisions
def _lower(self):
if not isinstance(self.frame.divisions[0], pd.Timestamp):
raise TypeError("Can only repartition on frequency for timeseries")
return RepartitionDivisions(self.frame, self._divisions())
| RepartitionFreq |
python | kamyu104__LeetCode-Solutions | Python/divide-an-array-into-subarrays-with-minimum-cost-i.py | {
"start": 1423,
"end": 1881
} | class ____(object):
def minimumCost(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def topk(a, k):
result = [float("inf")]*k
for x in a:
for i in xrange(len(result)):
if x < result[i]:
result[i], x = x, result[i]
return result
return nums[0]+sum(topk((nums[i] for i in xrange(1, len(nums))), 2))
| Solution2 |
python | skorch-dev__skorch | skorch/tests/test_hf.py | {
"start": 7016,
"end": 11678
} | class ____(_HuggingfaceTokenizersBaseTest):
"""Test with (mostly) uninitialized instances of tokenizer etc. being
passed
"""
from tokenizers import Tokenizer
from tokenizers.models import BPE, WordLevel, WordPiece, Unigram
from tokenizers import normalizers
from tokenizers import pre_tokenizers
from tokenizers.normalizers import Lowercase, NFD, StripAccents
from tokenizers.pre_tokenizers import CharDelimiterSplit, Digits, Whitespace
from tokenizers.processors import ByteLevel, TemplateProcessing
from tokenizers.trainers import BpeTrainer, UnigramTrainer
from tokenizers.trainers import WordPieceTrainer, WordLevelTrainer
# Test one of the main tokenizer types: BPE, WordLevel, WordPiece, Unigram.
# Individual settings like vocab size or choice of pre_tokenizer may not
# necessarily make sense.
settings = {
'setting0': {
'tokenizer': Tokenizer,
'model': BPE,
'model__unk_token': "[UNK]",
'trainer': BpeTrainer,
'trainer__vocab_size': 50,
'trainer__special_tokens': SPECIAL_TOKENS,
'trainer__show_progress': False,
'normalizer': None,
'pre_tokenizer': CharDelimiterSplit,
'pre_tokenizer__delimiter': ' ', # has to be whitespace
'post_processor': ByteLevel,
'max_length': 100,
},
'setting1': {
'tokenizer': Tokenizer,
'tokenizer__model': WordLevel, # model set via tokenizer__model
'model__unk_token': "[UNK]",
'trainer': 'auto', # infer trainer
'trainer__vocab_size': 100,
'trainer__special_tokens': SPECIAL_TOKENS,
'trainer__show_progress': False,
'normalizer': Lowercase,
'pre_tokenizer': Whitespace,
'post_processor': None,
'max_length': 100,
},
'setting2': {
'tokenizer': Tokenizer,
'model': WordPiece(unk_token="[UNK]"), # initialized model passed
'trainer__vocab_size': 150,
'trainer__special_tokens': SPECIAL_TOKENS,
'trainer__show_progress': False,
# sequences: no kwargs
'normalizer': normalizers.Sequence([NFD(), Lowercase(), StripAccents()]),
'pre_tokenizer': pre_tokenizers.Sequence(
[Whitespace(), Digits(individual_digits=True)]
),
'post_processor': TemplateProcessing(
single="[CLS] $A [SEP]",
pair="[CLS] $A [SEP] $B:1 [SEP]:1",
special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
),
'max_length': 200,
},
'setting4': {
'tokenizer': Tokenizer(model=Unigram()),
},
}
@pytest.fixture(params=settings.keys())
def tokenizer(self, request, data):
# return one tokenizer per setting
from skorch.hf import HuggingfaceTokenizer
return HuggingfaceTokenizer(**self.settings[request.param]).fit(data)
def test_fixed_vocabulary(self, tokenizer):
assert tokenizer.fixed_vocabulary_ is False
def test_get_params(self):
from skorch.hf import HuggingfaceTokenizer
tokenizer = HuggingfaceTokenizer(**self.settings['setting0'])
params = tokenizer.get_params(deep=True)
assert 'model__dropout' not in params
tokenizer.set_params(model__dropout=0.123)
params = tokenizer.get_params(deep=True)
assert 'model__dropout' in params
def test_set_params(self, data):
from skorch.hf import HuggingfaceTokenizer
tokenizer = HuggingfaceTokenizer(**self.settings['setting0'])
tokenizer.set_params(
model__dropout=0.123,
trainer__vocab_size=123,
max_length=456,
# With v0.13 of tokenizers, it seems like delimiter always needs to
# be " ", otherwise this error is raised: Error while attempting to
# unpickle Tokenizer: data did not match any variant of untagged
# enum ModelWrapper at line 1 column 2586. So we cannot change its
# value in this test but we should still ensure that set_params
# doesn't fail, so we keep it.
pre_tokenizer__delimiter=' ',
)
tokenizer.fit(data)
assert tokenizer.tokenizer_.model.dropout == pytest.approx(0.123)
assert len(tokenizer.vocabulary_) == pytest.approx(123, abs=5)
assert tokenizer.tokenizer_.pre_tokenizer.delimiter == ' '
assert tokenizer.max_length == 456
| TestHuggingfaceTokenizerUninitialized |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/util.py | {
"start": 18317,
"end": 18714
} | class ____(str):
def __str__(self) -> str:
lself = len(self)
if lself > 500:
lleft = 250
lright = 100
trunc = lself - lleft - lright
return (
f"{self[0:lleft]} ... {trunc} "
f"characters truncated ... {self[-lright:]}"
)
else:
return str.__str__(self)
| _long_statement |
python | redis__redis-py | redis/event.py | {
"start": 1211,
"end": 1494
} | class ____(Exception):
"""
Exception wrapper that adds an event object into exception context.
"""
def __init__(self, exception: Exception, event: object):
self.exception = exception
self.event = event
super().__init__(exception)
| EventException |
python | dateutil__dateutil | src/dateutil/rrule.py | {
"start": 54400,
"end": 66557
} | class ____(object):
""" Parses a string representation of a recurrence rule or set of
recurrence rules.
:param s:
Required, a string defining one or more recurrence rules.
:param dtstart:
If given, used as the default recurrence start if not specified in the
rule string.
:param cache:
If set ``True`` caching of results will be enabled, improving
performance of multiple queries considerably.
:param unfold:
If set ``True`` indicates that a rule string is split over more
than one line and should be joined before processing.
:param forceset:
If set ``True`` forces a :class:`dateutil.rrule.rruleset` to
be returned.
:param compatible:
If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a naive
:class:`datetime.datetime` object is returned.
:param tzids:
If given, a callable or mapping used to retrieve a
:class:`datetime.tzinfo` from a string representation.
Defaults to :func:`dateutil.tz.gettz`.
:param tzinfos:
Additional time zone names / aliases which may be present in a string
representation. See :func:`dateutil.parser.parse` for more
information.
:return:
Returns a :class:`dateutil.rrule.rruleset` or
:class:`dateutil.rrule.rrule`
"""
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
"FR": 4, "SA": 5, "SU": 6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError("invalid until date")
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
"""
Two ways to specify this: +1MO or MO(+1)
"""
l = []
for wday in value.split(','):
if '(' in wday:
# If it's of the form TH(+1), etc.
splt = wday.split('(')
w = splt[0]
n = int(splt[1][:-1])
elif len(wday):
# If it's of the form +1MO
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n:
n = int(n)
else:
raise ValueError("Invalid (empty) BYDAY specification.")
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError("unknown parameter name")
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError("unknown parameter '%s'" % name)
except (KeyError, ValueError):
raise ValueError("invalid '%s': %s" % (name, value))
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_date_value(self, date_value, parms, rule_tzids,
ignoretz, tzids, tzinfos):
global parser
if not parser:
from dateutil import parser
datevals = []
value_found = False
TZID = None
for parm in parms:
if parm.startswith("TZID="):
try:
tzkey = rule_tzids[parm.split('TZID=')[-1]]
except KeyError:
continue
if tzids is None:
from . import tz
tzlookup = tz.gettz
elif callable(tzids):
tzlookup = tzids
else:
tzlookup = getattr(tzids, 'get', None)
if tzlookup is None:
msg = ('tzids must be a callable, mapping, or None, '
'not %s' % tzids)
raise ValueError(msg)
TZID = tzlookup(tzkey)
continue
# RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found
# only once.
if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}:
raise ValueError("unsupported parm: " + parm)
else:
if value_found:
msg = ("Duplicate value parameter found in: " + parm)
raise ValueError(msg)
value_found = True
for datestr in date_value.split(','):
date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos)
if TZID is not None:
if date.tzinfo is None:
date = date.replace(tzinfo=TZID)
else:
raise ValueError('DTSTART/EXDATE specifies multiple timezone')
datevals.append(date)
return datevals
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzids=None,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
TZID_NAMES = dict(map(
lambda x: (x.upper(), x),
re.findall('TZID=(?P<name>[^:]+):', s)
))
s = s.upper()
if not s.strip():
raise ValueError("empty string")
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError("unsupported RRULE parm: "+parm)
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError("unsupported RDATE parm: "+parm)
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError("unsupported EXRULE parm: "+parm)
exrulevals.append(value)
elif name == "EXDATE":
exdatevals.extend(
self._parse_date_value(value, parms,
TZID_NAMES, ignoretz,
tzids, tzinfos)
)
elif name == "DTSTART":
dtvals = self._parse_date_value(value, parms, TZID_NAMES,
ignoretz, tzids, tzinfos)
if len(dtvals) != 1:
raise ValueError("Multiple DTSTART values specified:" +
value)
dtstart = dtvals[0]
else:
raise ValueError("unsupported property: "+name)
if (forceset or len(rrulevals) > 1 or rdatevals
or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
rset = rruleset(cache=cache)
for value in rrulevals:
rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
rset.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
rset.exdate(value)
if compatible and dtstart:
rset.rdate(dtstart)
return rset
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
def __call__(self, s, **kwargs):
return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
# vim:ts=4:sw=4:et
| _rrulestr |
python | huggingface__transformers | tests/models/fuyu/test_image_processing_fuyu.py | {
"start": 3637,
"end": 21243
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = FuyuImageProcessor
fast_image_processing_class = FuyuImageProcessorFast
# Skip tests that expect pixel_values output
test_cast_dtype = None
def setUp(self):
self.image_processor_tester = FuyuImageProcessingTester(self)
self.image_processor_dict = self.image_processor_tester.prepare_image_processor_dict()
# Initialize image_processor_list (from ImageProcessingTestMixin)
image_processor_list = []
if self.test_slow_image_processor and self.image_processing_class:
image_processor_list.append(self.image_processing_class)
if self.test_fast_image_processor and self.fast_image_processing_class:
image_processor_list.append(self.fast_image_processing_class)
self.image_processor_list = image_processor_list
def test_call_pil(self):
"""Override to handle Fuyu's custom output structure"""
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
encoded_images = image_processing(image_inputs[0], return_tensors="pt")
self.assertIn("images", encoded_images)
self.assertEqual(len(encoded_images.images), 1)
encoded_images = image_processing(image_inputs, return_tensors="pt")
self.assertIn("images", encoded_images)
self.assertEqual(len(encoded_images.images), self.image_processor_tester.batch_size)
def test_call_numpy(self):
"""Override to handle Fuyu's custom output structure"""
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
encoded_images = image_processing(image_inputs[0], return_tensors="pt")
self.assertIn("images", encoded_images)
self.assertEqual(len(encoded_images.images), 1)
encoded_images = image_processing(image_inputs, return_tensors="pt")
self.assertIn("images", encoded_images)
self.assertEqual(len(encoded_images.images), self.image_processor_tester.batch_size)
def test_call_pytorch(self):
"""Override to handle Fuyu's custom output structure"""
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
encoded_images = image_processing(image_inputs[0], return_tensors="pt")
self.assertIn("images", encoded_images)
self.assertEqual(len(encoded_images.images), 1)
encoded_images = image_processing(image_inputs, return_tensors="pt")
self.assertIn("images", encoded_images)
self.assertEqual(len(encoded_images.images), self.image_processor_tester.batch_size)
def test_call_numpy_4_channels(self):
"""Skip this test as Fuyu doesn't support arbitrary channels"""
self.skipTest("Fuyu processor is designed for 3-channel RGB images")
def test_slow_fast_equivalence(self):
"""Override to handle Fuyu's custom output structure"""
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image = Image.open(
io.BytesIO(
httpx.get("http://images.cocodataset.org/val2017/000000039769.jpg", follow_redirects=True).content
)
)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.images[0][0], encoding_fast.images[0][0])
def test_slow_fast_equivalence_batched(self):
"""Override to handle Fuyu's custom output structure"""
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")
# Compare each image tensor
for slow_img, fast_img in zip(encoding_slow.images, encoding_fast.images):
self._assert_slow_fast_tensors_equivalence(slow_img[0], fast_img[0])
@slow
@require_torch_accelerator
@require_vision
@pytest.mark.torch_compile_test
def test_can_compile_fast_image_processor(self):
if self.fast_image_processing_class is None:
self.skipTest("Skipping compilation test as fast image processor is not defined")
if version.parse(torch.__version__) < version.parse("2.3"):
self.skipTest(reason="This test requires torch >= 2.3 to run.")
torch.compiler.reset()
input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8)
image_processor = self.fast_image_processing_class(**self.image_processor_dict)
output_eager = image_processor(input_image, device=torch_device, return_tensors="pt")
image_processor = torch.compile(image_processor, mode="reduce-overhead")
output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(
output_eager.images[0][0], output_compiled.images[0][0], atol=1e-4, rtol=1e-4, mean_atol=1e-5
)
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "do_resize"))
self.assertTrue(hasattr(image_processor, "size"))
self.assertTrue(hasattr(image_processor, "do_pad"))
self.assertTrue(hasattr(image_processor, "do_normalize"))
self.assertTrue(hasattr(image_processor, "image_mean"))
self.assertTrue(hasattr(image_processor, "image_std"))
self.assertTrue(hasattr(image_processor, "do_rescale"))
self.assertTrue(hasattr(image_processor, "rescale_factor"))
self.assertTrue(hasattr(image_processor, "patch_size"))
def test_patches(self):
"""Test that patchify_image produces the expected number of patches."""
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
batch_size = 3
channels = 3
height = 300
width = 300
image_input = torch.rand(batch_size, channels, height, width)
expected_num_patches = image_processor.get_num_patches(image_height=height, image_width=width)
patches_final = image_processor.patchify_image(image=image_input)
self.assertEqual(patches_final.shape[1], expected_num_patches)
def test_patches_match_slow_fast(self):
"""Test that fast processor produces same patches as slow processor."""
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast patch equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(
reason="Skipping slow/fast patch equivalence test as one of the image processors is not defined"
)
batch_size = 3
channels = 3
height = 300
width = 300
image_input = torch.rand(batch_size, channels, height, width)
processor_slow = self.image_processing_class(**self.image_processor_dict)
processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
patches_fast = processor_fast.patchify_image(image=image_input)
patches_slow = processor_slow.patchify_image(image=image_input)
self.assertEqual(patches_fast.shape, patches_slow.shape)
torch.testing.assert_close(patches_fast, patches_slow, rtol=1e-4, atol=1e-4)
def test_scale_to_target_aspect_ratio(self):
"""Test that resize maintains aspect ratio correctly."""
sample_image = np.zeros((450, 210, 3), dtype=np.uint8)
if self.test_slow_image_processor and self.image_processing_class:
image_processor = self.image_processing_class(**self.image_processor_dict)
scaled_image = image_processor.resize(sample_image, size=self.image_processor_dict["size"])
self.assertEqual(scaled_image.shape[0], 180)
self.assertEqual(scaled_image.shape[1], 84)
if self.test_fast_image_processor and self.fast_image_processing_class:
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
sample_tensor = torch.from_numpy(sample_image).permute(2, 0, 1).float()
size_dict = SizeDict(
height=self.image_processor_dict["size"]["height"], width=self.image_processor_dict["size"]["width"]
)
scaled_image = image_processor_fast.resize(sample_tensor, size=size_dict)
self.assertEqual(scaled_image.shape[1], 180)
self.assertEqual(scaled_image.shape[2], 84)
def test_apply_transformation_numpy(self):
"""Test preprocessing with numpy input."""
sample_image = np.zeros((450, 210, 3), dtype=np.uint8)
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
transformed_image = image_processor.preprocess(sample_image).images[0][0]
self.assertEqual(transformed_image.shape[1], 180)
self.assertEqual(transformed_image.shape[2], 360)
def test_apply_transformation_pil(self):
"""Test preprocessing with PIL input."""
sample_image = np.zeros((450, 210, 3), dtype=np.uint8)
sample_image_pil = Image.fromarray(sample_image)
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
transformed_image = image_processor.preprocess(sample_image_pil).images[0][0]
self.assertEqual(transformed_image.shape[1], 180)
self.assertEqual(transformed_image.shape[2], 360)
def test_preprocess_output_structure(self):
"""Test that preprocess returns correct output structure."""
sample_image = np.zeros((450, 210, 3), dtype=np.uint8)
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
result = image_processor.preprocess(sample_image)
self.assertIn("images", result)
self.assertIn("image_unpadded_heights", result)
self.assertIn("image_unpadded_widths", result)
self.assertIn("image_scale_factors", result)
self.assertEqual(len(result.images), 1)
self.assertEqual(len(result.images[0]), 1)
self.assertEqual(len(result.image_unpadded_heights), 1)
self.assertEqual(len(result.image_unpadded_widths), 1)
self.assertEqual(len(result.image_scale_factors), 1)
def test_batch_processing(self):
"""Test processing multiple images."""
sample_image = np.zeros((450, 210, 3), dtype=np.uint8)
sample_image_pil = Image.fromarray(sample_image)
images = [sample_image, sample_image_pil]
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
result = image_processor.preprocess(images)
self.assertEqual(len(result.images), 2)
for img in result.images:
self.assertEqual(len(img), 1)
if hasattr(img[0], "shape"):
if len(img[0].shape) == 3:
self.assertEqual(img[0].shape[1], 180)
self.assertEqual(img[0].shape[2], 360)
def test_pad_image_fast(self):
"""Test that padding works correctly for fast processor."""
if not self.test_fast_image_processor or self.fast_image_processing_class is None:
self.skipTest(reason="Fast processor not available")
from transformers.image_utils import SizeDict
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
small_image = torch.rand(3, 100, 100)
size_dict = SizeDict(height=180, width=360)
padded = image_processor_fast.pad([small_image], pad_size=size_dict, fill_value=1.0)[0]
self.assertEqual(padded.shape[1], 180)
self.assertEqual(padded.shape[2], 360)
self.assertTrue(torch.allclose(padded[:, 100:, :], torch.ones_like(padded[:, 100:, :])))
self.assertTrue(torch.allclose(padded[:, :, 100:], torch.ones_like(padded[:, :, 100:])))
def test_preprocess_with_tokenizer_info(self):
"""Test preprocess_with_tokenizer_info functionality."""
batch_size = 2
subseq_size = 1
channels = 3
image_input = torch.rand(batch_size, subseq_size, channels, 180, 360)
image_present = torch.ones(batch_size, subseq_size, dtype=torch.bool)
image_unpadded_h = torch.tensor([[180], [180]])
image_unpadded_w = torch.tensor([[360], [360]])
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
result = image_processor.preprocess_with_tokenizer_info(
image_input=image_input,
image_present=image_present,
image_unpadded_h=image_unpadded_h,
image_unpadded_w=image_unpadded_w,
image_placeholder_id=100,
image_newline_id=101,
variable_sized=True,
)
# Check output structure
self.assertIn("images", result)
self.assertIn("image_input_ids", result)
self.assertIn("image_patches", result)
self.assertIn("image_patch_indices_per_batch", result)
self.assertIn("image_patch_indices_per_subsequence", result)
# Check batch structure
self.assertEqual(len(result.images), batch_size)
self.assertEqual(len(result.image_input_ids), batch_size)
self.assertEqual(len(result.image_patches), batch_size)
def test_device_handling_fast(self):
"""Test that fast processor can handle device placement."""
if not self.test_fast_image_processor or self.fast_image_processing_class is None:
self.skipTest(reason="Fast processor not available")
sample_image = np.zeros((450, 210, 3), dtype=np.uint8)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
if torch.cuda.is_available():
result_cuda = image_processor_fast.preprocess(sample_image, device="cuda")
self.assertEqual(result_cuda.images[0][0].device.type, "cuda")
result_cpu = image_processor_fast.preprocess(sample_image, device="cpu")
self.assertEqual(result_cpu.images[0][0].device.type, "cpu")
def test_do_not_resize_if_smaller(self):
"""Test that images smaller than target size are not resized."""
if not self.test_fast_image_processor or self.fast_image_processing_class is None:
self.skipTest(reason="Fast processor not available")
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
small_image = torch.rand(3, 100, 150)
size_dict = SizeDict(height=180, width=360)
resized = image_processor_fast.resize(small_image, size=size_dict)
self.assertEqual(resized.shape[1], 100)
self.assertEqual(resized.shape[2], 150)
| FuyuImageProcessorTest |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_mic_match_country_code.py | {
"start": 2305,
"end": 5245
} | class ____(ColumnMapExpectation):
"""Expect the provided MIC (Market Identifier Code) according to country which code (ISO3166) passed in the parameters."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_hu": [
"CIBH",
"EBHU",
"HUPX",
"XBUD",
"KHHU",
],
"some_other": [
"BACE",
"EBHU",
"HUPX",
"XBUD",
"KHHU",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "all_hu",
"country_code": "hu",
},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "some_other",
"country_code": "hu",
"mostly": 0.9,
},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_mic_match_country_code"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"mostly",
"country_code",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["pandas"],
}
success_keys = (
"country_code",
"mostly",
)
if __name__ == "__main__":
ExpectColumnValuesToBeValidMicMatchCountryCode().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidMicMatchCountryCode |
python | google__flatbuffers | tests/MyGame/Example2/Monster.py | {
"start": 177,
"end": 1121
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Monster()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsMonster(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def MonsterBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed)
# Monster
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def MonsterStart(builder):
builder.StartObject(0)
def Start(builder):
MonsterStart(builder)
def MonsterEnd(builder):
return builder.EndObject()
def End(builder):
return MonsterEnd(builder)
| Monster |
python | doocs__leetcode | solution/1900-1999/1927.Sum Game/Solution.py | {
"start": 0,
"end": 353
} | class ____:
def sumGame(self, num: str) -> bool:
n = len(num)
cnt1 = num[: n // 2].count("?")
cnt2 = num[n // 2 :].count("?")
s1 = sum(int(x) for x in num[: n // 2] if x != "?")
s2 = sum(int(x) for x in num[n // 2 :] if x != "?")
return (cnt1 + cnt2) % 2 == 1 or s1 - s2 != 9 * (cnt2 - cnt1) // 2
| Solution |
python | getsentry__sentry | tests/sentry/integrations/github/test_webhooks.py | {
"start": 39435,
"end": 51013
} | class ____(APITestCase):
def setUp(self) -> None:
self.url = "/extensions/github/webhook/"
self.secret = "b3002c3e321d4b7880360d397db2ccfd"
options.set("github-app.webhook-secret", self.secret)
future_expires = datetime.now().replace(microsecond=0) + timedelta(minutes=5)
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_integration(
organization=self.organization,
external_id="12345",
provider="github",
metadata={"access_token": "1234", "expires_at": future_expires.isoformat()},
)
integration.add_organization(self.project.organization.id, self.user)
self.integration = integration
@patch("sentry.integrations.github.webhook.sync_group_assignee_inbound_by_external_actor")
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_assigned_issue(self, mock_record: MagicMock, mock_sync: MagicMock) -> None:
Repository.objects.create(
organization_id=self.project.organization.id,
external_id="35129377",
provider="integrations:github",
name="baxterthehacker/public-repo",
)
response = self.client.post(
path=self.url,
data=ISSUES_ASSIGNED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="issues",
HTTP_X_HUB_SIGNATURE="sha1=75deab06ede0068fe16b5f1f6ee1a9509738e006",
HTTP_X_HUB_SIGNATURE_256="sha256=1703af48011c6709662f776163fce1e86772eff189f94e1ebff5ad66a81b711e",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
rpc_integration = integration_service.get_integration(integration_id=self.integration.id)
mock_sync.assert_called_once_with(
integration=rpc_integration,
external_user_name="@octocat",
external_issue_key="baxterthehacker/public-repo#2",
assign=True,
)
assert_success_metric(mock_record)
@patch("sentry.integrations.github.webhook.sync_group_assignee_inbound_by_external_actor")
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_unassigned_issue(self, mock_record: MagicMock, mock_sync: MagicMock) -> None:
Repository.objects.create(
organization_id=self.project.organization.id,
external_id="35129377",
provider="integrations:github",
name="baxterthehacker/public-repo",
)
response = self.client.post(
path=self.url,
data=ISSUES_UNASSIGNED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="issues",
HTTP_X_HUB_SIGNATURE="sha1=8d2cf8bdfaae30fc619bfbfafee3681404a12d6b",
HTTP_X_HUB_SIGNATURE_256="sha256=19794c8575c58d0be5d447e08b50d7cc235e7f7e76b32a0c371988d4335fab21",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
rpc_integration = integration_service.get_integration(integration_id=self.integration.id)
# With the fix, we now use issue.assignees (current state) instead of assignee (delta)
# ISSUES_UNASSIGNED_EVENT_EXAMPLE has assignees=[], so we deassign
mock_sync.assert_called_once_with(
integration=rpc_integration,
external_user_name="",
external_issue_key="baxterthehacker/public-repo#2",
assign=False,
)
assert_success_metric(mock_record)
def test_missing_assignee_data(self) -> None:
Repository.objects.create(
organization_id=self.project.organization.id,
external_id="35129377",
provider="integrations:github",
name="baxterthehacker/public-repo",
)
event_data = json.loads(ISSUES_ASSIGNED_EVENT_EXAMPLE)
del event_data["assignee"]
response = self.client.post(
path=self.url,
data=json.dumps(event_data),
content_type="application/json",
HTTP_X_GITHUB_EVENT="issues",
HTTP_X_HUB_SIGNATURE="sha1=fake",
HTTP_X_HUB_SIGNATURE_256="sha256=fake",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
# Should fail due to invalid signature
assert response.status_code == 401
@patch("sentry.integrations.github.webhook.metrics")
def test_creates_missing_repo_for_issues(self, mock_metrics: MagicMock) -> None:
response = self.client.post(
path=self.url,
data=ISSUES_ASSIGNED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="issues",
HTTP_X_HUB_SIGNATURE="sha1=75deab06ede0068fe16b5f1f6ee1a9509738e006",
HTTP_X_HUB_SIGNATURE_256="sha256=1703af48011c6709662f776163fce1e86772eff189f94e1ebff5ad66a81b711e",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
repos = Repository.objects.all()
assert len(repos) == 1
assert repos[0].organization_id == self.project.organization.id
assert repos[0].external_id == "35129377"
assert repos[0].provider == "integrations:github"
assert repos[0].name == "baxterthehacker/public-repo"
mock_metrics.incr.assert_called_with("github.webhook.repository_created")
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_closed_issue(self, mock_record: MagicMock) -> None:
self.create_integration_external_issue(
group=self.group,
integration=self.integration,
key="baxterthehacker/public-repo#2",
)
with patch(
"sentry.integrations.github.integration.GitHubIntegration.sync_status_inbound"
) as mock_sync:
response = self.client.post(
path=self.url,
data=ISSUES_CLOSED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="issues",
HTTP_X_HUB_SIGNATURE="sha1=069543293765b5bec93645252813c0254b213edd",
HTTP_X_HUB_SIGNATURE_256="sha256=9be56955f00d995f3a8b339f62c4d2f270ba25fd169db3d08150bdc82fa914b8",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
mock_sync.assert_called_once()
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_reopened_issue(self, mock_record: MagicMock) -> None:
self.create_integration_external_issue(
group=self.group,
integration=self.integration,
key="baxterthehacker/public-repo#2",
)
with patch(
"sentry.integrations.github.integration.GitHubIntegration.sync_status_inbound"
) as mock_sync:
response = self.client.post(
path=self.url,
data=ISSUES_REOPENED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="issues",
HTTP_X_HUB_SIGNATURE="sha1=1c1dd45d6ddff6bbc004ea19decca29e6bd98a8b",
HTTP_X_HUB_SIGNATURE_256="sha256=888724cc9396caf181628f81bcda5c4a29e2e9575fdf951505371090ec142ad3",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
mock_sync.assert_called_once()
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_closed_issue_multiple_orgs(self, mock_record: MagicMock) -> None:
"""Test that closed issues sync to all organization integrations"""
# Create second organization
org2 = self.create_organization(owner=self.user)
self.create_project(organization=org2)
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.add_organization(org2.id, self.user)
# Create repos for both orgs
Repository.objects.create(
organization_id=self.project.organization.id,
external_id="35129377",
provider="integrations:github",
name="baxterthehacker/public-repo",
)
Repository.objects.create(
organization_id=org2.id,
external_id="35129377",
provider="integrations:github",
name="baxterthehacker/public-repo",
)
# Create linked issues for both orgs
self.create_integration_external_issue(
group=self.group,
integration=self.integration,
key="baxterthehacker/public-repo#2",
)
with patch(
"sentry.integrations.github.integration.GitHubIntegration.sync_status_inbound"
) as mock_sync:
response = self.client.post(
path=self.url,
data=ISSUES_CLOSED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="issues",
HTTP_X_HUB_SIGNATURE="sha1=069543293765b5bec93645252813c0254b213edd",
HTTP_X_HUB_SIGNATURE_256="sha256=9be56955f00d995f3a8b339f62c4d2f270ba25fd169db3d08150bdc82fa914b8",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
# Sync should be called for each org that has a linked issue
assert mock_sync.call_count >= 1
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_reopened_issue_multiple_orgs(self, mock_record: MagicMock) -> None:
"""Test that reopened issues sync to all organization integrations"""
# Create second organization
org2 = self.create_organization(owner=self.user)
self.create_project(organization=org2)
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.add_organization(org2.id, self.user)
# Create repos for both orgs
Repository.objects.create(
organization_id=self.project.organization.id,
external_id="35129377",
provider="integrations:github",
name="baxterthehacker/public-repo",
)
Repository.objects.create(
organization_id=org2.id,
external_id="35129377",
provider="integrations:github",
name="baxterthehacker/public-repo",
)
# Create linked issues for both orgs
self.create_integration_external_issue(
group=self.group,
integration=self.integration,
key="baxterthehacker/public-repo#2",
)
with patch(
"sentry.integrations.github.integration.GitHubIntegration.sync_status_inbound"
) as mock_sync:
response = self.client.post(
path=self.url,
data=ISSUES_REOPENED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="issues",
HTTP_X_HUB_SIGNATURE="sha1=1c1dd45d6ddff6bbc004ea19decca29e6bd98a8b",
HTTP_X_HUB_SIGNATURE_256="sha256=888724cc9396caf181628f81bcda5c4a29e2e9575fdf951505371090ec142ad3",
HTTP_X_GITHUB_DELIVERY=str(uuid4()),
)
assert response.status_code == 204
# Sync should be called for each org that has a linked issue
assert mock_sync.call_count >= 1
| IssuesEventWebhookTest |
python | doocs__leetcode | solution/1300-1399/1309.Decrypt String from Alphabet to Integer Mapping/Solution.py | {
"start": 0,
"end": 385
} | class ____:
def freqAlphabets(self, s: str) -> str:
ans = []
i, n = 0, len(s)
while i < n:
if i + 2 < n and s[i + 2] == "#":
ans.append(chr(int(s[i : i + 2]) + ord("a") - 1))
i += 3
else:
ans.append(chr(int(s[i]) + ord("a") - 1))
i += 1
return "".join(ans)
| Solution |
python | getsentry__sentry | src/sentry/consumers/__init__.py | {
"start": 26744,
"end": 27056
} | class ____(ProcessingStrategyFactory):
def __init__(self, inner: ProcessingStrategyFactory):
self.inner = inner
def create_with_partitions(self, commit, partitions):
rv = self.inner.create_with_partitions(commit, partitions)
return JoinProfiler(rv)
| JoinProfilerStrategyFactoryWrapper |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_arraypad.py | {
"start": 545,
"end": 13648
} | class ____(TestCase):
@xpassIfTorchDynamo_np # (reason="tuple values")
def test_check_constant(self):
a = np.arange(100)
a = np.pad(a, (25, 20), "constant", constant_values=(10, 20))
b = np.array(
[
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
10,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
20,
]
)
assert_array_equal(a, b)
def test_check_constant_zeros(self):
a = np.arange(100)
a = np.pad(a, (25, 20), "constant")
b = np.array(
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
)
assert_array_equal(a, b)
def test_check_constant_float(self):
# If input array is int, but constant_values are float, the dtype of
# the array to be padded is kept
arr = np.arange(30).reshape(5, 6)
test = np.pad(arr, (1, 2), mode="constant", constant_values=1.1)
expected = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 1, 2, 3, 4, 5, 1, 1],
[1, 6, 7, 8, 9, 10, 11, 1, 1],
[1, 12, 13, 14, 15, 16, 17, 1, 1],
[1, 18, 19, 20, 21, 22, 23, 1, 1],
[1, 24, 25, 26, 27, 28, 29, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1],
]
)
assert_allclose(test, expected)
def test_check_constant_float2(self):
# If input array is float, and constant_values are float, the dtype of
# the array to be padded is kept - here retaining the float constants
arr = np.arange(30).reshape(5, 6)
arr_float = arr.astype(np.float64)
test = np.pad(arr_float, ((1, 2), (1, 2)), mode="constant", constant_values=1.1)
expected = np.array(
[
[1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
[1.1, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 1.1, 1.1],
[1.1, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 1.1, 1.1],
[1.1, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 1.1, 1.1],
[1.1, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 1.1, 1.1],
[1.1, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 1.1, 1.1],
[1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
[1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
]
)
assert_allclose(test, expected)
@xpassIfTorchDynamo_np # (reason="tuple values")
def test_check_constant_float3(self):
a = np.arange(100, dtype=float)
a = np.pad(a, (25, 20), "constant", constant_values=(-1.1, -1.2))
b = np.array(
[
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
-1.1,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
82,
83,
84,
85,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
-1.2,
]
)
assert_allclose(a, b)
def test_check_constant_odd_pad_amount(self):
arr = np.arange(30).reshape(5, 6)
test = np.pad(arr, ((1,), (2,)), mode="constant", constant_values=3)
expected = np.array(
[
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[3, 3, 0, 1, 2, 3, 4, 5, 3, 3],
[3, 3, 6, 7, 8, 9, 10, 11, 3, 3],
[3, 3, 12, 13, 14, 15, 16, 17, 3, 3],
[3, 3, 18, 19, 20, 21, 22, 23, 3, 3],
[3, 3, 24, 25, 26, 27, 28, 29, 3, 3],
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
]
)
assert_allclose(test, expected)
@xpassIfTorchDynamo_np # (reason="tuple values")
def test_check_constant_pad_2d(self):
arr = np.arange(4).reshape(2, 2)
test = np.pad(
arr, ((1, 2), (1, 3)), mode="constant", constant_values=((1, 2), (3, 4))
)
expected = np.array(
[
[3, 1, 1, 4, 4, 4],
[3, 0, 1, 4, 4, 4],
[3, 2, 3, 4, 4, 4],
[3, 2, 2, 4, 4, 4],
[3, 2, 2, 4, 4, 4],
]
)
assert_allclose(test, expected)
@skipif(
True, reason="passes on MacOS, fails otherwise"
) # (reason="int64 overflow")
def test_check_large_integers(self):
int64_max = 2**63 - 1
arr = np.full(5, int64_max, dtype=np.int64)
test = np.pad(arr, 1, mode="constant", constant_values=arr.min())
expected = np.full(7, int64_max, dtype=np.int64)
assert_array_equal(test, expected)
def test_pad_empty_dimension(self):
arr = np.zeros((3, 0, 2))
result = np.pad(arr, [(0,), (2,), (1,)], mode="constant")
assert result.shape == (3, 4, 4)
if __name__ == "__main__":
run_tests()
| TestConstant |
python | matplotlib__matplotlib | lib/matplotlib/ticker.py | {
"start": 7457,
"end": 7668
} | class ____:
axis = None
def set_axis(self, axis):
self.axis = axis
def create_dummy_axis(self, **kwargs):
if self.axis is None:
self.axis = _DummyAxis(**kwargs)
| TickHelper |
python | spack__spack | lib/spack/spack/error.py | {
"start": 6602,
"end": 6697
} | class ____(SpecFilenameError):
"""Raised when a spec file doesn't exist."""
| NoSuchSpecFileError |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/quantization_ops/quantization_ops_test.py | {
"start": 8656,
"end": 9691
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def test_invalid_inputs(self):
inputs = constant_op.constant(
np.uint8(0), shape=[3, 3, 3, 3], dtype=dtypes.quint8)
ksize = [1, 1, 1, 1]
strides = [1, 1, 1, 1]
padding = "SAME"
with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
"must be.* rank 0"):
self.evaluate(
nn_ops.quantized_avg_pool(
input=inputs,
min_input=[],
max_input=1.0,
ksize=ksize,
strides=strides,
padding=padding))
with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
"must be.* rank 0"):
self.evaluate(
nn_ops.quantized_avg_pool(
input=inputs,
min_input=0.0,
max_input=[],
ksize=ksize,
strides=strides,
padding=padding))
| QuantizedAvgPoolingOpTest |
python | django__django | tests/model_inheritance/tests.py | {
"start": 646,
"end": 12606
} | class ____(TestCase):
def test_abstract(self):
# The Student and Worker models both have 'name' and 'age' fields on
# them and inherit the __str__() method, just as with normal Python
# subclassing. This is useful if you want to factor out common
# information for programming purposes, but still completely
# independent separate models at the database level.
w1 = Worker.objects.create(name="Fred", age=35, job="Quarry worker")
Worker.objects.create(name="Barney", age=34, job="Quarry worker")
s = Student.objects.create(name="Pebbles", age=5, school_class="1B")
self.assertEqual(str(w1), "Worker Fred")
self.assertEqual(str(s), "Student Pebbles")
# The children inherit the Meta class of their parents (if they don't
# specify their own).
self.assertSequenceEqual(
Worker.objects.values("name"),
[
{"name": "Barney"},
{"name": "Fred"},
],
)
# Since Student does not subclass CommonInfo's Meta, it has the effect
# of completely overriding it. So ordering by name doesn't take place
# for Students.
self.assertEqual(Student._meta.ordering, [])
# However, the CommonInfo class cannot be used as a normal model (it
# doesn't exist as a model).
with self.assertRaisesMessage(
AttributeError, "'CommonInfo' has no attribute 'objects'"
):
CommonInfo.objects.all()
def test_reverse_relation_for_different_hierarchy_tree(self):
# Even though p.supplier for a Place 'p' (a parent of a Supplier), a
# Restaurant object cannot access that reverse relation, since it's not
# part of the Place-Supplier Hierarchy.
self.assertSequenceEqual(Place.objects.filter(supplier__name="foo"), [])
msg = (
"Cannot resolve keyword 'supplier' into field. Choices are: "
"address, chef, chef_id, id, italianrestaurant, lot, name, "
"place_ptr, place_ptr_id, provider, rating, serves_hot_dogs, serves_pizza"
)
with self.assertRaisesMessage(FieldError, msg):
Restaurant.objects.filter(supplier__name="foo")
def test_model_with_distinct_accessors(self):
# The Post model has distinct accessors for the Comment and Link
# models.
post = Post.objects.create(title="Lorem Ipsum")
post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True)
post.attached_link_set.create(
content="The web framework for perfections with deadlines.",
url="http://www.djangoproject.com/",
)
# The Post model doesn't have an attribute called
# 'attached_%(class)s_set'.
msg = "'Post' object has no attribute 'attached_%(class)s_set'"
with self.assertRaisesMessage(AttributeError, msg):
getattr(post, "attached_%(class)s_set")
def test_model_with_distinct_related_query_name(self):
self.assertSequenceEqual(
Post.objects.filter(attached_model_inheritance_comments__is_spam=True), []
)
# The Post model doesn't have a related query accessor based on
# related_name (attached_comment_set).
msg = "Cannot resolve keyword 'attached_comment_set' into field."
with self.assertRaisesMessage(FieldError, msg):
Post.objects.filter(attached_comment_set__is_spam=True)
def test_meta_fields_and_ordering(self):
# Make sure Restaurant and ItalianRestaurant have the right fields in
# the right order.
self.assertEqual(
[f.name for f in Restaurant._meta.fields],
[
"id",
"name",
"address",
"place_ptr",
"rating",
"serves_hot_dogs",
"serves_pizza",
"chef",
],
)
self.assertEqual(
[f.name for f in ItalianRestaurant._meta.fields],
[
"id",
"name",
"address",
"place_ptr",
"rating",
"serves_hot_dogs",
"serves_pizza",
"chef",
"restaurant_ptr",
"serves_gnocchi",
],
)
self.assertEqual(Restaurant._meta.ordering, ["-rating"])
def test_custompk_m2m(self):
b = Base.objects.create()
b.titles.add(Title.objects.create(title="foof"))
s = SubBase.objects.create(sub_id=b.id)
b = Base.objects.get(pk=s.id)
self.assertNotEqual(b.pk, s.pk)
# Low-level test for related_val
self.assertEqual(s.titles.related_val, (s.id,))
# Higher level test for correct query values (title foof not
# accidentally found).
self.assertSequenceEqual(s.titles.all(), [])
def test_create_diamond_mti_default_pk(self):
# 1 INSERT for each base.
with self.assertNumQueries(4):
common_child = CommonChild.objects.create()
# 3 SELECTs for the parents, 1 UPDATE for the child.
with self.assertNumQueries(4):
common_child.save()
def test_create_diamond_mti_common_parent(self):
with self.assertNumQueries(4):
italian_restaurant_child = ItalianRestaurantCommonParent.objects.create(
name="Ristorante Miron",
address="1234 W. Ash",
)
self.assertEqual(
italian_restaurant_child.italianrestaurant_ptr.place_ptr,
italian_restaurant_child.place_ptr_two,
)
self.assertEqual(
italian_restaurant_child.italianrestaurant_ptr.restaurant_ptr,
italian_restaurant_child.restaurant_ptr,
)
self.assertEqual(
italian_restaurant_child.restaurant_ptr.place_ptr,
italian_restaurant_child.place_ptr_two,
)
self.assertEqual(italian_restaurant_child.name, "Ristorante Miron")
self.assertEqual(italian_restaurant_child.address, "1234 W. Ash")
def test_update_parent_filtering(self):
"""
Updating a field of a model subclass doesn't issue an UPDATE
query constrained by an inner query (#10399).
"""
supplier = Supplier.objects.create(
name="Central market",
address="610 some street",
)
# Capture the expected query in a database agnostic way
with CaptureQueriesContext(connection) as captured_queries:
Place.objects.filter(pk=supplier.pk).update(name=supplier.name)
expected_sql = captured_queries[0]["sql"]
# Capture the queries executed when a subclassed model instance is
# saved.
with CaptureQueriesContext(connection) as captured_queries:
supplier.save(update_fields=("name",))
for query in captured_queries:
sql = query["sql"]
if "UPDATE" in sql:
self.assertEqual(expected_sql, sql)
def test_create_child_no_update(self):
"""Creating a child with non-abstract parents only issues INSERTs."""
def a():
GrandChild.objects.create(
email="grand_parent@example.com",
first_name="grand",
last_name="parent",
)
def b():
GrandChild().save()
for i, test in enumerate([a, b]):
with (
self.subTest(i=i),
self.assertNumQueries(4),
CaptureQueriesContext(connection) as queries,
):
test()
for query in queries:
sql = query["sql"]
self.assertIn("INSERT INTO", sql, sql)
def test_create_copy_with_inherited_m2m(self):
restaurant = Restaurant.objects.create()
supplier = CustomSupplier.objects.create(
name="Central market", address="944 W. Fullerton"
)
supplier.customers.set([restaurant])
old_customers = supplier.customers.all()
supplier.pk = None
supplier.id = None
supplier._state.adding = True
supplier.save()
supplier.customers.set(old_customers)
supplier = Supplier.objects.get(pk=supplier.pk)
self.assertCountEqual(supplier.customers.all(), old_customers)
self.assertSequenceEqual(supplier.customers.all(), [restaurant])
def test_eq(self):
# Equality doesn't transfer in multitable inheritance.
self.assertNotEqual(Place(id=1), Restaurant(id=1))
self.assertNotEqual(Restaurant(id=1), Place(id=1))
def test_mixin_init(self):
m = MixinModel()
self.assertEqual(m.other_attr, 1)
@isolate_apps("model_inheritance")
def test_abstract_parent_link(self):
class A(models.Model):
pass
class B(A):
a = models.OneToOneField("A", parent_link=True, on_delete=models.CASCADE)
class Meta:
abstract = True
class C(B):
pass
self.assertIs(C._meta.parents[A], C._meta.get_field("a"))
@isolate_apps("model_inheritance")
def test_init_subclass(self):
saved_kwargs = {}
class A(models.Model):
def __init_subclass__(cls, **kwargs):
super().__init_subclass__()
saved_kwargs.update(kwargs)
kwargs = {"x": 1, "y": 2, "z": 3}
class B(A, **kwargs):
pass
self.assertEqual(saved_kwargs, kwargs)
@isolate_apps("model_inheritance")
def test_set_name(self):
class ClassAttr:
called = None
def __set_name__(self_, owner, name):
self.assertIsNone(self_.called)
self_.called = (owner, name)
class A(models.Model):
attr = ClassAttr()
self.assertEqual(A.attr.called, (A, "attr"))
def test_inherited_ordering_pk_desc(self):
p1 = Parent.objects.create(first_name="Joe", email="joe@email.com")
p2 = Parent.objects.create(first_name="Jon", email="jon@email.com")
expected_order_by_sql = "ORDER BY %s.%s DESC" % (
connection.ops.quote_name(Parent._meta.db_table),
connection.ops.quote_name(Parent._meta.get_field("grandparent_ptr").column),
)
qs = Parent.objects.all()
self.assertSequenceEqual(qs, [p2, p1])
self.assertIn(expected_order_by_sql, str(qs.query))
def test_queryset_class_getitem(self):
self.assertIs(models.QuerySet[Post], models.QuerySet)
self.assertIs(models.QuerySet[Post, Post], models.QuerySet)
self.assertIs(models.QuerySet[Post, int, str], models.QuerySet)
def test_shadow_parent_attribute_with_field(self):
class ScalarParent(models.Model):
foo = 1
class ScalarOverride(ScalarParent):
foo = models.IntegerField()
self.assertEqual(type(ScalarOverride.foo), DeferredAttribute)
def test_shadow_parent_property_with_field(self):
class PropertyParent(models.Model):
@property
def foo(self):
pass
class PropertyOverride(PropertyParent):
foo = models.IntegerField()
self.assertEqual(type(PropertyOverride.foo), DeferredAttribute)
def test_shadow_parent_method_with_field(self):
class MethodParent(models.Model):
def foo(self):
pass
class MethodOverride(MethodParent):
foo = models.IntegerField()
self.assertEqual(type(MethodOverride.foo), DeferredAttribute)
def test_full_clean(self):
restaurant = Restaurant.objects.create()
with self.assertNumQueries(0), self.assertRaises(ValidationError):
restaurant.full_clean()
| ModelInheritanceTests |
python | pytorch__pytorch | torch/_higher_order_ops/schema.py | {
"start": 2186,
"end": 2891
} | class ____:
@staticmethod
def from_hop_argument_info(
arg_idx: int, arg_info: HopArgumentInfo, is_output: bool = False
) -> Any:
typ = CTypeGen.from_example(arg_info.example_value)
if is_output:
return torch._C.Argument("", typ, None, None, False, None)
alias_set = set({f"alias::a{arg_idx}"}) if arg_info.is_mutated else set()
alias_info = torch._C._AliasInfo(arg_info.is_mutated, alias_set, alias_set) # type: ignore[attr-defined]
return torch._C.Argument(
arg_info.name,
typ,
None,
arg_info.default_value,
arg_info.kw_only,
alias_info,
)
| CArgumentGen |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/batch.py | {
"start": 7032,
"end": 9371
} | class ____(AwsBaseSensor[BatchClientHook]):
"""
Poll the state of the Batch job queue until it reaches a terminal state; fails if the queue fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:BatchJobQueueSensor`
:param job_queue: Batch job queue name
:param treat_non_existing_as_deleted: If True, a non-existing Batch job queue is considered as a deleted
queue and as such a valid case.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
"""
aws_hook_class = BatchClientHook
template_fields: Sequence[str] = aws_template_fields(
"job_queue",
)
template_ext: Sequence[str] = ()
ui_color = "#66c3ff"
def __init__(
self,
job_queue: str,
treat_non_existing_as_deleted: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.job_queue = job_queue
self.treat_non_existing_as_deleted = treat_non_existing_as_deleted
def poke(self, context: Context) -> bool:
response = self.hook.client.describe_job_queues( # type: ignore[union-attr]
jobQueues=[self.job_queue]
)
if not response["jobQueues"]:
if self.treat_non_existing_as_deleted:
return True
raise AirflowException(f"AWS Batch job queue {self.job_queue} not found")
status = response["jobQueues"][0]["status"]
if status in BatchClientHook.JOB_QUEUE_TERMINAL_STATUS:
return True
if status in BatchClientHook.JOB_QUEUE_INTERMEDIATE_STATUS:
return False
message = f"AWS Batch job queue failed. AWS Batch job queue status: {status}"
raise AirflowException(message)
| BatchJobQueueSensor |
python | huggingface__transformers | src/transformers/models/ctrl/tokenization_ctrl.py | {
"start": 2496,
"end": 6870
} | class ____(PreTrainedTokenizer):
"""
Construct a CTRL tokenizer. Based on Byte-Pair-Encoding.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
"""
vocab_files_names = VOCAB_FILES_NAMES
control_codes = CONTROL_CODES
def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
merges = merges_handle.read().split("\n")[1:-1]
merges = [tuple(merge.split()) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
self.add_bpe_version_header = True
super().__init__(
unk_token=unk_token,
token_type_ids_pattern="all_zeros",
token_type_ids_include_special_tokens=True,
special_tokens_pattern="none",
**kwargs,
)
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = "@@ ".join(word)
word = word[:-4]
self.cache[token] = word
return word
def _tokenize(self, text):
"""Tokenize a string."""
split_tokens = []
words = re.findall(r"\S+\n?", text)
for token in words:
split_tokens.extend(list(self.bpe(token).split(" ")))
return split_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
out_string = " ".join(tokens).replace("@@ ", "").strip()
return out_string
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
__all__ = ["CTRLTokenizer"]
| CTRLTokenizer |
python | scikit-learn__scikit-learn | sklearn/linear_model/_ridge.py | {
"start": 83683,
"end": 91647
} | class ____(LinearModel):
_parameter_constraints: dict = {
"alphas": ["array-like", Interval(Real, 0, None, closed="neither")],
"fit_intercept": ["boolean"],
"scoring": [StrOptions(set(get_scorer_names())), callable, None],
"cv": ["cv_object"],
"gcv_mode": [StrOptions({"auto", "svd", "eigen"}), None],
"store_cv_results": ["boolean"],
"alpha_per_target": ["boolean"],
}
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
scoring=None,
cv=None,
gcv_mode=None,
store_cv_results=False,
alpha_per_target=False,
):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_results = store_cv_results
self.alpha_per_target = alpha_per_target
def fit(self, X, y, sample_weight=None, **params):
"""Fit Ridge regression model with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data. If using GCV, will be cast to float64
if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
**params : dict, default=None
Extra parameters for the underlying scorer.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
Notes
-----
When sample_weight is provided, the selected hyperparameter may depend
on whether we use leave-one-out cross-validation (cv=None)
or another form of cross-validation, because only leave-one-out
cross-validation takes the sample weights into account when computing
the validation score.
"""
_raise_for_params(params, self, "fit")
cv = self.cv
scorer = self._get_scorer()
# `_RidgeGCV` does not work for alpha = 0
if cv is None:
check_scalar_alpha = partial(
check_scalar,
target_type=numbers.Real,
min_val=0.0,
include_boundaries="neither",
)
else:
check_scalar_alpha = partial(
check_scalar,
target_type=numbers.Real,
min_val=0.0,
include_boundaries="left",
)
if isinstance(self.alphas, (np.ndarray, list, tuple)):
n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
if n_alphas != 1:
for index, alpha in enumerate(self.alphas):
alpha = check_scalar_alpha(alpha, f"alphas[{index}]")
else:
self.alphas[0] = check_scalar_alpha(self.alphas[0], "alphas")
alphas = np.asarray(self.alphas)
if sample_weight is not None:
params["sample_weight"] = sample_weight
if cv is None:
if _routing_enabled():
routed_params = process_routing(
self,
"fit",
**params,
)
else:
routed_params = Bunch(scorer=Bunch(score={}))
if sample_weight is not None:
routed_params.scorer.score["sample_weight"] = sample_weight
# reset `scorer` variable to original user-intend if no scoring is passed
if self.scoring is None:
scorer = None
estimator = _RidgeGCV(
alphas,
fit_intercept=self.fit_intercept,
scoring=scorer,
gcv_mode=self.gcv_mode,
store_cv_results=self.store_cv_results,
is_clf=is_classifier(self),
alpha_per_target=self.alpha_per_target,
)
estimator.fit(
X,
y,
sample_weight=sample_weight,
score_params=routed_params.scorer.score,
)
self.alpha_ = estimator.alpha_
self.best_score_ = estimator.best_score_
if self.store_cv_results:
self.cv_results_ = estimator.cv_results_
else:
if self.store_cv_results:
raise ValueError("cv!=None and store_cv_results=True are incompatible")
if self.alpha_per_target:
raise ValueError("cv!=None and alpha_per_target=True are incompatible")
parameters = {"alpha": alphas}
solver = "sparse_cg" if sparse.issparse(X) else "auto"
model = RidgeClassifier if is_classifier(self) else Ridge
estimator = model(
fit_intercept=self.fit_intercept,
solver=solver,
)
if _routing_enabled():
estimator.set_fit_request(sample_weight=True)
grid_search = GridSearchCV(
estimator,
parameters,
cv=cv,
scoring=scorer,
)
grid_search.fit(X, y, **params)
estimator = grid_search.best_estimator_
self.alpha_ = grid_search.best_estimator_.alpha
self.best_score_ = grid_search.best_score_
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
self.n_features_in_ = estimator.n_features_in_
if hasattr(estimator, "feature_names_in_"):
self.feature_names_in_ = estimator.feature_names_in_
return self
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.5
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = (
MetadataRouter(owner=self)
.add_self_request(self)
.add(
scorer=self._get_scorer(),
method_mapping=MethodMapping().add(caller="fit", callee="score"),
)
.add(
splitter=self.cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
)
return router
def _get_scorer(self):
"""Make sure the scorer is weighted if necessary.
This uses `self._get_scorer_instance()` implemented in child objects to get the
raw scorer instance of the estimator, which will be ignored if `self.scoring` is
not None.
"""
if _routing_enabled() and self.scoring is None:
# This estimator passes an array of 1s as sample_weight even if
# sample_weight is not provided by the user. Therefore we need to
# always request it. But we don't set it if it's passed explicitly
# by the user.
return self._get_scorer_instance().set_score_request(sample_weight=True)
return check_scoring(estimator=self, scoring=self.scoring, allow_none=True)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.array_api_support = True
tags.input_tags.sparse = True
return tags
| _BaseRidgeCV |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {
"start": 12009,
"end": 12491
} | class ____(Action):
async def act(self, triggered_action: "TriggeredAction") -> None:
event = await self.create_event(triggered_action)
self._result_details["emitted_event"] = str(event.id)
async with PrefectServerEventsClient() as events:
await events.emit(event)
@abc.abstractmethod
async def create_event(self, triggered_action: "TriggeredAction") -> "Event":
"""Create an event from the TriggeredAction"""
| EmitEventAction |
python | modin-project__modin | modin/core/dataframe/base/interchange/dataframe_protocol/utils.py | {
"start": 2322,
"end": 2552
} | class ____(enum.IntEnum): # noqa PR01
"""Integer enum for device type codes matching DLPack."""
CPU = 1
CUDA = 2
CPU_PINNED = 3
OPENCL = 4
VULKAN = 7
METAL = 8
VPI = 9
ROCM = 10
| DlpackDeviceType |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 94042,
"end": 105582
} | class ____(DashboardComponent):
"""Stacked area chart showing task groups through time"""
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
self.source = ColumnDataSource()
# The length of timeseries to chart (in units of plugin.dt)
self.npts = 180
if GroupTiming.name not in scheduler.plugins:
scheduler.add_plugin(plugin=GroupTiming(scheduler))
self.plugin = scheduler.plugins[GroupTiming.name]
self.source.add(np.array(self.plugin.time) * 1000.0, "time")
x_range = DataRange1d(range_padding=0)
y_range = Range1d(0, max(self.plugin.nthreads))
self.root = figure(
title="Task Group Progress",
name="task_group_progress",
toolbar_location="above",
min_border_bottom=50,
x_range=x_range,
y_range=y_range,
tools="",
x_axis_type="datetime",
y_axis_location=None,
**kwargs,
)
self.root.yaxis.major_label_text_alpha = 0
self.root.yaxis.minor_tick_line_alpha = 0
self.root.yaxis.major_tick_line_alpha = 0
self.root.xgrid.visible = False
self.root.add_tools(
BoxZoomTool(),
ResetTool(),
PanTool(dimensions="width"),
WheelZoomTool(dimensions="width"),
)
self._hover = None
self._last_drawn = None
self._offset = time()
self._last_transition_count = scheduler.transition_counter
# OrderedDict so we can make a reverse iterator later and get the
# most-recently-added glyphs.
self._renderers = OrderedDict()
self._line_renderers = OrderedDict()
def _should_add_new_renderers(self) -> bool:
"""
Whether to add new renderers to the chart.
When a new set of task groups enters the scheduler we'd like to start rendering
them. But it can be expensive to add new glyps, so we do it deliberately,
checking whether we have to do it and whether the scheduler seems busy.
"""
# Always draw if we have not before
if not self._last_drawn:
return True
# Don't draw if there have been no new tasks completed since the last update,
# or if the scheduler CPU is occupied.
if (
self._last_transition_count == self.scheduler.transition_counter
or self.scheduler.proc.cpu_percent() > 50
):
return False
# Only return true if there are new task groups that we have not yet added
# to the ColumnDataSource.
return not set(self.plugin.compute.keys()) <= set(self.source.data.keys())
def _should_update(self) -> bool:
"""
Whether to update the ColumnDataSource. This is cheaper than redrawing,
but still not free, so we check whether we need it and whether the scheudler
is busy.
"""
return (
self._last_transition_count != self.scheduler.transition_counter
and self.scheduler.proc.cpu_percent() < 50
)
def _get_timeseries(self, restrict_to_existing=False):
"""
Update the ColumnDataSource with our time series data.
restrict_to_existing determines whether to add new task groups
which might have been added since the last time we rendered.
This is important as we want to add new stackers very deliberately.
"""
# Get the front/back indices for most recent npts bins out of the timeseries
front = max(len(self.plugin.time) - self.npts, 0)
back = None
# Remove any periods of zero compute at the front or back of the timeseries
if len(self.plugin.compute):
agg = sum(np.array(v[front:]) for v in self.plugin.compute.values())
front2 = len(agg) - len(np.trim_zeros(agg, trim="f"))
front += front2
back = len(np.trim_zeros(agg, trim="b")) - len(agg) or None
prepend = (
self.plugin.time[front - 1]
if front >= 1
else self.plugin.time[front] - self.plugin.dt
)
timestamps = np.array(self.plugin.time[front:back])
dt = np.diff(timestamps, prepend=prepend)
if restrict_to_existing:
new_data = {
k: np.array(v[front:back]) / dt
for k, v in self.plugin.compute.items()
if k in self.source.data
}
else:
new_data = valmap(
lambda x: np.array(x[front:back]) / dt,
self.plugin.compute,
)
new_data["time"] = (
timestamps - self._offset
) * 1000.0 # bokeh likes milliseconds
new_data["nthreads"] = np.array(self.plugin.nthreads[front:back])
return new_data
@without_property_validation
@log_errors
def update(self):
"""
Maybe update the chart. This is somewhat expensive to draw, so we update
it pretty defensively.
"""
if self._should_add_new_renderers():
# Update the chart, allowing for new task groups to be added.
new_data = self._get_timeseries(restrict_to_existing=False)
self.source.data = new_data
# Possibly update the y range if the number of threads has increased.
max_nthreads = max(self.plugin.nthreads)
if self.root.y_range.end != max_nthreads:
self.root.y_range.end = max_nthreads
stackers = list(self.plugin.compute.keys())
colors = [color_of(key_split(k)) for k in stackers]
for i, (group, color) in enumerate(zip(stackers, colors)):
# If we have already drawn the group, but it is all zero,
# set it to be invisible.
if group in self._renderers:
if not np.count_nonzero(new_data[group]) > 0:
self._renderers[group].visible = False
self._line_renderers[group].visible = False
else:
self._renderers[group].visible = True
self._line_renderers[group].visible = True
continue
# Draw the new area and line glyphs.
renderer = self.root.varea(
x="time",
y1=stack(*stackers[:i]),
y2=stack(*stackers[: i + 1]),
color=color,
alpha=0.5,
source=self.source,
)
self._renderers[group] = renderer
line_renderer = self.root.line(
x="time",
y=stack(*stackers[: i + 1]),
color=color,
alpha=1.0,
source=self.source,
)
self._line_renderers[group] = line_renderer
# Don't add hover until there is something to show, as bokehjs seems to
# have trouble with custom hovers when there are no renderers.
if self.plugin.compute and self._hover is None:
# Add a hover that will show occupancy for all currently active
# task groups. This is a little tricky, bokeh doesn't (yet) support
# hit tests for stacked area charts: https://github.com/bokeh/bokeh/issues/9182
# Instead, show a single vline hover which lists the currently active task
# groups. A custom formatter in JS-land pulls the relevant data index and
# assembles the tooltip.
formatter = CustomJSHover(code="return '';")
self._hover = HoverTool(
tooltips="""
<div>
<div style="font-size: 1.2em; font-weight: bold">
<b>Worker thread occupancy</b>
</div>
<div>
$index{custom}
</div>
</div>
""",
mode="vline",
line_policy="nearest",
attachment="horizontal",
formatters={"$index": formatter},
)
self.root.add_tools(self._hover)
if self._hover:
# Create a custom tooltip that:
# 1. Includes nthreads
# 2. Filters out inactive task groups
# (ones without any compute during the relevant dt)
# 3. Colors the labels appropriately.
formatter = CustomJSHover(
code="""
const colormap = %s;
const divs = [];
for (let k of Object.keys(source.data)) {
const val = source.data[k][value];
const color = colormap[k];
if (k === "time" || k === "nthreads" || val < 1.e-3) {
continue;
}
const label = k.length >= 20 ? k.slice(0, 20) + '…' : k;
// Unshift so that the ordering of the labels is the same as
// the ordering of the stackers.
divs.unshift(
'<div>'
+ '<span style="font-weight: bold; color:' + color + ';">'
+ label
+ '</span>'
+ ': '
+ val.toFixed(1)
+ '</div>'
)
}
divs.unshift(
'<div>'
+ '<span style="font-weight: bold; color: darkgrey;">nthreads: </span>'
+ source.data.nthreads[value]
+ '</div>'
);
return divs.join('\\n')
"""
% dict(
zip(stackers, colors)
), # sneak the color mapping into the callback
args={"source": self.source},
)
# Add the HoverTool to the top line renderer.
top_line = None
for line in reversed(self._line_renderers.values()):
if line.visible:
top_line = line
break
self._hover.renderers = [top_line]
self._hover.formatters = {"$index": formatter}
self._last_drawn = time()
self._last_transition_count = self.scheduler.transition_counter
elif self._should_update():
# Possibly update the y range if new threads have been added
max_nthreads = max(self.plugin.nthreads)
if self.root.y_range.end != max_nthreads:
self.root.y_range.end = max_nthreads
# Update the data, only including existing columns, rather than redrawing
# the whole chart.
self.source.data = self._get_timeseries(restrict_to_existing=True)
self._last_transition_count = self.scheduler.transition_counter
| TaskGroupProgress |
python | joblib__joblib | joblib/test/test_memory.py | {
"start": 37618,
"end": 37754
} | class ____(StoreBackendBase):
"""This backend cannot be instantiated and should raise a TypeError."""
pass
| IncompleteStoreBackend |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/javaw.py | {
"start": 6883,
"end": 7518
} | class ____(JTask):
color = 'GREEN'
run_str = '${JAR} ${JARCREATE} ${TGT} ${JAROPTS}'
def runnable_status(self):
for t in self.run_after:
if not t.hasrun:
return Task.ASK_LATER
if not self.inputs:
try:
self.inputs = [
x for x in self.basedir.ant_glob(JAR_RE, remove=False, quiet=True) if id(x) != id(self.outputs[0])
]
except Exception:
raise Errors.WafError('Could not find the basedir %r for %r' % (self.basedir, self))
return super(jar_create, self).runnable_status()
| jar_create |
python | davidhalter__jedi | jedi/inference/value/namespace.py | {
"start": 391,
"end": 741
} | class ____(ValueNameMixin, AbstractNameDefinition):
"""
Accessing names for implicit namespace packages should infer to nothing.
This object will prevent Jedi from raising exceptions
"""
def __init__(self, implicit_ns_value, string_name):
self._value = implicit_ns_value
self.string_name = string_name
| ImplicitNSName |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_optim_state.py | {
"start": 3152,
"end": 3921
} | class ____(torch.nn.Module):
"""
Used to define interesting nested structure for FSDP wrapping.
BlockB
weight
Bias
bias
Bias
bias
"""
def __init__(self, in_dim: int, out_dim: int) -> None:
super().__init__()
assert all(v > 0 for v in (in_dim, out_dim))
torch.manual_seed(0)
self.weight = torch.nn.Parameter(torch.randn((in_dim, out_dim)))
self.bias_module0 = Bias(out_dim)
self.bias_module1 = Bias(out_dim)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = x @ self.weight
x = self.bias_module0(x)
x = self.relu(x) # ensure biases have different gradients
x = self.bias_module1(x)
return x
| BlockB |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorMetadataDefinitionV0.py | {
"start": 8503,
"end": 8620
} | class ____(BaseModel):
class Config:
extra = Extra.forbid
pypi: Optional[PyPi] = None
| RemoteRegistries |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0007_add-automation-rules.py | {
"start": 218,
"end": 4582
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0042_increase_env_variable_value_max_length"),
("contenttypes", "0002_remove_content_type_name"),
("builds", "0006_add_config_field"),
]
operations = [
migrations.CreateModel(
name="VersionAutomationRule",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
(
"priority",
models.IntegerField(
help_text="A lower number (0) means a higher priority",
verbose_name="Rule priority",
),
),
(
"match_arg",
models.CharField(
help_text="Value used for the rule to match the version",
max_length=255,
verbose_name="Match argument",
),
),
(
"action",
models.CharField(
choices=[
("activate-version", "Activate version on match"),
("set-default-version", "Set as default version on match"),
],
max_length=32,
verbose_name="Action",
),
),
(
"action_arg",
models.CharField(
blank=True,
help_text="Value used for the action to perfom an operation",
max_length=255,
null=True,
verbose_name="Action argument",
),
),
(
"version_type",
models.CharField(
choices=[
("branch", "Branch"),
("tag", "Tag"),
("unknown", "Unknown"),
],
max_length=32,
verbose_name="Version type",
),
),
(
"polymorphic_ctype",
models.ForeignKey(
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="polymorphic_builds.versionautomationrule_set+",
to="contenttypes.ContentType",
),
),
(
"project",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="automation_rules",
to="projects.Project",
),
),
],
options={
"ordering": ("priority", "-modified", "-created"),
# 'manager_inheritance_from_future': True,
},
),
migrations.CreateModel(
name="RegexAutomationRule",
fields=[],
options={
"proxy": True,
# 'manager_inheritance_from_future': True,
"indexes": [],
},
bases=("builds.versionautomationrule",),
),
migrations.AlterUniqueTogether(
name="versionautomationrule",
unique_together=set([("project", "priority")]),
),
]
| Migration |
python | huggingface__transformers | src/transformers/models/lfm2_vl/modeling_lfm2_vl.py | {
"start": 5398,
"end": 6411
} | class ____(BaseModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
image_hidden_states: Optional[torch.FloatTensor] = None
@auto_docstring(
custom_intro="""
The Lfm2Vl model which consists of a vision backbone and a language model, without a language modeling head.
"""
)
| Lfm2VlModelOutputWithPast |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 1200,
"end": 1250
} | class ____(A2):
def m2(self, x):
pass
| C2 |
python | google__jax | jax/experimental/sparse/bcoo.py | {
"start": 117078,
"end": 129499
} | class ____(JAXSparse):
"""Experimental batched COO matrix implemented in JAX
Args:
(data, indices) : data and indices in batched COO format.
shape : shape of sparse array.
Attributes:
data : ndarray of shape ``[*batch_dims, nse, *dense_dims]`` containing the
explicitly stored data within the sparse matrix.
indices : ndarray of shape ``[*batch_dims, nse, n_sparse]`` containing the
indices of the explicitly stored data. Duplicate entries will be summed.
Examples:
Create a sparse array from a dense array:
>>> M = jnp.array([[0., 2., 0.], [1., 0., 4.]])
>>> M_sp = BCOO.fromdense(M)
>>> M_sp
BCOO(float32[2, 3], nse=3)
Examine the internal representation:
>>> M_sp.data
Array([2., 1., 4.], dtype=float32)
>>> M_sp.indices
Array([[0, 1],
[1, 0],
[1, 2]], dtype=int32)
Create a dense array from a sparse array:
>>> M_sp.todense()
Array([[0., 2., 0.],
[1., 0., 4.]], dtype=float32)
Create a sparse array from COO data & indices:
>>> data = jnp.array([1., 3., 5.])
>>> indices = jnp.array([[0, 0],
... [1, 1],
... [2, 2]])
>>> mat = BCOO((data, indices), shape=(3, 3))
>>> mat
BCOO(float32[3, 3], nse=3)
>>> mat.todense()
Array([[1., 0., 0.],
[0., 3., 0.],
[0., 0., 5.]], dtype=float32)
"""
# Note: additional BCOO methods are defined in transform.py
data: Array
indices: Array
shape: Shape
nse = property(lambda self: self.indices.shape[-2])
dtype = property(lambda self: self.data.dtype)
n_batch = property(lambda self: self.indices.ndim - 2)
n_sparse = property(lambda self: self.indices.shape[-1])
n_dense = property(lambda self: self.data.ndim - 1 - self.n_batch)
indices_sorted: bool
unique_indices: bool
_info = property(lambda self: SparseInfo(self.shape, self.indices_sorted,
self.unique_indices))
_bufs = property(lambda self: (self.data, self.indices))
def __init__(self, args: tuple[Array, Array], *, shape: Sequence[int],
indices_sorted: bool = False, unique_indices: bool = False):
self.data, self.indices = map(jnp.asarray, args)
self.indices_sorted = indices_sorted
self.unique_indices = unique_indices
super().__init__(args, shape=tuple(shape))
_validate_bcoo(self.data, self.indices, self.shape)
def __repr__(self):
name = self.__class__.__name__
try:
nse = self.nse
n_batch = self.n_batch
n_dense = self.n_dense
dtype = self.dtype
shape = list(self.shape)
except:
repr_ = f"{name}(<invalid>)"
else:
extra = f", {nse=}"
if n_batch: extra += f", {n_batch=}"
if n_dense: extra += f", {n_dense=}"
repr_ = f"{name}({dtype}{shape}{extra})"
if isinstance(self.data, core.Tracer):
repr_ = f"{type(self.data).__name__}[{repr_}]"
return repr_
# Stub methods: these are defined in transform.py
def reshape(self, *args, **kwargs) -> BCOO:
raise NotImplementedError("BCOO.reshape")
def astype(self, *args, **kwargs) -> BCOO:
raise NotImplementedError("BCOO.astype")
def sum(self) -> BCOO:
raise NotImplementedError("BCOO.sum")
@classmethod
def fromdense(cls, mat: Array, *, nse: int | None = None, index_dtype: DTypeLike = np.int32,
n_dense: int = 0, n_batch: int = 0) -> BCOO:
"""Create a BCOO array from a (dense) :class:`~jax.Array`."""
return bcoo_fromdense(
mat, nse=nse, index_dtype=index_dtype, n_dense=n_dense, n_batch=n_batch)
@classmethod
def from_scipy_sparse(cls, mat, *, index_dtype: DTypeLike | None=None,
n_dense: int = 0, n_batch: int = 0) -> BCOO:
"""Create a BCOO array from a :mod:`scipy.sparse` array."""
if n_dense != 0 or n_batch != 0:
raise NotImplementedError("BCOO.fromscipy with nonzero n_dense/n_batch")
mat = mat.tocoo()
data = jnp.asarray(mat.data)
indices = jnp.column_stack((mat.row, mat.col)).astype(
index_dtype or jnp.int32)
# TODO: determines sorted and unique indices for scipy conversion.
return cls((data, indices), shape=mat.shape, indices_sorted=False,
unique_indices=False)
@classmethod
def _empty(cls, shape: Shape, *, dtype: DTypeLike | None = None, index_dtype: DTypeLike = 'int32',
n_dense: int = 0, n_batch: int = 0, nse: int = 0) -> BCOO:
"""Create an empty BCOO instance. Public method is sparse.empty()."""
shape = tuple(shape)
n_sparse = len(shape) - n_dense - n_batch
if n_sparse < 0 or n_dense < 0 or n_batch < 0 or nse < 0:
raise ValueError(f"Invalid inputs: {shape=}, {n_dense=}, {n_batch=}, {nse=}")
batch_shape, sparse_shape, dense_shape = split_list(shape, [n_batch, n_sparse])
data = jnp.zeros((*batch_shape, nse, *dense_shape), dtype)
indices = jnp.full((*batch_shape, nse, n_sparse), jnp.array(sparse_shape), index_dtype)
return cls((data, indices), shape=shape, indices_sorted=True,
unique_indices=True)
@classmethod
def _eye(cls, N: int, M: int, k: int, *, dtype: DTypeLike | None = None,
index_dtype: DTypeLike = 'int32', n_batch: int = 0, n_dense: int = 0) -> BCOO:
n_sparse = 2 - n_batch - n_dense
if n_sparse < 0 or n_dense < 0 or n_batch < 0:
raise ValueError(f"Invalid inputs: shape={(N, M)}, {n_dense=}, {n_batch=}")
if k > 0:
diag_size = min(N, M - k)
else:
diag_size = min(N + k, M)
if diag_size <= 0:
# if k is out of range, return an empty matrix.
return cls._empty((N, M), dtype=dtype, index_dtype=index_dtype,
n_batch=n_batch, n_dense=n_dense)
if n_dense > 0 or n_batch > 1:
# These cases explicitly store all the zeros, so fall back to fromdense.
return cls.fromdense(jnp.eye(N, M, k, dtype=dtype),
n_batch=n_batch, n_dense=n_dense,
index_dtype=index_dtype)
if n_batch == 0:
data = jnp.ones(diag_size, dtype=dtype)
idx = jnp.arange(diag_size, dtype=index_dtype)
zero = _const(idx, 0)
k = _const(idx, k)
indices = jnp.column_stack([
lax.sub(idx, lax.cond(k >= 0, lambda: zero, lambda: k)),
lax.add(idx, lax.cond(k <= 0, lambda: zero, lambda: k))])
else:
data = jnp.ones(N, dtype=dtype)
indices = jnp.arange(N, dtype=index_dtype)
indices = indices + _const(indices, k)
if k < 0:
data = data.at[:abs(k)].set(0)
indices = indices.at[:abs(k)].set(M)
elif k > 0:
data = data.at[M - abs(k):].set(0)
indices = indices.at[M - abs(k)].set(M)
data = data[:, None]
indices = indices[:, None, None]
return cls((data, indices), shape=(N, M), indices_sorted=True,
unique_indices=True)
def update_layout(self, *, n_batch: int | None = None, n_dense: int | None = None,
on_inefficient: str = 'error') -> BCOO:
"""Update the storage layout (i.e. n_batch & n_dense) of a BCOO matrix.
In many cases this can be done without introducing undue storage overhead. However,
increasing ``mat.n_batch`` or ``mat.n_dense`` will lead to very inefficient storage,
with many explicitly-stored zeros, unless the new batch or dense dimensions have size
0 or 1. In such cases, ``update_layout`` will raise a :class:`SparseEfficiencyError`.
This can be silenced by specifying the ``on_inefficient`` argument.
Args:
n_batch : optional(int) the number of batch dimensions in the output matrix. If None,
then n_batch = mat.n_batch.
n_dense : optional(int) the number of dense dimensions in the output matrix. If None,
then n_dense = mat.n_dense.
on_inefficient : optional(string), one of ``['error', 'warn', None]``. Specify the
behavior in case of an inefficient reconfiguration. This is defined as a reconfiguration
where the size of the resulting representation is much larger than the size of the
input representation.
Returns:
mat_out : BCOO array
A BCOO array representing the same sparse array as the input, with the specified
layout. ``mat_out.todense()`` will match ``mat.todense()`` up to appropriate precision.
"""
return bcoo_update_layout(self, n_batch=n_batch, n_dense=n_dense, on_inefficient=on_inefficient)
def sum_duplicates(self, nse: int | None = None, remove_zeros: bool = True) -> BCOO:
"""Return a copy of the array with duplicate indices summed.
Additionally, this operation will result in explicit zero entries removed, and
indices being sorted in lexicographic order.
Because the size of the resulting representation depends on the values in the
arrays, this operation is not compatible with JIT or other transforms. To use
``sum_duplicates`` in such cases, you may pass a value to `nse` to specify the
desired size of the output representation.
Args:
nse : integer (optional), if specified, gives the number of specified elements in
the output sparse representation; if it is larger than the number required, data
will be padded with zeros and indices will be padded with out-of-bounds values.
If it is smaller than the number required, data will be silently discarded.
remove_zeros : bool (default=True). If True, remove explicit zeros from the data
as part of summing duplicates. If False, then explicit zeros at unique indices
will remain among the specified elements. Note: remove_zeros=True is incompatible
with autodiff.
"""
if remove_zeros:
return bcoo_eliminate_zeros(self, nse=nse)
else:
return bcoo_sum_duplicates(self, nse=nse)
def sort_indices(self) -> BCOO:
"""Return a copy of the matrix with indices sorted."""
return bcoo_sort_indices(self)
def todense(self) -> Array:
"""Create a dense version of the array."""
return bcoo_todense(self)
def transpose(self, axes: Sequence[int] | None = None) -> BCOO:
"""Create a new array containing the transpose."""
perm: list[int] = list(range(self.ndim)[::-1] if axes is None else axes)
mat_T = bcoo_transpose(self, permutation=perm)
shape_T = tuple(self.shape[i] for i in perm)
sparse_perm = [p - self.n_batch
for p in perm[self.n_batch: self.n_batch + self.n_sparse]]
if tuple(sparse_perm) == tuple(range(self.n_sparse)):
is_sorted = self.indices_sorted
else:
# TODO: address the corner cases that the transposed indices are sorted.
# possibly use permutation?
is_sorted = False
return BCOO((mat_T.data, mat_T.indices), shape=shape_T,
indices_sorted=is_sorted, unique_indices=self.unique_indices)
def tree_flatten(self):
return (self.data, self.indices), self._info._asdict()
@classmethod
def tree_unflatten(cls, aux_data, children):
obj = object.__new__(cls)
obj.data, obj.indices = children
if aux_data.keys() != {'shape', 'indices_sorted', 'unique_indices'}:
raise ValueError(f"BCOO.tree_unflatten: invalid {aux_data=}")
obj.__dict__.update(**aux_data)
return obj
# vmappable handlers
def _bcoo_to_elt(cont, _, val, axis):
if axis is None:
return val
if axis >= val.n_batch:
raise ValueError(f"Cannot map in_axis={axis} for BCOO array with n_batch={val.n_batch}. "
"in_axes for batched BCOO operations must correspond to a batch dimension.")
return BCOO((cont(val.data, axis), cont(val.indices, axis)),
shape=val.shape[:axis] + val.shape[axis + 1:],
indices_sorted=val.indices_sorted, unique_indices=val.unique_indices)
def _bcoo_from_elt(cont, axis_size, elt, axis):
if axis is None:
return elt
if axis > elt.n_batch:
raise ValueError(f"BCOO: cannot add out_axis={axis} for BCOO array with n_batch={elt.n_batch}. "
"BCOO batch axes must be a contiguous block of leading dimensions.")
return BCOO((cont(axis_size, elt.data, axis), cont(axis_size, elt.indices, axis)),
shape=elt.shape[:axis] + (axis_size,) + elt.shape[axis:],
indices_sorted=elt.indices_sorted, unique_indices=elt.unique_indices)
batching.register_vmappable(BCOO, int, int, _bcoo_to_elt, _bcoo_from_elt, None)
| BCOO |
python | openai__gym | tests/vector/utils.py | {
"start": 2225,
"end": 2623
} | class ____(gym.Space):
"""Minimal custom observation space."""
def sample(self):
return self.np_random.integers(0, 10, ())
def contains(self, x):
return 0 <= x <= 10
def __eq__(self, other):
return isinstance(other, CustomSpace)
custom_spaces = [
CustomSpace(),
Tuple((CustomSpace(), Box(low=0, high=255, shape=(), dtype=np.uint8))),
]
| CustomSpace |
python | yangshun__tech-interview-handbook | apps/website/experimental/utilities/python/trie.py | {
"start": 0,
"end": 2303
} | class ____(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.d = {}
def insert(self, word):
"""
Inserts a word into the trie.
:type word: str
:rtype: void
"""
curr = self.d
for char in word:
if char not in curr:
curr[char] = {}
curr = curr[char]
curr['#'] = {} # Using an empty dict rather than a boolean value makes recursive traversal easier.
def search(self, word):
"""
Returns if the word is in the trie.
:type word: str
:rtype: bool
"""
curr = self.d
for char in word:
if char in curr:
curr = curr[char]
else:
return False
return '#' in curr
def startsWith(self, prefix):
"""
Returns if there is any word in the trie that starts with the given prefix.
:type prefix: str
:rtype: bool
"""
curr = self.d
for char in prefix:
if char in curr:
curr = curr[char]
else:
return False
return True
def searchRegex(self, word):
"""
Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.
:type word: str
:rtype: bool
"""
def traverse(node, index):
if len(word) == index:
return '#' in node
char = word[index]
if char == '.':
for key in node.keys():
if traverse(node[key], index+1):
return True
return False
else:
if char not in node:
return False
return traverse(node[char], index + 1)
return traverse(self.d, 0)
# Example
trie = Trie()
trie.insert('hello')
print(trie.search('hello') == True)
print(trie.startsWith('hello') == True)
print(trie.startsWith('hel') == True)
print(trie.search('world') == False)
print(trie.startsWith('wor') == False)
print(trie.searchRegex('..llo') == True)
print(trie.searchRegex('..llx') == False)
print(trie.searchRegex('..') == False)
| Trie |
python | django__django | tests/tasks/test_immediate_backend.py | {
"start": 544,
"end": 11169
} | class ____(SimpleTestCase):
def test_using_correct_backend(self):
self.assertEqual(default_task_backend, task_backends["default"])
self.assertIsInstance(task_backends["default"], ImmediateBackend)
self.assertEqual(default_task_backend.alias, "default")
self.assertEqual(default_task_backend.options, {})
def test_enqueue_task(self):
for task in [test_tasks.noop_task, test_tasks.noop_task_async]:
with self.subTest(task):
result = task.enqueue(1, two=3)
self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
self.assertIs(result.is_finished, True)
self.assertIsNotNone(result.started_at)
self.assertIsNotNone(result.last_attempted_at)
self.assertIsNotNone(result.finished_at)
self.assertGreaterEqual(result.started_at, result.enqueued_at)
self.assertGreaterEqual(result.finished_at, result.started_at)
self.assertIsNone(result.return_value)
self.assertEqual(result.task, task)
self.assertEqual(result.args, [1])
self.assertEqual(result.kwargs, {"two": 3})
self.assertEqual(result.attempts, 1)
async def test_enqueue_task_async(self):
for task in [test_tasks.noop_task, test_tasks.noop_task_async]:
with self.subTest(task):
result = await task.aenqueue()
self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
self.assertIs(result.is_finished, True)
self.assertIsNotNone(result.started_at)
self.assertIsNotNone(result.last_attempted_at)
self.assertIsNotNone(result.finished_at)
self.assertGreaterEqual(result.started_at, result.enqueued_at)
self.assertGreaterEqual(result.finished_at, result.started_at)
self.assertIsNone(result.return_value)
self.assertEqual(result.task, task)
self.assertEqual(result.args, [])
self.assertEqual(result.kwargs, {})
self.assertEqual(result.attempts, 1)
def test_catches_exception(self):
test_data = [
(
test_tasks.failing_task_value_error, # Task function.
ValueError, # Expected exception.
"This Task failed due to ValueError", # Expected message.
),
(
test_tasks.failing_task_system_exit,
SystemExit,
"This Task failed due to SystemExit",
),
]
for task, exception, message in test_data:
with (
self.subTest(task),
self.assertLogs("django.tasks", level="ERROR") as captured_logs,
):
result = task.enqueue()
self.assertEqual(len(captured_logs.output), 1)
self.assertIn(message, captured_logs.output[0])
self.assertEqual(result.status, TaskResultStatus.FAILED)
with self.assertRaisesMessage(ValueError, "Task failed"):
result.return_value
self.assertIs(result.is_finished, True)
self.assertIsNotNone(result.started_at)
self.assertIsNotNone(result.last_attempted_at)
self.assertIsNotNone(result.finished_at)
self.assertGreaterEqual(result.started_at, result.enqueued_at)
self.assertGreaterEqual(result.finished_at, result.started_at)
self.assertEqual(result.errors[0].exception_class, exception)
traceback = result.errors[0].traceback
self.assertIs(
traceback
and traceback.endswith(f"{exception.__name__}: {message}\n"),
True,
traceback,
)
self.assertEqual(result.task, task)
self.assertEqual(result.args, [])
self.assertEqual(result.kwargs, {})
def test_throws_keyboard_interrupt(self):
with self.assertRaises(KeyboardInterrupt):
with self.assertNoLogs("django.tasks", level="ERROR"):
default_task_backend.enqueue(
test_tasks.failing_task_keyboard_interrupt, [], {}
)
def test_complex_exception(self):
with self.assertLogs("django.tasks", level="ERROR"):
result = test_tasks.complex_exception.enqueue()
self.assertEqual(result.status, TaskResultStatus.FAILED)
with self.assertRaisesMessage(ValueError, "Task failed"):
result.return_value
self.assertIsNotNone(result.started_at)
self.assertIsNotNone(result.last_attempted_at)
self.assertIsNotNone(result.finished_at)
self.assertGreaterEqual(result.started_at, result.enqueued_at)
self.assertGreaterEqual(result.finished_at, result.started_at)
self.assertIsNone(result._return_value)
self.assertEqual(result.errors[0].exception_class, ValueError)
self.assertIn(
'ValueError(ValueError("This task failed"))', result.errors[0].traceback
)
self.assertEqual(result.task, test_tasks.complex_exception)
self.assertEqual(result.args, [])
self.assertEqual(result.kwargs, {})
def test_complex_return_value(self):
with self.assertLogs("django.tasks", level="ERROR"):
result = test_tasks.complex_return_value.enqueue()
self.assertEqual(result.status, TaskResultStatus.FAILED)
self.assertIsNotNone(result.started_at)
self.assertIsNotNone(result.last_attempted_at)
self.assertIsNotNone(result.finished_at)
self.assertGreaterEqual(result.started_at, result.enqueued_at)
self.assertGreaterEqual(result.finished_at, result.started_at)
self.assertIsNone(result._return_value)
self.assertEqual(result.errors[0].exception_class, TypeError)
self.assertIn("Unsupported type", result.errors[0].traceback)
def test_result(self):
result = default_task_backend.enqueue(
test_tasks.calculate_meaning_of_life, [], {}
)
self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
self.assertEqual(result.return_value, 42)
async def test_result_async(self):
result = await default_task_backend.aenqueue(
test_tasks.calculate_meaning_of_life, [], {}
)
self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
self.assertEqual(result.return_value, 42)
async def test_cannot_get_result(self):
with self.assertRaisesMessage(
NotImplementedError,
"This backend does not support retrieving or refreshing results.",
):
default_task_backend.get_result("123")
with self.assertRaisesMessage(
NotImplementedError,
"This backend does not support retrieving or refreshing results.",
):
await default_task_backend.aget_result(123)
async def test_cannot_refresh_result(self):
result = await default_task_backend.aenqueue(
test_tasks.calculate_meaning_of_life, (), {}
)
with self.assertRaisesMessage(
NotImplementedError,
"This backend does not support retrieving or refreshing results.",
):
await result.arefresh()
with self.assertRaisesMessage(
NotImplementedError,
"This backend does not support retrieving or refreshing results.",
):
result.refresh()
def test_cannot_pass_run_after(self):
with self.assertRaisesMessage(
InvalidTask,
"Backend does not support run_after.",
):
default_task_backend.validate_task(
test_tasks.failing_task_value_error.using(run_after=timezone.now())
)
def test_enqueue_logs(self):
with self.assertLogs("django.tasks", level="DEBUG") as captured_logs:
result = test_tasks.noop_task.enqueue()
self.assertEqual(len(captured_logs.output), 3)
self.assertIn("enqueued", captured_logs.output[0])
self.assertIn(result.id, captured_logs.output[0])
self.assertIn("state=RUNNING", captured_logs.output[1])
self.assertIn(result.id, captured_logs.output[1])
self.assertIn("state=SUCCESSFUL", captured_logs.output[2])
self.assertIn(result.id, captured_logs.output[2])
def test_failed_logs(self):
with self.assertLogs("django.tasks", level="DEBUG") as captured_logs:
result = test_tasks.failing_task_value_error.enqueue()
self.assertEqual(len(captured_logs.output), 3)
self.assertIn("state=RUNNING", captured_logs.output[1])
self.assertIn(result.id, captured_logs.output[1])
self.assertIn("state=FAILED", captured_logs.output[2])
self.assertIn(result.id, captured_logs.output[2])
def test_takes_context(self):
result = test_tasks.get_task_id.enqueue()
self.assertEqual(result.return_value, result.id)
def test_context(self):
result = test_tasks.test_context.enqueue(1)
self.assertEqual(result.status, TaskResultStatus.SUCCESSFUL)
def test_validate_on_enqueue(self):
task_with_custom_queue_name = test_tasks.noop_task.using(
queue_name="unknown_queue"
)
with override_settings(
TASKS={
"default": {
"BACKEND": "django.tasks.backends.immediate.ImmediateBackend",
"QUEUES": ["queue-1"],
}
}
):
with self.assertRaisesMessage(
InvalidTask, "Queue 'unknown_queue' is not valid for backend"
):
task_with_custom_queue_name.enqueue()
async def test_validate_on_aenqueue(self):
task_with_custom_queue_name = test_tasks.noop_task.using(
queue_name="unknown_queue"
)
with override_settings(
TASKS={
"default": {
"BACKEND": "django.tasks.backends.immediate.ImmediateBackend",
"QUEUES": ["queue-1"],
}
}
):
with self.assertRaisesMessage(
InvalidTask, "Queue 'unknown_queue' is not valid for backend"
):
await task_with_custom_queue_name.aenqueue()
| ImmediateBackendTestCase |
python | pytorch__pytorch | torch/fx/proxy.py | {
"start": 18080,
"end": 18165
} | class ____(ValueError):
pass
@compatibility(is_backward_compatible=True)
| TraceError |
python | django__django | tests/model_fields/test_manytomanyfield.py | {
"start": 184,
"end": 3263
} | class ____(SimpleTestCase):
def test_abstract_model_pending_operations(self):
"""
Many-to-many fields declared on abstract models should not add lazy
relations to resolve relationship declared as string (#24215).
"""
pending_ops_before = list(apps._pending_operations.items())
class AbstractManyToManyModel(models.Model):
fk = models.ForeignKey("missing.FK", models.CASCADE)
class Meta:
abstract = True
self.assertIs(AbstractManyToManyModel._meta.apps, apps)
self.assertEqual(
pending_ops_before,
list(apps._pending_operations.items()),
"Pending lookup added for a many-to-many field on an abstract model",
)
@isolate_apps("model_fields", "model_fields.tests")
def test_abstract_model_app_relative_foreign_key(self):
class AbstractReferent(models.Model):
reference = models.ManyToManyField("Referred", through="Through")
class Meta:
app_label = "model_fields"
abstract = True
def assert_app_model_resolved(label):
class Referred(models.Model):
class Meta:
app_label = label
class Through(models.Model):
referred = models.ForeignKey("Referred", on_delete=models.CASCADE)
referent = models.ForeignKey(
"ConcreteReferent", on_delete=models.CASCADE
)
class Meta:
app_label = label
class ConcreteReferent(AbstractReferent):
class Meta:
app_label = label
self.assertEqual(
ConcreteReferent._meta.get_field("reference").related_model, Referred
)
self.assertEqual(ConcreteReferent.reference.through, Through)
assert_app_model_resolved("model_fields")
assert_app_model_resolved("tests")
def test_invalid_to_parameter(self):
msg = (
"ManyToManyField(1) is invalid. First parameter to "
"ManyToManyField must be either a model, a model name, or the "
"string 'self'"
)
with self.assertRaisesMessage(TypeError, msg):
class MyModel(models.Model):
m2m = models.ManyToManyField(1)
@isolate_apps("model_fields")
def test_through_db_table_mutually_exclusive(self):
class Child(models.Model):
pass
class Through(models.Model):
referred = models.ForeignKey(Child, on_delete=models.CASCADE)
referent = models.ForeignKey(Child, on_delete=models.CASCADE)
msg = "Cannot specify a db_table if an intermediary model is used."
with self.assertRaisesMessage(ValueError, msg):
class MyModel(models.Model):
m2m = models.ManyToManyField(
Child,
through="Through",
db_table="custom_name",
)
| ManyToManyFieldTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass2.py | {
"start": 1563,
"end": 1600
} | class ____(Generic[T1, T4]): ...
| ClassJ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.