language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/todo.py | {
"start": 649,
"end": 905
} | class ____(TypedDict):
"""A single todo item with content and status."""
content: str
"""The content/description of the todo item."""
status: Literal["pending", "in_progress", "completed"]
"""The current status of the todo item."""
| Todo |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 24994,
"end": 25130
} | class ____(FloatingPoint):
"""
Handles the float datatype. Single-precision IEEE floating-point.
"""
format = "f4"
| Float |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_header_image07.py | {
"start": 315,
"end": 1046
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image07.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("B3", self.image_dir + "red.jpg")
worksheet.set_header("&L&G", {"image_left": self.image_dir + "blue.jpg"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/unit_tests/test_connector_runner.py | {
"start": 462,
"end": 6711
} | class ____:
@pytest.fixture
def dev_image_name(self):
return "airbyte/source-faker:dev"
@pytest.fixture
def released_image_name(self):
return "airbyte/source-faker:latest"
async def test_get_container_env_variable_value(self, source_faker_container):
runner = connector_runner.ConnectorRunner(source_faker_container, custom_environment_variables={"FOO": "BAR"})
assert await runner.get_container_env_variable_value("FOO") == "BAR"
@pytest.mark.parametrize("deployment_mode", ["oss", "cloud"])
async def test_set_deployment_mode_env(self, source_faker_container, deployment_mode):
runner = connector_runner.ConnectorRunner(source_faker_container, deployment_mode=deployment_mode)
assert await runner.get_container_env_variable_value("DEPLOYMENT_MODE") == deployment_mode.upper()
def test_parse_airbyte_messages_from_command_output(self, mocker, tmp_path):
old_configuration_path = tmp_path / "config.json"
new_configuration = {"field_a": "new_value_a"}
mock_logging = mocker.MagicMock()
mocker.patch.object(connector_runner, "logging", mock_logging)
mocker.patch.object(connector_runner, "docker")
raw_command_output = "\n".join(
[
AirbyteMessage(
type=AirbyteMessageType.RECORD, record=AirbyteRecordMessage(stream="test_stream", data={"foo": "bar"}, emitted_at=1.0)
).json(exclude_unset=False),
AirbyteMessage(
type=AirbyteMessageType.CONTROL,
control=AirbyteControlMessage(
type=OrchestratorType.CONNECTOR_CONFIG,
emitted_at=1.0,
connectorConfig=AirbyteControlConnectorConfigMessage(config=new_configuration),
),
).json(exclude_unset=False),
"invalid message",
]
)
mocker.patch.object(connector_runner.ConnectorRunner, "_persist_new_configuration")
runner = connector_runner.ConnectorRunner(
mocker.Mock(),
connector_configuration_path=old_configuration_path,
)
runner.parse_airbyte_messages_from_command_output(raw_command_output)
runner._persist_new_configuration.assert_called_once_with(new_configuration, 1)
mock_logging.warning.assert_called_once()
@pytest.mark.parametrize(
"pass_configuration_path, old_configuration, new_configuration, new_configuration_emitted_at, expect_new_configuration",
[
pytest.param(
True,
{"field_a": "value_a"},
{"field_a": "value_a"},
1,
False,
id="Config unchanged: No new configuration persisted",
),
pytest.param(
True, {"field_a": "value_a"}, {"field_a": "new_value_a"}, 1, True, id="Config changed: New configuration persisted"
),
pytest.param(
False,
{"field_a": "value_a"},
{"field_a": "new_value_a"},
1,
False,
id="Config changed but persistence is disable: New configuration not persisted",
),
],
)
def test_persist_new_configuration(
self,
mocker,
tmp_path,
pass_configuration_path,
old_configuration,
new_configuration,
new_configuration_emitted_at,
expect_new_configuration,
):
if pass_configuration_path:
old_configuration_path = tmp_path / "config.json"
with open(old_configuration_path, "w") as old_configuration_file:
json.dump(old_configuration, old_configuration_file)
else:
old_configuration_path = None
runner = connector_runner.ConnectorRunner(mocker.MagicMock(), connector_configuration_path=old_configuration_path)
new_configuration_path = runner._persist_new_configuration(new_configuration, new_configuration_emitted_at)
if not expect_new_configuration:
assert new_configuration_path is None
else:
assert new_configuration_path == tmp_path / "updated_configurations" / f"config|{new_configuration_emitted_at}.json"
async def test_get_connector_container(mocker):
dagger_client = mocker.AsyncMock()
os.environ["CONNECTOR_UNDER_TEST_IMAGE_TAR_PATH"] = "test_tarball_path"
# Mock the functions called within get_connector_container
mocker.patch.object(connector_runner, "get_container_from_id", new=mocker.AsyncMock())
mocker.patch.object(connector_runner, "get_container_from_tarball_path", new=mocker.AsyncMock())
mocker.patch.object(connector_runner, "get_container_from_local_image", new=mocker.AsyncMock())
mocker.patch.object(connector_runner, "get_container_from_dockerhub_image", new=mocker.AsyncMock())
# Test the case when the CONNECTOR_UNDER_TEST_IMAGE_TAR_PATH is set
await connector_runner.get_connector_container(dagger_client, "test_image:tag")
connector_runner.get_container_from_tarball_path.assert_called_with(dagger_client, Path("test_tarball_path"))
# Test the case when the CONNECTOR_CONTAINER_ID is set
Path("/tmp/container_id.txt").write_text("test_container_id")
await connector_runner.get_connector_container(dagger_client, "test_image:tag")
connector_runner.get_container_from_id.assert_called_with(dagger_client, "test_container_id")
Path("/tmp/container_id.txt").unlink()
# Test the case when none of the environment variables are set
os.environ.pop("CONNECTOR_UNDER_TEST_IMAGE_TAR_PATH")
await connector_runner.get_connector_container(dagger_client, "test_image:tag")
connector_runner.get_container_from_local_image.assert_called_with(dagger_client, "test_image:tag")
# Test the case when all previous attempts fail
connector_runner.get_container_from_local_image.return_value = None
await connector_runner.get_connector_container(dagger_client, "test_image:tag")
connector_runner.get_container_from_dockerhub_image.assert_called_with(dagger_client, "test_image:tag")
| TestContainerRunner |
python | apache__airflow | providers/http/src/airflow/providers/http/notifications/http.py | {
"start": 1128,
"end": 3812
} | class ____(BaseNotifier):
"""
HTTP Notifier.
Sends HTTP requests to notify external systems.
:param http_conn_id: HTTP connection id that has the base URL and optional authentication credentials.
:param endpoint: The endpoint to be called i.e. resource/v1/query?
:param method: The HTTP method to use. Defaults to POST.
:param data: Payload to be uploaded or request parameters
:param json: JSON payload to be uploaded
:param headers: Additional headers to be passed through as a dictionary
:param extra_options: Additional options to be used when executing the request
"""
template_fields = ("http_conn_id", "endpoint", "data", "json", "headers", "extra_options")
def __init__(
self,
*,
http_conn_id: str = HttpHook.default_conn_name,
endpoint: str | None = None,
method: str = "POST",
data: dict[str, Any] | str | None = None,
json: dict[str, Any] | str | None = None,
headers: dict[str, Any] | None = None,
extra_options: dict[str, Any] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.http_conn_id = http_conn_id
self.endpoint = endpoint
self.method = method
self.data = data
self.json = json
self.headers = headers
self.extra_options = extra_options or {}
@cached_property
def hook(self) -> HttpHook:
"""HTTP Hook."""
return HttpHook(method=self.method, http_conn_id=self.http_conn_id)
@cached_property
def async_hook(self) -> HttpAsyncHook:
"""HTTP Async Hook."""
return HttpAsyncHook(method=self.method, http_conn_id=self.http_conn_id)
def notify(self, context: Context) -> None:
"""Send HTTP notification (sync)."""
resp = self.hook.run(
endpoint=self.endpoint,
data=self.data,
headers=self.headers,
extra_options=self.extra_options,
json=self.json,
)
self.log.debug("HTTP notification sent: %s %s", resp.status_code, resp.url)
async def async_notify(self, context: Context) -> None:
"""Send HTTP notification (async)."""
async with aiohttp.ClientSession() as session:
resp = await self.async_hook.run(
session=session,
endpoint=self.endpoint,
data=self.data,
json=self.json,
headers=self.headers,
extra_options=self.extra_options,
)
self.log.debug("HTTP notification sent (async): %s %s", resp.status, resp.url)
send_http_notification = HttpNotifier
| HttpNotifier |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_build_notifications.py | {
"start": 12572,
"end": 13683
} | class ____(TestCase):
def setUp(self):
self.project = get(Project)
self.version = get(Version, project=self.project)
self.build = get(Build, version=self.version)
def test_webhook_form_url_length(self):
form = WebHookForm(
{
"url": "https://foobar.com",
"payload": "{}",
"events": [WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
},
project=self.project,
)
self.assertTrue(form.is_valid())
form = WebHookForm(
{
"url": "foo" * 500,
"payload": "{}",
"events": [WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
},
project=self.project,
)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
"url": [
"Enter a valid URL.",
"Ensure this value has at most 600 characters (it has 1507).",
],
},
)
| TestForms |
python | pytransitions__transitions | tests/test_mermaid.py | {
"start": 710,
"end": 6311
} | class ____(TestDiagrams):
graph_engine = "mermaid"
edge_re = re.compile(r"^\s+(?P<src>\w+)\s*-->\s*(?P<dst>\w+)\s*:\s*(?P<attr>.*)$")
node_re = re.compile(r"^\s+state \"\S+(\s+(?P<attr>\[.*\]?))?\" as (?P<node>\S+)")
def test_diagram(self):
m = self.machine_cls(states=self.states, transitions=self.transitions, initial='A', auto_transitions=False,
title='a test', graph_engine=self.graph_engine)
graph = m.get_graph()
self.assertIsNotNone(graph)
_, nodes, edges = self.parse_dot(graph)
# Test that graph properties match the Machine
self.assertEqual(set(m.states.keys()), nodes)
self.assertEqual(len(edges), len(self.transitions))
# write diagram to temp file
target = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
graph.draw(target.name, format='png', prog='dot')
self.assertTrue(os.path.getsize(target.name) > 0)
# backwards compatibility check
m.get_graph().draw(target.name, format='png', prog='dot')
self.assertTrue(os.path.getsize(target.name) > 0)
# cleanup temp file
target.close()
os.unlink(target.name)
def test_to_method_filtering(self):
m = self.machine_cls(states=['A', 'B', 'C'], initial='A', graph_engine=self.graph_engine)
m.add_transition('to_state_A', 'B', 'A')
m.add_transition('to_end', '*', 'C')
_, _, edges = self.parse_dot(m.get_graph())
self.assertEqual(len([e for e in edges if e == 'to_state_A']), 1)
self.assertEqual(len([e for e in edges if e == 'to_end']), 3)
m2 = self.machine_cls(states=['A', 'B', 'C'], initial='A', show_auto_transitions=True,
graph_engine=self.graph_engine)
_, _, edges = self.parse_dot(m2.get_graph())
self.assertEqual(len(edges), 9)
self.assertEqual(len([e for e in edges if e == 'to_A']), 3)
self.assertEqual(len([e for e in edges if e == 'to_C']), 3)
def test_loops(self):
m = self.machine_cls(states=['A'], initial='A', graph_engine=self.graph_engine)
m.add_transition('reflexive', 'A', '=')
m.add_transition('fixed', 'A', None)
g1 = m.get_graph()
dot_string, _, _ = self.parse_dot(g1)
try:
self.assertRegex(dot_string, r'A\s+-->\s+A:\s*(fixed|reflexive)')
except AttributeError: # Python 2 backwards compatibility
self.assertRegexpMatches(dot_string, r'A\s+-->\s+A:\s*(fixed|reflexive)')
def test_roi(self):
m = self.machine_cls(states=['A', 'B', 'C', 'D', 'E', 'F'], initial='A', graph_engine=self.graph_engine)
m.add_transition('to_state_A', 'B', 'A')
m.add_transition('to_state_C', 'B', 'C')
m.add_transition('to_state_F', 'B', 'F')
g1 = m.get_graph(show_roi=True)
dot, nodes, edges = self.parse_dot(g1)
self.assertEqual(0, len(edges))
self.assertIn(r'"A"', dot)
# make sure that generating a graph without ROI has not influence on the later generated graph
# this has to be checked since graph.custom_style is a class property and is persistent for multiple
# calls of graph.generate()
m.to_C()
m.to_E()
_ = m.get_graph()
g2 = m.get_graph(show_roi=True)
dot, _, _ = self.parse_dot(g2)
self.assertNotIn(r'label="A\l"', dot)
m.to_B()
g3 = m.get_graph(show_roi=True)
_, nodes, edges = self.parse_dot(g3)
self.assertEqual(len(edges), 3) # to_state_{A,C,F}
self.assertEqual(len(nodes), 5) # B + A,C,F (edges) + E (previous)
def test_label_attribute(self):
class LabelState(self.machine_cls.state_cls): # type: ignore
def __init__(self, *args, **kwargs):
self.label = kwargs.pop('label')
super(LabelState, self).__init__(*args, **kwargs)
class CustomMachine(self.machine_cls): # type: ignore
state_cls = LabelState
m = CustomMachine(states=[{'name': 'A', 'label': 'LabelA'},
{'name': 'B', 'label': 'NotLabelA'}],
transitions=[{'trigger': 'event', 'source': 'A', 'dest': 'B', 'label': 'LabelEvent'}],
initial='A', graph_engine=self.graph_engine)
dot, _, _ = self.parse_dot(m.get_graph())
self.assertIn(r'"LabelA"', dot)
self.assertIn(r'"NotLabelA"', dot)
self.assertIn("LabelEvent", dot)
self.assertNotIn(r'"A"', dot)
self.assertNotIn("event", dot)
def test_binary_stream(self):
from io import BytesIO
m = self.machine_cls(states=['A', 'B', 'C'], initial='A', auto_transitions=True,
title='A test', show_conditions=True, graph_engine=self.graph_engine)
b1 = BytesIO()
g = m.get_graph()
g.draw(b1)
b2 = g.draw(None)
self.assertEqual(b2, b1.getvalue().decode())
b1.close()
def test_update_on_remove_transition(self):
m = self.machine_cls(states=self.states, transitions=self.transitions, initial='A',
graph_engine=self.graph_engine, show_state_attributes=True)
_, _, edges = self.parse_dot(m.get_graph())
assert "walk" in edges
m.remove_transition(trigger="walk", source="A", dest="B")
_, _, edges = self.parse_dot(m.get_graph())
assert not any("walk" == t["trigger"] for t in m.markup["transitions"])
assert "walk" not in edges
| TestMermaidDiagrams |
python | plotly__plotly.py | plotly/graph_objs/parcoords/line/colorbar/_tickfont.py | {
"start": 233,
"end": 9954
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcoords.line.colorbar"
_path_str = "parcoords.line.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.parcoords.line
.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcoords.line.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.line.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | ray-project__ray | python/ray/tune/tests/test_placeholder.py | {
"start": 270,
"end": 343
} | class ____:
def __init__(self, value):
self.value = value
| Dummy |
python | jazzband__django-model-utils | model_utils/managers.py | {
"start": 11590,
"end": 12399
} | class ____(Generic[ModelT]):
@overload
def __init__(self, *args: models.Q):
...
@overload
def __init__(self, **kwargs: object):
...
def __init__(self, *args: models.Q, **kwargs: object):
if args:
self._q = args[0]
else:
self._q = models.Q(**kwargs)
self._order_by: tuple[Any, ...] | None = None
super().__init__()
def order_by(self, *args: Any) -> QueryManager[ModelT]:
self._order_by = args
return cast('QueryManager[ModelT]', self)
def get_queryset(self) -> QuerySet[ModelT]:
qs = super().get_queryset() # type: ignore[misc]
qs = qs.filter(self._q)
if self._order_by is not None:
return qs.order_by(*self._order_by)
return qs
| QueryManagerMixin |
python | pypa__wheel | src/wheel/_bdist_wheel.py | {
"start": 5053,
"end": 21729
} | class ____(Command):
description = "create a wheel distribution"
supported_compressions = {
"stored": ZIP_STORED,
"deflated": ZIP_DEFLATED,
}
user_options = [
("bdist-dir=", "b", "temporary directory for creating the distribution"),
(
"plat-name=",
"p",
"platform name to embed in generated filenames "
f"(default: {get_platform(None)})",
),
(
"keep-temp",
"k",
"keep the pseudo-installation tree around after "
"creating the distribution archive",
),
("dist-dir=", "d", "directory to put final built distributions in"),
("skip-build", None, "skip rebuilding everything (for testing/debugging)"),
(
"relative",
None,
"build the archive using relative paths (default: false)",
),
(
"owner=",
"u",
"Owner name used when creating a tar file [default: current user]",
),
(
"group=",
"g",
"Group name used when creating a tar file [default: current group]",
),
("universal", None, "make a universal wheel (default: false)"),
(
"compression=",
None,
"zipfile compression (one of: {}) (default: 'deflated')".format(
", ".join(supported_compressions)
),
),
(
"python-tag=",
None,
f"Python implementation compatibility tag (default: '{python_tag()}')",
),
(
"build-number=",
None,
"Build number for this particular version. "
"As specified in PEP-0427, this must start with a digit. "
"[default: None]",
),
(
"py-limited-api=",
None,
"Python tag (cp32|cp33|cpNN) for abi3 wheel tag (default: false)",
),
]
boolean_options = ["keep-temp", "skip-build", "relative", "universal"]
def initialize_options(self):
self.bdist_dir: str = None
self.data_dir = None
self.plat_name: str | None = None
self.plat_tag = None
self.format = "zip"
self.keep_temp = False
self.dist_dir: str | None = None
self.egginfo_dir = None
self.root_is_pure: bool | None = None
self.skip_build = None
self.relative = False
self.owner = None
self.group = None
self.universal: bool = False
self.compression: str | int = "deflated"
self.python_tag: str = python_tag()
self.build_number: str | None = None
self.py_limited_api: str | Literal[False] = False
self.plat_name_supplied = False
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command("bdist").bdist_base
self.bdist_dir = os.path.join(bdist_base, "wheel")
egg_info = self.distribution.get_command_obj("egg_info")
egg_info.ensure_finalized() # needed for correct `wheel_dist_name`
self.data_dir = self.wheel_dist_name + ".data"
self.plat_name_supplied = self.plat_name is not None
try:
self.compression = self.supported_compressions[self.compression]
except KeyError:
raise ValueError(f"Unsupported compression: {self.compression}") from None
need_options = ("dist_dir", "plat_name", "skip_build")
self.set_undefined_options("bdist", *zip(need_options, need_options))
self.root_is_pure = not (
self.distribution.has_ext_modules() or self.distribution.has_c_libraries()
)
if self.py_limited_api and not re.match(
PY_LIMITED_API_PATTERN, self.py_limited_api
):
raise ValueError(f"py-limited-api must match '{PY_LIMITED_API_PATTERN}'")
# Support legacy [wheel] section for setting universal
wheel = self.distribution.get_option_dict("wheel")
if "universal" in wheel:
# please don't define this in your global configs
log.warning(
"The [wheel] section is deprecated. Use [bdist_wheel] instead.",
)
val = wheel["universal"][1].strip()
if val.lower() in ("1", "true", "yes"):
self.universal = True
if self.build_number is not None and not self.build_number[:1].isdigit():
raise ValueError("Build tag (build-number) must start with a digit.")
@property
def wheel_dist_name(self):
"""Return distribution full name with - replaced with _"""
components = (
safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version()),
)
if self.build_number:
components += (self.build_number,)
return "-".join(components)
def get_tag(self) -> tuple[str, str, str]:
# bdist sets self.plat_name if unset, we should only use it for purepy
# wheels if the user supplied it.
if self.plat_name_supplied:
plat_name = cast(str, self.plat_name)
elif self.root_is_pure:
plat_name = "any"
else:
# macosx contains system version in platform name so need special handle
if self.plat_name and not self.plat_name.startswith("macosx"):
plat_name = self.plat_name
else:
# on macosx always limit the platform name to comply with any
# c-extension modules in bdist_dir, since the user can specify
# a higher MACOSX_DEPLOYMENT_TARGET via tools like CMake
# on other platforms, and on macosx if there are no c-extension
# modules, use the default platform name.
plat_name = get_platform(self.bdist_dir)
if _is_32bit_interpreter():
if plat_name in ("linux-x86_64", "linux_x86_64"):
plat_name = "linux_i686"
if plat_name in ("linux-aarch64", "linux_aarch64"):
# TODO armv8l, packaging pull request #690 => this did not land
# in pip/packaging yet
plat_name = "linux_armv7l"
plat_name = (
plat_name.lower().replace("-", "_").replace(".", "_").replace(" ", "_")
)
if self.root_is_pure:
if self.universal:
impl = "py2.py3"
else:
impl = self.python_tag
tag = (impl, "none", plat_name)
else:
impl_name = tags.interpreter_name()
impl_ver = tags.interpreter_version()
impl = impl_name + impl_ver
# We don't work on CPython 3.1, 3.0.
if self.py_limited_api and (impl_name + impl_ver).startswith("cp3"):
impl = self.py_limited_api
abi_tag = "abi3"
else:
abi_tag = str(get_abi_tag()).lower()
tag = (impl, abi_tag, plat_name)
# issue gh-374: allow overriding plat_name
supported_tags = [
(t.interpreter, t.abi, plat_name) for t in tags.sys_tags()
]
assert tag in supported_tags, (
f"would build wheel with unsupported tag {tag}"
)
return tag
def run(self):
build_scripts = self.reinitialize_command("build_scripts")
build_scripts.executable = "python"
build_scripts.force = True
build_ext = self.reinitialize_command("build_ext")
build_ext.inplace = False
if not self.skip_build:
self.run_command("build")
install = self.reinitialize_command("install", reinit_subcommands=True)
install.root = self.bdist_dir
install.compile = False
install.skip_build = self.skip_build
install.warn_dir = False
# A wheel without setuptools scripts is more cross-platform.
# Use the (undocumented) `no_ep` option to setuptools'
# install_scripts command to avoid creating entry point scripts.
install_scripts = self.reinitialize_command("install_scripts")
install_scripts.no_ep = True
# Use a custom scheme for the archive, because we have to decide
# at installation time which scheme to use.
for key in ("headers", "scripts", "data", "purelib", "platlib"):
setattr(install, "install_" + key, os.path.join(self.data_dir, key))
basedir_observed = ""
if os.name == "nt":
# win32 barfs if any of these are ''; could be '.'?
# (distutils.command.install:change_roots bug)
basedir_observed = os.path.normpath(os.path.join(self.data_dir, ".."))
self.install_libbase = self.install_lib = basedir_observed
setattr(
install,
"install_purelib" if self.root_is_pure else "install_platlib",
basedir_observed,
)
log.info(f"installing to {self.bdist_dir}")
self.run_command("install")
impl_tag, abi_tag, plat_tag = self.get_tag()
archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"
if not self.relative:
archive_root = self.bdist_dir
else:
archive_root = os.path.join(
self.bdist_dir, self._ensure_relative(install.install_base)
)
self.set_undefined_options("install_egg_info", ("target", "egginfo_dir"))
distinfo_dirname = (
f"{safer_name(self.distribution.get_name())}-"
f"{safer_version(self.distribution.get_version())}.dist-info"
)
distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname)
self.egg2dist(self.egginfo_dir, distinfo_dir)
self.write_wheelfile(distinfo_dir)
# Make the archive
if not os.path.exists(self.dist_dir):
os.makedirs(self.dist_dir)
wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
with WheelFile(wheel_path, "w", self.compression) as wf:
wf.write_files(archive_root)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, "dist_files", []).append(
(
"bdist_wheel",
"{}.{}".format(*sys.version_info[:2]), # like 3.7
wheel_path,
)
)
if not self.keep_temp:
log.info(f"removing {self.bdist_dir}")
if not self.dry_run:
if sys.version_info < (3, 12):
rmtree(self.bdist_dir, onerror=remove_readonly)
else:
rmtree(self.bdist_dir, onexc=remove_readonly_exc)
def write_wheelfile(
self, wheelfile_base: str, generator: str = f"bdist_wheel ({wheel_version})"
):
from email.message import Message
msg = Message()
msg["Wheel-Version"] = "1.0" # of the spec
msg["Generator"] = generator
msg["Root-Is-Purelib"] = str(self.root_is_pure).lower()
if self.build_number is not None:
msg["Build"] = self.build_number
# Doesn't work for bdist_wininst
impl_tag, abi_tag, plat_tag = self.get_tag()
for impl in impl_tag.split("."):
for abi in abi_tag.split("."):
for plat in plat_tag.split("."):
msg["Tag"] = "-".join((impl, abi, plat))
wheelfile_path = os.path.join(wheelfile_base, "WHEEL")
log.info(f"creating {wheelfile_path}")
with open(wheelfile_path, "wb") as f:
BytesGenerator(f, maxheaderlen=0).flatten(msg)
def _ensure_relative(self, path: str) -> str:
# copied from dir_util, deleted
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
@property
def license_paths(self) -> Iterable[str]:
if setuptools_major_version >= 57:
# Setuptools has resolved any patterns to actual file names
return self.distribution.metadata.license_files or ()
files: set[str] = set()
metadata = self.distribution.get_option_dict("metadata")
if setuptools_major_version >= 42:
# Setuptools recognizes the license_files option but does not do globbing
patterns = cast(Sequence[str], self.distribution.metadata.license_files)
else:
# Prior to those, wheel is entirely responsible for handling license files
if "license_files" in metadata:
patterns = metadata["license_files"][1].split()
else:
patterns = ()
if "license_file" in metadata:
warnings.warn(
'The "license_file" option is deprecated. Use "license_files" instead.',
DeprecationWarning,
stacklevel=2,
)
files.add(metadata["license_file"][1])
if not files and not patterns and not isinstance(patterns, list):
patterns = ("LICEN[CS]E*", "COPYING*", "NOTICE*", "AUTHORS*")
for pattern in patterns:
for path in iglob(pattern):
if path.endswith("~"):
log.debug(
f'ignoring license file "{path}" as it looks like a backup'
)
continue
if path not in files and os.path.isfile(path):
log.info(
f'adding license file "{path}" (matched pattern "{pattern}")'
)
files.add(path)
return files
def egg2dist(self, egginfo_path: str, distinfo_path: str):
"""Convert an .egg-info directory into a .dist-info directory"""
def adios(p: str) -> None:
"""Appropriately delete directory, file or link."""
if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
shutil.rmtree(p)
elif os.path.exists(p):
os.unlink(p)
adios(distinfo_path)
if not os.path.exists(egginfo_path):
# There is no egg-info. This is probably because the egg-info
# file/directory is not named matching the distribution name used
# to name the archive file. Check for this case and report
# accordingly.
import glob
pat = os.path.join(os.path.dirname(egginfo_path), "*.egg-info")
possible = glob.glob(pat)
err = f"Egg metadata expected at {egginfo_path} but not found"
if possible:
alt = os.path.basename(possible[0])
err += f" ({alt} found - possible misnamed archive file?)"
raise ValueError(err)
if os.path.isfile(egginfo_path):
# .egg-info is a single file
pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
os.mkdir(distinfo_path)
else:
# .egg-info is a directory
pkginfo_path = os.path.join(egginfo_path, "PKG-INFO")
pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)
# ignore common egg metadata that is useless to wheel
shutil.copytree(
egginfo_path,
distinfo_path,
ignore=lambda x, y: {
"PKG-INFO",
"requires.txt",
"SOURCES.txt",
"not-zip-safe",
},
)
# delete dependency_links if it is only whitespace
dependency_links_path = os.path.join(distinfo_path, "dependency_links.txt")
with open(dependency_links_path, encoding="utf-8") as dependency_links_file:
dependency_links = dependency_links_file.read().strip()
if not dependency_links:
adios(dependency_links_path)
pkg_info_path = os.path.join(distinfo_path, "METADATA")
serialization_policy = EmailPolicy(
utf8=True,
mangle_from_=False,
max_line_length=0,
)
with open(pkg_info_path, "w", encoding="utf-8") as out:
Generator(out, policy=serialization_policy).flatten(pkg_info)
for license_path in self.license_paths:
filename = os.path.basename(license_path)
shutil.copy(license_path, os.path.join(distinfo_path, filename))
adios(egginfo_path)
| bdist_wheel |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_iban.py | {
"start": 749,
"end": 1420
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.valid_iban"
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(is_valid_iban)
@column_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, **kwargs):
return is_valid_iban_udf(column)
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
| ColumnValuesToBeValidIban |
python | getsentry__sentry | src/sentry/charts/base.py | {
"start": 196,
"end": 892
} | class ____(Service):
"""
The chart rendering service is used to translate arbitrary data into a
image representation of that data, usually a chart.
"""
__all__ = (
"is_enabled",
"generate_chart",
)
def __init__(self, **options: Any) -> None:
pass
def is_enabled(self) -> bool:
"""
Checks that the chart rendering service is enabled
"""
return bool(options.get("chart-rendering.enabled", False))
def generate_chart(self, style: ChartType, data: Any, size: ChartSize | None = None) -> str:
"""Produces a chart. Returns the public URL for the chart"""
raise NotImplementedError
| ChartRenderer |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/methods/test_astype.py | {
"start": 11399,
"end": 12429
} | class ____:
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_astype_category(self, tz):
obj = date_range("2000", periods=2, tz=tz, name="idx", unit="ns")
result = obj.astype("category")
dti = DatetimeIndex(["2000-01-01", "2000-01-02"], tz=tz).as_unit("ns")
expected = pd.CategoricalIndex(
dti,
name="idx",
)
tm.assert_index_equal(result, expected)
result = obj._data.astype("category")
expected = expected.values
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_astype_array_fallback(self, tz):
obj = date_range("2000", periods=2, tz=tz, name="idx")
result = obj.astype(bool)
expected = Index(np.array([True, True]), name="idx")
tm.assert_index_equal(result, expected)
result = obj._data.astype(bool)
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
| TestAstype |
python | pydantic__pydantic | pydantic-core/tests/validators/test_complex.py | {
"start": 6370,
"end": 6563
} | class ____:
"""Object that defines __float__() method"""
def __init__(self, value):
self.value = value
def __float__(self):
return float(self.value)
| ComplexWithFloat |
python | spack__spack | lib/spack/spack/spec.py | {
"start": 224604,
"end": 224726
} | class ____(spack.error.SpecError):
"""Raised when a detected spec doesn't pass validation checks."""
| InvalidSpecDetected |
python | kamyu104__LeetCode-Solutions | Python/trim-a-binary-search-tree.py | {
"start": 29,
"end": 518
} | class ____(object):
def trimBST(self, root, L, R):
"""
:type root: TreeNode
:type L: int
:type R: int
:rtype: TreeNode
"""
if not root:
return None
if root.val < L:
return self.trimBST(root.right, L, R)
if root.val > R:
return self.trimBST(root.left, L, R)
root.left, root.right = self.trimBST(root.left, L, R), self.trimBST(root.right, L, R)
return root
| Solution |
python | sympy__sympy | sympy/physics/quantum/hilbert.py | {
"start": 7707,
"end": 13022
} | class ____(HilbertSpace):
"""A tensor product of Hilbert spaces [1]_.
The tensor product between Hilbert spaces is represented by the
operator ``*`` Products of the same Hilbert space will be combined into
tensor powers.
A ``TensorProductHilbertSpace`` object takes in an arbitrary number of
``HilbertSpace`` objects as its arguments. In addition, multiplication of
``HilbertSpace`` objects will automatically return this tensor product
object.
Examples
========
>>> from sympy.physics.quantum.hilbert import ComplexSpace, FockSpace
>>> from sympy import symbols
>>> c = ComplexSpace(2)
>>> f = FockSpace()
>>> hs = c*f
>>> hs
C(2)*F
>>> hs.dimension
oo
>>> hs.spaces
(C(2), F)
>>> c1 = ComplexSpace(2)
>>> n = symbols('n')
>>> c2 = ComplexSpace(n)
>>> hs = c1*c2
>>> hs
C(2)*C(n)
>>> hs.dimension
2*n
References
==========
.. [1] https://en.wikipedia.org/wiki/Hilbert_space#Tensor_products
"""
def __new__(cls, *args):
r = cls.eval(args)
if isinstance(r, Basic):
return r
obj = Basic.__new__(cls, *args)
return obj
@classmethod
def eval(cls, args):
"""Evaluates the direct product."""
new_args = []
recall = False
#flatten arguments
for arg in args:
if isinstance(arg, TensorProductHilbertSpace):
new_args.extend(arg.args)
recall = True
elif isinstance(arg, (HilbertSpace, TensorPowerHilbertSpace)):
new_args.append(arg)
else:
raise TypeError('Hilbert spaces can only be multiplied by \
other Hilbert spaces: %r' % arg)
#combine like arguments into direct powers
comb_args = []
prev_arg = None
for new_arg in new_args:
if prev_arg is not None:
if isinstance(new_arg, TensorPowerHilbertSpace) and \
isinstance(prev_arg, TensorPowerHilbertSpace) and \
new_arg.base == prev_arg.base:
prev_arg = new_arg.base**(new_arg.exp + prev_arg.exp)
elif isinstance(new_arg, TensorPowerHilbertSpace) and \
new_arg.base == prev_arg:
prev_arg = prev_arg**(new_arg.exp + 1)
elif isinstance(prev_arg, TensorPowerHilbertSpace) and \
new_arg == prev_arg.base:
prev_arg = new_arg**(prev_arg.exp + 1)
elif new_arg == prev_arg:
prev_arg = new_arg**2
else:
comb_args.append(prev_arg)
prev_arg = new_arg
elif prev_arg is None:
prev_arg = new_arg
comb_args.append(prev_arg)
if recall:
return TensorProductHilbertSpace(*comb_args)
elif len(comb_args) == 1:
return TensorPowerHilbertSpace(comb_args[0].base, comb_args[0].exp)
else:
return None
@property
def dimension(self):
arg_list = [arg.dimension for arg in self.args]
if S.Infinity in arg_list:
return S.Infinity
else:
return reduce(lambda x, y: x*y, arg_list)
@property
def spaces(self):
"""A tuple of the Hilbert spaces in this tensor product."""
return self.args
def _spaces_printer(self, printer, *args):
spaces_strs = []
for arg in self.args:
s = printer._print(arg, *args)
if isinstance(arg, DirectSumHilbertSpace):
s = '(%s)' % s
spaces_strs.append(s)
return spaces_strs
def _sympyrepr(self, printer, *args):
spaces_reprs = self._spaces_printer(printer, *args)
return "TensorProductHilbertSpace(%s)" % ','.join(spaces_reprs)
def _sympystr(self, printer, *args):
spaces_strs = self._spaces_printer(printer, *args)
return '*'.join(spaces_strs)
def _pretty(self, printer, *args):
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print(self.args[i], *args)
if isinstance(self.args[i], (DirectSumHilbertSpace,
TensorProductHilbertSpace)):
next_pform = prettyForm(
*next_pform.parens(left='(', right=')')
)
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
if printer._use_unicode:
pform = prettyForm(*pform.right(' ' + '\N{N-ARY CIRCLED TIMES OPERATOR}' + ' '))
else:
pform = prettyForm(*pform.right(' x '))
return pform
def _latex(self, printer, *args):
length = len(self.args)
s = ''
for i in range(length):
arg_s = printer._print(self.args[i], *args)
if isinstance(self.args[i], (DirectSumHilbertSpace,
TensorProductHilbertSpace)):
arg_s = r'\left(%s\right)' % arg_s
s = s + arg_s
if i != length - 1:
s = s + r'\otimes '
return s
| TensorProductHilbertSpace |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/active.py | {
"start": 1800,
"end": 29891
} | class ____:
"""State machine used to track progress through execution of an ExecutionPlan."""
def __init__(
self,
execution_plan: ExecutionPlan,
retry_mode: RetryMode,
sort_key_fn: Optional[Callable[[ExecutionStep], float]] = None,
max_concurrent: Optional[int] = None,
tag_concurrency_limits: Optional[list[dict[str, Any]]] = None,
instance_concurrency_context: Optional[InstanceConcurrencyContext] = None,
step_dependency_config: StepDependencyConfig = StepDependencyConfig.default(),
):
self._plan: ExecutionPlan = check.inst_param(
execution_plan, "execution_plan", ExecutionPlan
)
self._retry_mode = check.inst_param(retry_mode, "retry_mode", RetryMode)
self._retry_state = self._plan.known_state.get_retry_state()
self._instance_concurrency_context = instance_concurrency_context
self._step_dependency_config = step_dependency_config
self._sort_key_fn: Callable[[ExecutionStep], float] = (
check.opt_callable_param(
sort_key_fn,
"sort_key_fn",
)
or _default_sort_key
)
self._max_concurrent = check.opt_int_param(max_concurrent, "max_concurrent")
self._tag_concurrency_limits = check.opt_list_param(
tag_concurrency_limits, "tag_concurrency_limits"
)
self._context_guard: bool = False # Prevent accidental direct use
# We decide what steps to skip based on what outputs are yielded by upstream steps
self._step_outputs: set[StepOutputHandle] = set(self._plan.known_state.ready_outputs)
# All steps to be executed start out here in _pending
self._pending: dict[str, set[str]] = dict(self._plan.get_executable_step_deps())
# track mapping keys from DynamicOutputs, step_key, output_name -> list of keys
# to _gathering while in flight
self._gathering_dynamic_outputs: dict[str, Mapping[str, Optional[list[str]]]] = {}
# then on resolution move to _completed
self._completed_dynamic_outputs: dict[str, Mapping[str, Optional[Sequence[str]]]] = (
dict(self._plan.known_state.dynamic_mappings) if self._plan.known_state else {}
)
self._new_dynamic_mappings: bool = False
# track which upstream deps caused a step to skip
self._skipped_deps: dict[str, Sequence[str]] = {}
# steps move in to these buckets as a result of _update calls
self._executable: list[str] = []
self._pending_skip: list[str] = []
self._pending_retry: list[str] = []
self._pending_abandon: list[str] = []
self._waiting_to_retry: dict[str, float] = {}
self._messaged_concurrency_slots: dict[str, float] = {}
# then are considered _in_flight when vended via get_steps_to_*
self._in_flight: set[str] = set()
# and finally their terminal state is tracked by these sets, via mark_*
self._success: set[str] = set()
self._failed: set[str] = set()
self._skipped: set[str] = set()
self._abandoned: set[str] = set()
# see verify_complete
self._unknown_state: set[str] = set()
self._interrupted: bool = False
# Start the show by loading _executable with the set of _pending steps that have no deps
self._update()
def __enter__(self) -> Self:
self._context_guard = True
return self
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self._context_guard = False
# Exiting due to exception, return to allow exception to bubble
if exc_type or exc_value or traceback:
return
if not self.is_complete:
# generate the state string before exiting the concurrency context
state_str = self._pending_state_str()
else:
state_str = ""
if not self.is_complete:
if self._interrupted:
raise DagsterExecutionInterruptedError(
f"Execution was interrupted before completing the execution plan. {state_str}"
)
else:
raise DagsterInvariantViolationError(
f"Execution finished without completing the execution plan. {state_str}"
)
# See verify_complete - steps for which we did not observe a failure/success event are in an unknown
# state so we raise to ensure pipeline failure.
if len(self._unknown_state) > 0:
if self._interrupted:
raise DagsterExecutionInterruptedError(
f"Execution exited with steps {self._unknown_state} in an unknown state after "
"being interrupted."
)
else:
raise DagsterUnknownStepStateError(
f"Execution exited with steps {self._unknown_state} in an unknown state to this"
" process.\nThis was likely caused by losing communication with the process"
" performing step execution."
)
def _pending_state_str(self) -> str:
assert not self.is_complete
pending_action = (
self._executable + self._pending_abandon + self._pending_retry + self._pending_skip
)
return "{pending_str}{in_flight_str}{action_str}{retry_str}{claim_str}".format(
in_flight_str=f"\nSteps still in flight: {self._in_flight}" if self._in_flight else "",
pending_str=(
f"\nSteps pending processing: {self._pending.keys()}" if self._pending else ""
),
action_str=f"\nSteps pending action: {pending_action}" if pending_action else "",
retry_str=(
f"\nSteps waiting to retry: {self._waiting_to_retry.keys()}"
if self._waiting_to_retry
else ""
),
claim_str=(
"\nSteps waiting to claim:"
f" {self._instance_concurrency_context.pending_claim_steps()}"
if self._instance_concurrency_context
and self._instance_concurrency_context.has_pending_claims()
else ""
),
)
def _should_skip_step(self, step_key: str) -> bool:
successful_or_skipped_steps = self._success | self._skipped
step = self.get_step_by_key(step_key)
for step_input in step.step_inputs:
missing_source_handles = []
for source_handle in step_input.get_step_output_handle_dependencies():
if (
source_handle.step_key in successful_or_skipped_steps
and source_handle not in self._step_outputs
):
missing_source_handles.append(source_handle)
if missing_source_handles:
if (
# for the FromMultipleSources case (aka fan-in), we only skip if all sources
# are missing. for other cases, we skip if any source is missing
not isinstance(step_input.source, FromMultipleSources)
or (
len(missing_source_handles)
== len(step_input.get_step_output_handle_dependencies())
)
):
self._skipped_deps[step_key] = [
f"{h.step_key}.{h.output_name}" for h in missing_source_handles
]
return True
return False
def _all_upstream_outputs_failed_or_abandoned(self, step_key: str) -> bool:
failed_or_abandoned_steps = self._failed | self._abandoned
# check that all upstream outputs have failed or been abandoned
step = self.get_step_by_key(step_key)
for step_input in step.step_inputs:
if any(
source_handle not in self._step_outputs
and source_handle.step_key in failed_or_abandoned_steps
for source_handle in step_input.get_step_output_handle_dependencies()
):
return True
return False
def _has_produced_output(self, step_output_handle: StepOutputHandle) -> bool:
# check if the step output has been produced by this run or any parent run
if step_output_handle in self._step_outputs:
return True
elif step_output_handle.step_key in self._plan.step_keys_to_execute:
# step will be executed in this run, so should wait for this run to
# produce the output instead of looking at past runs
return False
# this case can happen if the original run was executed with AFTER_UPSTREAM_OUTPUTS
parent_state = self._plan.known_state.parent_state
while parent_state is not None:
if step_output_handle in parent_state.produced_outputs:
return True
parent_state = cast("Optional[PastExecutionState]", parent_state.parent_state)
return False
def _all_upstream_outputs_produced(self, step_key: str) -> bool:
# check that all upstream outputs have been emitted
step = self.get_step_by_key(step_key)
for step_input in step.step_inputs:
if any(
not self._has_produced_output(source_handle)
for source_handle in step_input.get_step_output_handle_dependencies()
):
return False
return True
def _update(self) -> None:
"""Moves steps from _pending to _executable / _pending_skip / _pending_retry
as a function of what has been _completed.
"""
new_steps_to_execute: list[str] = []
new_steps_to_skip: list[str] = []
new_steps_to_abandon: list[str] = []
if self._new_dynamic_mappings:
new_step_deps = self._plan.resolve(self._completed_dynamic_outputs)
for step_key, deps in new_step_deps.items():
self._pending[step_key] = deps
self._new_dynamic_mappings = False
resolved_steps = self._success | self._skipped | self._failed | self._abandoned
for step_key, depends_on_steps in self._pending.items():
# traditional behavior, wait for all upstream steps before executing
if self._step_dependency_config.require_upstream_step_success:
if depends_on_steps.issubset(resolved_steps):
if self._should_skip_step(step_key):
new_steps_to_skip.append(step_key)
elif depends_on_steps.intersection(self._failed | self._abandoned):
new_steps_to_abandon.append(step_key)
else:
new_steps_to_execute.append(step_key)
# optional behavior, executes as soon as all upstream outputs are available
else:
if self._should_skip_step(step_key):
new_steps_to_skip.append(step_key)
elif self._all_upstream_outputs_failed_or_abandoned(step_key):
new_steps_to_abandon.append(step_key)
elif self._all_upstream_outputs_produced(step_key):
new_steps_to_execute.append(step_key)
for key in new_steps_to_execute:
self._executable.append(key)
del self._pending[key]
for key in new_steps_to_skip:
self._pending_skip.append(key)
del self._pending[key]
for key in new_steps_to_abandon:
self._pending_abandon.append(key)
del self._pending[key]
ready_to_retry = []
tick_time = time.time()
for key, at_time in self._waiting_to_retry.items():
if tick_time >= at_time:
ready_to_retry.append(key)
for key in ready_to_retry:
self._executable.append(key)
del self._waiting_to_retry[key]
def sleep_interval(self):
now = time.time()
intervals = []
if self._waiting_to_retry:
for t in self._waiting_to_retry.values():
intervals.append(t - now)
if (
self._instance_concurrency_context
and self._instance_concurrency_context.has_pending_claims()
):
intervals.append(
self._instance_concurrency_context.interval_to_next_pending_claim_check()
)
if intervals:
return min(intervals)
return 0
def sleep_til_ready(self) -> None:
sleep_amt = self.sleep_interval()
if sleep_amt > 0:
time.sleep(sleep_amt)
def get_next_step(self) -> Optional[ExecutionStep]:
check.invariant(not self.is_complete, "Can not call get_next_step when is_complete is True")
steps = self.get_steps_to_execute(limit=1)
if not steps:
return None
return steps[0]
def get_step_by_key(self, step_key: str) -> ExecutionStep:
step = self._plan.get_step_by_key(step_key)
return check.inst(step, ExecutionStep)
def get_steps_to_execute(
self,
limit: Optional[int] = None,
) -> Sequence[ExecutionStep]:
check.invariant(
self._context_guard,
"ActiveExecution must be used as a context manager",
)
check.opt_int_param(limit, "limit")
self._update()
steps = sorted(
[self.get_step_by_key(key) for key in self._executable],
key=self._sort_key_fn,
)
run_scoped_concurrency_limits_counter = None
if self._tag_concurrency_limits:
in_flight_steps = [self.get_step_by_key(key) for key in self._in_flight]
run_scoped_concurrency_limits_counter = TagConcurrencyLimitsCounter(
self._tag_concurrency_limits,
in_flight_steps,
)
batch: list[ExecutionStep] = []
for step in steps:
if limit is not None and len(batch) >= limit:
break
if (
self._max_concurrent is not None
and len(batch) + len(self._in_flight) >= self._max_concurrent
):
break
if run_scoped_concurrency_limits_counter:
if run_scoped_concurrency_limits_counter.is_blocked(step):
continue
if run_scoped_concurrency_limits_counter:
run_scoped_concurrency_limits_counter.update_counters_with_launched_item(step)
# fallback to fetching from tags for backwards compatibility
concurrency_key = _pool_key_for_step(step)
if concurrency_key and self._instance_concurrency_context:
try:
step_priority = int(step.tags.get(PRIORITY_TAG, 0))
except ValueError:
step_priority = 0
if not self._instance_concurrency_context.claim(
concurrency_key, step.key, step_priority
):
continue
batch.append(step)
for step in batch:
self._in_flight.add(step.key)
self._executable.remove(step.key)
self._prep_for_dynamic_outputs(step)
return batch
def get_steps_to_skip(self) -> Sequence[ExecutionStep]:
self._update()
steps = []
steps_to_skip = list(self._pending_skip)
for key in steps_to_skip:
step = self.get_step_by_key(key)
steps.append(step)
self._in_flight.add(key)
self._pending_skip.remove(key)
self._gathering_dynamic_outputs # noqa: B018
self._skip_for_dynamic_outputs(step)
return sorted(steps, key=self._sort_key_fn)
def get_steps_to_abandon(self) -> Sequence[ExecutionStep]:
self._update()
steps = []
steps_to_abandon = list(self._pending_abandon)
for key in steps_to_abandon:
steps.append(self.get_step_by_key(key))
self._in_flight.add(key)
self._pending_abandon.remove(key)
return sorted(steps, key=self._sort_key_fn)
def plan_events_iterator(
self, job_context: Union[PlanExecutionContext, PlanOrchestrationContext]
) -> Iterator[DagsterEvent]:
"""Process all steps that can be skipped and abandoned."""
steps_to_skip = self.get_steps_to_skip()
while steps_to_skip:
for step in steps_to_skip:
step_context = job_context.for_step(step)
step_context.log.info(
f"Skipping step {step.key} due to skipped dependencies:"
f" {self._skipped_deps[step.key]}."
)
yield DagsterEvent.step_skipped_event(step_context)
self.mark_skipped(step.key)
steps_to_skip = self.get_steps_to_skip()
steps_to_abandon = self.get_steps_to_abandon()
while steps_to_abandon:
for step in steps_to_abandon:
step_context = job_context.for_step(step)
failed_inputs: list[str] = []
for step_input in step.step_inputs:
failed_inputs.extend(self._failed.intersection(step_input.dependency_keys))
abandoned_inputs: list[str] = []
for step_input in step.step_inputs:
abandoned_inputs.extend(
self._abandoned.intersection(step_input.dependency_keys)
)
step_context.log.error(
"Dependencies for step {step}{fail_str}{abandon_str}. Not executing.".format(
step=step.key,
fail_str=f" failed: {failed_inputs}" if failed_inputs else "",
abandon_str=(
f" were not executed: {abandoned_inputs}" if abandoned_inputs else ""
),
)
)
self.mark_abandoned(step.key)
steps_to_abandon = self.get_steps_to_abandon()
def mark_failed(self, step_key: str) -> None:
self._failed.add(step_key)
self._mark_complete(step_key)
def mark_success(self, step_key: str) -> None:
self._success.add(step_key)
self._mark_complete(step_key)
self._resolve_any_dynamic_outputs(step_key)
def mark_skipped(self, step_key: str) -> None:
self._skipped.add(step_key)
self._mark_complete(step_key)
self._resolve_any_dynamic_outputs(step_key)
def mark_abandoned(self, step_key: str) -> None:
self._abandoned.add(step_key)
self._mark_complete(step_key)
def mark_interrupted(self) -> None:
self._interrupted = True
def check_for_interrupts(self) -> bool:
return pop_captured_interrupt()
def mark_up_for_retry(self, step_key: str, at_time: Optional[float] = None) -> None:
check.invariant(
not self._retry_mode.disabled,
f"Attempted to mark {step_key} as up for retry but retries are disabled",
)
check.opt_float_param(at_time, "at_time")
# if retries are enabled - queue this back up
if self._retry_mode.enabled:
if at_time:
self._waiting_to_retry[step_key] = at_time
else:
self._pending[step_key] = self._plan.get_executable_step_deps()[step_key]
elif self._retry_mode.deferred:
# do not attempt to execute again
self._abandoned.add(step_key)
self._retry_state.mark_attempt(step_key)
self._mark_complete(step_key)
def _mark_complete(self, step_key: str) -> None:
check.invariant(
step_key in self._in_flight,
f"Attempted to mark step {step_key} as complete that was not known to be in flight",
)
self._in_flight.remove(step_key)
def handle_event(self, dagster_event: DagsterEvent) -> None:
check.inst_param(dagster_event, "dagster_event", DagsterEvent)
step_key = cast("str", dagster_event.step_key)
if dagster_event.is_step_failure:
self.mark_failed(step_key)
if self._instance_concurrency_context:
self._instance_concurrency_context.free_step(step_key)
elif dagster_event.is_resource_init_failure:
# Resources are only initialized without a step key in the
# in-process case, and resource initalization happens before the
# ActiveExecution object is created.
check.invariant(
dagster_event.step_key is not None,
"Resource init failure was reported during execution without a step key.",
)
if self._instance_concurrency_context:
self._instance_concurrency_context.free_step(step_key)
elif dagster_event.is_step_success:
self.mark_success(step_key)
if self._instance_concurrency_context:
self._instance_concurrency_context.free_step(step_key)
elif dagster_event.is_step_skipped:
# Skip events are generated by this class. They should not be sent via handle_event
raise DagsterInvariantViolationError(
f"Step {step_key} was reported as skipped from outside the ActiveExecution."
)
elif dagster_event.is_step_up_for_retry:
self.mark_up_for_retry(
step_key,
(
time.time() + dagster_event.step_retry_data.seconds_to_wait
if dagster_event.step_retry_data.seconds_to_wait
else None
),
)
if self._instance_concurrency_context:
self._instance_concurrency_context.free_step(step_key)
elif dagster_event.is_successful_output:
event_specific_data = cast("StepOutputData", dagster_event.event_specific_data)
self.mark_step_produced_output(event_specific_data.step_output_handle)
if dagster_event.step_output_data.step_output_handle.mapping_key:
check.not_none(
self._gathering_dynamic_outputs[step_key][
dagster_event.step_output_data.step_output_handle.output_name
],
).append(dagster_event.step_output_data.step_output_handle.mapping_key)
def verify_complete(self, job_context: IPlanContext, step_key: str) -> None:
"""Ensure that a step has reached a terminal state, if it has not mark it as an unexpected failure."""
if step_key in self._in_flight:
job_context.log.error(
f"Step {step_key} finished without success or failure event. Downstream steps will not"
" execute."
)
self.mark_unknown_state(step_key)
# factored out for test
def mark_unknown_state(self, step_key: str) -> None:
# note the step so that we throw upon plan completion
self._unknown_state.add(step_key)
# mark as abandoned so downstream tasks do not execute
self.mark_abandoned(step_key)
# factored out for test
def mark_step_produced_output(self, step_output_handle: StepOutputHandle) -> None:
self._step_outputs.add(step_output_handle)
@property
def is_complete(self) -> bool:
return (
len(self._pending) == 0
and len(self._in_flight) == 0
and len(self._executable) == 0
and len(self._pending_skip) == 0
and len(self._pending_retry) == 0
and len(self._pending_abandon) == 0
and len(self._waiting_to_retry) == 0
and (
not self._instance_concurrency_context
or not self._instance_concurrency_context.has_pending_claims()
)
)
@property
def retry_state(self) -> RetryState:
return self._retry_state
@property
def has_in_flight_steps(self) -> bool:
return len(self._in_flight) > 0
def get_known_state(self) -> KnownExecutionState:
return KnownExecutionState(
previous_retry_attempts=self._retry_state.snapshot_attempts(),
dynamic_mappings=dict(self._completed_dynamic_outputs),
ready_outputs=self._step_outputs,
step_output_versions=self._plan.known_state.step_output_versions,
parent_state=self._plan.known_state.parent_state,
)
def _prep_for_dynamic_outputs(self, step: ExecutionStep):
dyn_outputs = [step_out for step_out in step.step_outputs if step_out.is_dynamic]
if dyn_outputs:
self._gathering_dynamic_outputs[step.key] = {out.name: [] for out in dyn_outputs}
def _skip_for_dynamic_outputs(self, step: ExecutionStep):
dyn_outputs = [step_out for step_out in step.step_outputs if step_out.is_dynamic]
if dyn_outputs:
# place None to indicate the dynamic output was skipped, different than having 0 entries
self._gathering_dynamic_outputs[step.key] = {out.name: None for out in dyn_outputs}
def _resolve_any_dynamic_outputs(self, step_key: str) -> None:
if step_key in self._gathering_dynamic_outputs:
step = self.get_step_by_key(step_key)
completed_mappings: dict[str, Optional[Sequence[str]]] = {}
for output_name, mappings in self._gathering_dynamic_outputs[step_key].items():
# if no dynamic outputs were returned and the output was marked is_required=False
# set to None to indicate a skip should occur
if not mappings and not step.step_output_dict[output_name].is_required:
completed_mappings[output_name] = None
else:
completed_mappings[output_name] = mappings
self._completed_dynamic_outputs[step_key] = completed_mappings
self._new_dynamic_mappings = True
def rebuild_from_events(
self, dagster_events: Sequence[DagsterEvent]
) -> Sequence[ExecutionStep]:
"""Replay events to rebuild the execution state and continue after a failure.
Returns a list of steps that are possibly in flight. Current status of the event log implies
that the previous run worker might have crashed before launching these steps, or it may have
launched them but they have yet to report a STEP_START event.
"""
self.get_steps_to_execute()
for event in dagster_events:
self.handle_event(event)
self.get_steps_to_execute()
return [self.get_step_by_key(step_key) for step_key in self._in_flight]
def concurrency_event_iterator(
self, plan_context: Union[PlanExecutionContext, PlanOrchestrationContext]
) -> Iterator[DagsterEvent]:
if not self._instance_concurrency_context:
return
pending_claims = self._instance_concurrency_context.pending_claim_steps()
for step_key in pending_claims:
last_messaged_timestamp = self._messaged_concurrency_slots.get(step_key)
if (
not last_messaged_timestamp
or time.time() - last_messaged_timestamp > CONCURRENCY_CLAIM_MESSAGE_INTERVAL
):
step = self.get_step_by_key(step_key)
step_context = plan_context.for_step(step)
pool = check.inst(_pool_key_for_step(step), str)
self._messaged_concurrency_slots[step_key] = time.time()
is_initial_message = last_messaged_timestamp is None
yield DagsterEvent.step_concurrency_blocked(
step_context, pool, initial=is_initial_message
)
| ActiveExecution |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 9288,
"end": 9476
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent, GrapheneErrorEvent)
name = "HookErroredEvent"
| GrapheneHookErroredEvent |
python | kamyu104__LeetCode-Solutions | Python/number-of-integers-with-popcount-depth-equal-to-k-ii.py | {
"start": 1002,
"end": 1900
} | class ____(object):
def popcountDepth(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
def count(x):
return D[popcount(x)]+1 if x != 1 else 0
bit = [BIT(len(nums)) for _ in xrange(MAX_K+1)]
for i in xrange(len(nums)):
bit[count(nums[i])].add(i, +1)
result = []
for q in queries:
if q[0] == 1:
_, l, r, k = q
assert(k < len(bit))
result.append(bit[k].query(r)-bit[k].query(l-1))
else:
_, i, x = q
old_d = count(nums[i])
new_d = count(x)
if new_d != old_d:
bit[old_d].add(i, -1)
bit[new_d].add(i, +1)
nums[i] = x
return result
| Solution |
python | huggingface__transformers | src/transformers/models/gemma3n/modeling_gemma3n.py | {
"start": 79958,
"end": 90930
} | class ____(Gemma3nPreTrainedModel):
config: Gemma3nTextConfig
input_modalities = ("text",)
def __init__(self, config: Gemma3nTextConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
# Gemma3n downcasts the below to bfloat16, causing sqrt(3072)=55.4256 to become 55.5. See https://github.com/huggingface/transformers/pull/29402
self.embed_tokens = Gemma3nTextScaledWordEmbedding(
config.vocab_size, config.hidden_size, self.padding_idx, embed_scale=self.config.hidden_size**0.5
)
self.layers = nn.ModuleList(
[Gemma3nTextDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Gemma3nRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Gemma3nRotaryEmbedding(config)
self.gradient_checkpointing = False
self.hidden_size = config.hidden_size
self.hidden_size_per_layer_input = config.hidden_size_per_layer_input
self.embed_tokens_per_layer = Gemma3nTextScaledWordEmbedding(
config.vocab_size_per_layer_input,
config.num_hidden_layers * config.hidden_size_per_layer_input,
self.padding_idx,
embed_scale=config.hidden_size_per_layer_input**0.5,
)
self.per_layer_model_projection = nn.Linear(
self.hidden_size,
config.num_hidden_layers * config.hidden_size_per_layer_input,
bias=False,
)
self.per_layer_projection_norm = Gemma3nRMSNorm(config.hidden_size_per_layer_input, eps=config.rms_norm_eps)
self.altup_projections = nn.ModuleList(
[nn.Linear(self.hidden_size, self.hidden_size, bias=False) for _ in range(1, self.config.altup_num_inputs)]
)
self.altup_unembed_projections = nn.ModuleList(
[nn.Linear(self.hidden_size, self.hidden_size, bias=False) for _ in range(1, self.config.altup_num_inputs)]
)
self.register_buffer("per_layer_projection_scale", torch.tensor(self.hidden_size**-0.5), persistent=False)
self.register_buffer("per_layer_input_scale", torch.rsqrt(torch.tensor(2.0)), persistent=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
per_layer_inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
r"""
per_layer_inputs (torch.Tensor, *optional*, defaults to None):
Pre-computed per-layer embeddings. If None, they are derived from input_ids if provided.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if input_ids is not None:
inputs_embeds = self.embed_tokens(input_ids)
per_layer_inputs = self.get_per_layer_inputs(input_ids)
per_layer_inputs = self.project_per_layer_inputs(inputs_embeds, per_layer_inputs)
if use_cache and past_key_values is None and not self.training:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens,
past_seen_tokens + inputs_embeds.shape[1],
device=inputs_embeds.device,
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
"sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
}
# embed positions
hidden_states_0 = inputs_embeds
# Expand hidden_states to support per-layer inputs
target_magnitude = torch.mean(hidden_states_0**2, dim=-1, keepdim=True) ** 0.5
epsilon_tensor = torch.tensor(1e-5)
temp_hidden_states = [hidden_states_0]
for i in range(1, self.config.altup_num_inputs):
# altup_proj adapted from jax.numpy.einsum("btp,pd->btd", ...)
altup_proj = self.altup_projections[i - 1](hidden_states_0)
current_hidden_state = altup_proj.to(dtype=hidden_states_0.dtype, device=target_magnitude.device)
new_magnitude = torch.mean(current_hidden_state**2, dim=-1, keepdim=True)
new_magnitude = torch.sqrt(torch.maximum(new_magnitude, epsilon_tensor.to(target_magnitude.device)))
current_hidden_state = current_hidden_state * target_magnitude / new_magnitude
temp_hidden_states.append(current_hidden_state)
hidden_states = torch.stack(temp_hidden_states, dim=0) # [num_altup_inputs, batch, seq_len, hidden_size]
position_embeddings = {}
for layer_type in self.config.layer_types:
position_embeddings[layer_type] = self.rotary_emb(hidden_states, position_ids, layer_type)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
if output_hidden_states:
all_hidden_states += (hidden_states,)
causal_mask = causal_mask_mapping[decoder_layer.attention_type]
per_layer_input = per_layer_inputs[:, :, decoder_layer.layer_idx, :]
layer_outputs = decoder_layer(
hidden_states,
position_embeddings[decoder_layer.attention_type],
per_layer_input,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
# add hidden states from the last decoder layer (but before reprojecting to stay consistent with layer output)
if output_hidden_states:
all_hidden_states += (hidden_states,)
# Per-layer inputs to single output
target_magnitude = torch.mean(hidden_states[0] ** 2, dim=-1, keepdim=True) ** 0.5
temp_hidden_states = [hidden_states[0]]
for i in range(1, self.config.altup_num_inputs):
# altup_unembed_projections adapted from jax.numpy.einsum("btp,pd->btd", ...)
altup_unemb_proj: torch.Tensor = self.altup_unembed_projections[i - 1](hidden_states[i])
current_hidden_state = altup_unemb_proj.to(dtype=hidden_states_0.dtype, device=target_magnitude.device)
new_magnitude = torch.mean(current_hidden_state**2, dim=-1, keepdim=True)
new_magnitude = torch.sqrt(torch.maximum(new_magnitude, epsilon_tensor.to(target_magnitude.device)))
current_hidden_state = current_hidden_state * target_magnitude / new_magnitude
temp_hidden_states.append(current_hidden_state)
hidden_states = torch.stack(temp_hidden_states)
hidden_states = torch.mean(hidden_states, dim=0)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
def get_per_layer_inputs(self, input_ids: torch.LongTensor) -> torch.Tensor:
return self.embed_tokens_per_layer(input_ids).reshape(
*input_ids.shape,
self.config.num_hidden_layers,
self.hidden_size_per_layer_input,
)
def project_per_layer_inputs(
self,
inputs_embeds: torch.Tensor,
per_layer_inputs: Optional[torch.Tensor] = None,
) -> torch.Tensor:
per_layer_projection: torch.Tensor = self.per_layer_model_projection(inputs_embeds)
per_layer_projection *= self.per_layer_projection_scale.to(
dtype=inputs_embeds.dtype, device=per_layer_projection.device
)
per_layer_projection = per_layer_projection.reshape(
*inputs_embeds.shape[:-1],
self.config.num_hidden_layers,
self.hidden_size_per_layer_input,
)
per_layer_projection = self.per_layer_projection_norm(per_layer_projection)
if per_layer_inputs is None:
return per_layer_projection
if per_layer_projection.shape != per_layer_inputs.shape:
# per-layer inputs are sometimes padded with zeros, slice the relevant embeddings.
per_layer_inputs = per_layer_inputs[..., : self.config.num_hidden_layers, :]
return (per_layer_projection + per_layer_inputs) * self.per_layer_input_scale.to(
dtype=inputs_embeds.dtype, device=per_layer_projection.device
)
@auto_docstring(custom_intro="The base Gemma 3n language model with a language modeling head.")
| Gemma3nTextModel |
python | getsentry__sentry | tests/sentry/auth/test_helper.py | {
"start": 1563,
"end": 1659
} | class ____(TypedDict):
id: str
email: str
name: str
data: dict[str, str]
| _Identity |
python | getsentry__sentry | src/sentry/interfaces/stacktrace.py | {
"start": 11512,
"end": 19455
} | class ____(Interface):
"""
A stacktrace contains a list of frames, each with various bits (most optional)
describing the context of that frame. Frames should be sorted from oldest
to newest.
The stacktrace contains an element, ``frames``, which is a list of hashes. Each
hash must contain **at least** the ``filename`` attribute. The rest of the values
are optional, but recommended.
Additionally, if the list of frames is large, you can explicitly tell the
system that you've omitted a range of frames. The ``frames_omitted`` must
be a single tuple two values: start and end. For example, if you only
removed the 8th frame, the value would be (8, 9), meaning it started at the
8th frame, and went until the 9th (the number of frames omitted is
end-start). The values should be based on a one-index.
The list of frames should be ordered by the oldest call first.
Each frame must contain the following attributes:
``filename``
The relative filepath to the call
OR
``function``
The name of the function being called
OR
``module``
Platform-specific module path (e.g. stacktrace)
The following additional attributes are supported:
``lineno``
The line number of the call
``colno``
The column number of the call
``abs_path``
The absolute path to filename
``context_line``
Source code in filename at lineno
``pre_context``
A list of source code lines before context_line (in order) -- usually [lineno - 5:lineno]
``post_context``
A list of source code lines after context_line (in order) -- usually [lineno + 1:lineno + 5]
``in_app``
Signifies whether this frame is related to the execution of the relevant
code in this stacktrace. For example, the frames that might power the
framework's webserver of your app are probably not relevant, however calls
to the framework's library once you start handling code likely are. See
notes below on implicit ``in_app`` behavior.
``vars``
A mapping of variables which were available within this frame (usually context-locals).
``package``
Name of the package or object file that the frame is contained in. This
for instance can be the name of a DLL, .NET Assembly, jar file, object
file etc.
>>> {
>>> "frames": [{
>>> "abs_path": "/real/file/name.py"
>>> "filename": "file/name.py",
>>> "function": "myfunction",
>>> "vars": {
>>> "key": "value"
>>> },
>>> "pre_context": [
>>> "line1",
>>> "line2"
>>> ],
>>> "context_line": "line3",
>>> "lineno": 3,
>>> "in_app": true,
>>> "post_context": [
>>> "line4",
>>> "line5"
>>> ],
>>> }],
>>> "frames_omitted": [13, 56]
>>> }
Implicit ``in_app`` behavior exists when the value is not specified on all
frames within a stacktrace (or collectively within an exception if this is
part of a chain).
If **any frame** is marked with ``in_app=True`` or ``in_app=False``:
- Set ``in_app=False`` where ``in_app is None``
If **all frames** are marked identical values for ``in_app``:
- Set ``in_app=False`` on all frames
.. note:: This interface can be passed as the 'stacktrace' key in addition
to the full interface path.
"""
score = 1950
grouping_variants = ["system", "app"]
def __iter__(self):
return iter(self.frames)
@classmethod
def to_python(cls, data, **kwargs):
data = dict(data)
frame_list = []
for i, f in enumerate(data.get("frames") or []):
# XXX(dcramer): handle PHP sending an empty array for a frame
frame_list.append(Frame.to_python(f or {}))
data["frames"] = frame_list
data.setdefault("registers", None)
data.setdefault("frames_omitted", None)
return super().to_python(data, **kwargs)
def get_has_system_frames(self):
# This is a simplified logic from how the normalizer works.
# Because this always works on normalized data we do not have to
# consider the "all frames are in_app" case. The normalizer lives
# in stacktraces.normalize_stacktraces_for_grouping which will take
# care of that.
return any(frame.in_app for frame in self.frames)
def get_longest_address(self):
rv = None
for frame in self.frames:
rv = max_addr(rv, frame.instruction_addr)
rv = max_addr(rv, frame.symbol_addr)
return rv
def get_api_context(self, is_public=False, platform=None):
longest_addr = self.get_longest_address()
frame_list = [
f.get_api_context(is_public=is_public, pad_addr=longest_addr, platform=platform)
for f in self.frames
]
return {
"frames": frame_list,
"framesOmitted": self.frames_omitted,
"registers": self.registers,
"hasSystemFrames": self.get_has_system_frames(),
}
def get_api_meta(self, meta, is_public=False, platform=None):
if not meta:
return meta
frame_meta = {}
for index, value in meta.get("frames", {}).items():
if index == "":
continue
frame = self.frames[int(index)]
frame_meta[index] = frame.get_api_meta(value, is_public=is_public, platform=platform)
return {
"": meta.get(""),
"frames": frame_meta,
"framesOmitted": meta.get("frames_omitted"),
"registers": meta.get("registers"),
}
def to_json(self):
return prune_empty_keys(
{
"frames": [f and f.to_json() for f in self.frames] or None,
"frames_omitted": self.frames_omitted,
"registers": self.registers,
}
)
def to_string(self, event) -> str:
return self.get_stacktrace(event, system_frames=False, max_frames=10)
def get_stacktrace(
self, event, system_frames=True, newest_first=None, max_frames=None, header=True
):
if newest_first is None:
newest_first = is_newest_frame_first(event)
result = []
if header:
if newest_first:
result.append(_("Stacktrace (most recent call first):"))
else:
result.append(_("Stacktrace (most recent call last):"))
result.append("")
frames = self.frames
num_frames = len(frames)
if not system_frames:
frames = [f for f in frames if f.in_app is not False]
if not frames:
frames = self.frames
if newest_first:
frames = frames[::-1]
if max_frames:
visible_frames = max_frames
if newest_first:
start, stop = None, max_frames
else:
start, stop = -max_frames, None
else:
visible_frames = len(frames)
start, stop = None, None
if not newest_first and visible_frames < num_frames:
result.extend(
(
"(%d additional frame(s) were not displayed)" % (num_frames - visible_frames,),
"...",
)
)
for frame in frames[start:stop]:
result.append(frame.to_string(event))
if newest_first and visible_frames < num_frames:
result.extend(
(
"...",
"(%d additional frame(s) were not displayed)" % (num_frames - visible_frames,),
)
)
return "\n".join(result)
| Stacktrace |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels43.py | {
"start": 315,
"end": 1906
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels43.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [56185600, 56187520]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": 1,
"custom": [
{
"font": {
"bold": 1,
"italic": 1,
"color": "red",
"baseline": -1,
},
"border": {"color": "red"},
}
],
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | numba__numba | numba/core/typed_passes.py | {
"start": 7176,
"end": 7343
} | class ____(BaseTypeInference):
_name = "partial_type_inference"
_raise_errors = False
@register_pass(mutates_CFG=False, analysis_only=False)
| PartialTypeInference |
python | apache__airflow | airflow-core/tests/unit/dag_processing/test_processor.py | {
"start": 38587,
"end": 48181
} | class ____:
"""Test the _execute_task_callbacks function"""
def test_execute_task_callbacks_failure_callback(self, spy_agency):
"""Test _execute_task_callbacks executes failure callbacks"""
called = False
context_received = None
def on_failure(context):
nonlocal called, context_received
called = True
context_received = context
with DAG(dag_id="test_dag") as dag:
BaseOperator(task_id="test_task", on_failure_callback=on_failure)
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
try_number=1,
dag_version_id=uuid.uuid4(),
)
request = TaskCallbackRequest(
filepath="test.py",
msg="Task failed",
ti=ti_data,
bundle_name="testing",
bundle_version=None,
task_callback_type=TaskInstanceState.FAILED,
)
log = structlog.get_logger()
_execute_task_callbacks(dagbag, request, log)
assert called is True
assert context_received is not None
assert context_received["dag"] == dag
assert "ti" in context_received
def test_execute_task_callbacks_retry_callback(self, spy_agency):
"""Test _execute_task_callbacks executes retry callbacks"""
called = False
context_received = None
def on_retry(context):
nonlocal called, context_received
called = True
context_received = context
with DAG(dag_id="test_dag") as dag:
BaseOperator(task_id="test_task", on_retry_callback=on_retry)
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
map_index=-1,
try_number=1,
dag_version_id=uuid.uuid4(),
state=TaskInstanceState.UP_FOR_RETRY,
)
request = TaskCallbackRequest(
filepath="test.py",
msg="Task retrying",
ti=ti_data,
bundle_name="testing",
bundle_version=None,
task_callback_type=TaskInstanceState.UP_FOR_RETRY,
)
log = structlog.get_logger()
_execute_task_callbacks(dagbag, request, log)
assert called is True
assert context_received is not None
assert context_received["dag"] == dag
assert "ti" in context_received
def test_execute_task_callbacks_with_context_from_server(self, spy_agency):
"""Test _execute_task_callbacks with context_from_server creates full context"""
called = False
context_received = None
def on_failure(context):
nonlocal called, context_received
called = True
context_received = context
with DAG(dag_id="test_dag") as dag:
BaseOperator(task_id="test_task", on_failure_callback=on_failure)
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
dag_run = DagRun(
dag_id="test_dag",
run_id="test_run",
logical_date=timezone.utcnow(),
start_date=timezone.utcnow(),
run_type="manual",
state=DagRunState.RUNNING,
)
dag_run.run_after = timezone.utcnow()
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
try_number=1,
dag_version_id=uuid.uuid4(),
)
context_from_server = TIRunContext(
dag_run=dag_run,
max_tries=3,
)
request = TaskCallbackRequest(
filepath="test.py",
msg="Task failed",
ti=ti_data,
bundle_name="testing",
bundle_version=None,
task_callback_type=TaskInstanceState.FAILED,
context_from_server=context_from_server,
)
log = structlog.get_logger()
_execute_task_callbacks(dagbag, request, log)
assert called is True
assert context_received is not None
# When context_from_server is provided, we get a full RuntimeTaskInstance context
assert "dag_run" in context_received
assert "logical_date" in context_received
def test_execute_task_callbacks_not_failure_callback(self, spy_agency):
"""Test _execute_task_callbacks when request is not a failure callback"""
called = False
def on_failure(context):
nonlocal called
called = True
with DAG(dag_id="test_dag") as dag:
BaseOperator(task_id="test_task", on_failure_callback=on_failure)
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
try_number=1,
dag_version_id=uuid.uuid4(),
state=TaskInstanceState.SUCCESS,
)
request = TaskCallbackRequest(
filepath="test.py",
msg="Task succeeded",
ti=ti_data,
bundle_name="testing",
bundle_version=None,
task_callback_type=TaskInstanceState.SUCCESS,
)
log = structlog.get_logger()
_execute_task_callbacks(dagbag, request, log)
# Should not call the callback since it's not a failure callback
assert called is False
def test_execute_task_callbacks_multiple_callbacks(self, spy_agency):
"""Test _execute_task_callbacks with multiple callbacks"""
call_count = 0
def on_failure_1(context):
nonlocal call_count
call_count += 1
def on_failure_2(context):
nonlocal call_count
call_count += 1
with DAG(dag_id="test_dag") as dag:
BaseOperator(task_id="test_task", on_failure_callback=[on_failure_1, on_failure_2])
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id="test_dag",
task_id="test_task",
run_id="test_run",
try_number=1,
dag_version_id=uuid.uuid4(),
state=TaskInstanceState.FAILED,
)
request = TaskCallbackRequest(
filepath="test.py",
msg="Task failed",
ti=ti_data,
bundle_name="testing",
bundle_version=None,
task_callback_type=TaskInstanceState.FAILED,
)
log = structlog.get_logger()
_execute_task_callbacks(dagbag, request, log)
assert call_count == 2
@pytest.mark.parametrize(
("dag_exists", "task_exists", "expected_error"),
[
(False, False, "DAG 'missing_dag' not found in DagBag"),
(True, False, "Task 'missing_task' not found in DAG 'test_dag'"),
],
)
def test_execute_task_callbacks_missing_dag_or_task(
self, spy_agency, dag_exists, task_exists, expected_error
):
"""Test _execute_task_callbacks raises ValueError for missing DAG or task"""
if dag_exists:
with DAG(dag_id="test_dag") as dag:
BaseOperator(task_id="existing_task")
def fake_collect_dags(self, *args, **kwargs):
self.dags[dag.dag_id] = dag
spy_agency.spy_on(DagBag.collect_dags, call_fake=fake_collect_dags, owner=DagBag)
dagbag = DagBag()
dagbag.collect_dags()
dag_id = "test_dag"
task_id = "missing_task"
else:
dagbag = DagBag()
dag_id = "missing_dag"
task_id = "test_task"
ti_data = TIDataModel(
id=uuid.uuid4(),
dag_id=dag_id,
task_id=task_id,
run_id="test_run",
try_number=1,
dag_version_id=uuid.uuid4(),
)
request = TaskCallbackRequest(
filepath="test.py",
msg="Task failed",
ti=ti_data,
bundle_name="testing",
bundle_version=None,
task_callback_type=TaskInstanceState.FAILED,
)
log = structlog.get_logger()
with pytest.raises(ValueError, match=expected_error):
_execute_task_callbacks(dagbag, request, log)
| TestExecuteTaskCallbacks |
python | tensorflow__tensorflow | tensorflow/python/tpu/tests/tpu_embedding_v2_hd_valid_input_test.py | {
"start": 1241,
"end": 4728
} | class ____(tpu_embedding_base_test.TPUEmbeddingBaseTest):
def test_enqueue_dense_sparse_ragged(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
dataset = self._create_high_dimensional_dense_dataset(strategy)
dense_iter = iter(
strategy.experimental_distribute_dataset(
dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
sparse = self._create_high_dimensional_sparse_dataset(strategy)
sparse_iter = iter(
strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
ragged = self._create_high_dimensional_ragged_dataset(strategy)
ragged_iter = iter(
strategy.experimental_distribute_dataset(
ragged,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
mid_level_api.build([
TensorShape([self.batch_size, self.data_batch_size, 1]),
TensorShape([self.batch_size, self.data_batch_size, 2]),
TensorShape([self.batch_size, self.data_batch_size, 3])
])
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
features = (next(dense_iter)[0], next(sparse_iter)[1],
next(ragged_iter)[2])
mid_level_api.enqueue(features, training=False)
return strategy.run(step)
test_fn()
def test_different_input_shapes(self):
strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
sparse = self._create_high_dimensional_sparse_dataset(strategy)
sparse_iter = iter(
strategy.experimental_distribute_dataset(
sparse,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
# Create a feature with shape (1, 3, 1)
dense_feature = constant_op.constant(
np.zeros(3), shape=(1, 3, 1), dtype=dtypes.int32)
dense_dataset = dataset_ops.DatasetV2.from_tensors(
dense_feature).unbatch().repeat().batch(
1 * strategy.num_replicas_in_sync, drop_remainder=True)
dense_iter = iter(
strategy.experimental_distribute_dataset(
dense_dataset,
options=distribute_lib.InputOptions(
experimental_fetch_to_device=False)))
@def_function.function
def test_fn():
def step():
return mid_level_api.dequeue()
features = (next(dense_iter), next(sparse_iter)[1], next(sparse_iter)[2])
mid_level_api.enqueue(features, training=False)
return strategy.run(step)
test_fn()
self.assertEqual(mid_level_api._output_shapes, [
TensorShape((1, 3)),
TensorShape((self.batch_size, self.data_batch_size)),
TensorShape((self.batch_size, self.data_batch_size))
])
def test_output_shapes_priority_over_feature_config_and_build(self):
_, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
# The output shapes setting in the feature config has the first priority.
mid_level_api._output_shapes = [TensorShape((2, 4)) for _ in range(3)]
mid_level_api.build([TensorShape((2, None, None)) for _ in range(3)])
self.assertEqual(mid_level_api._output_shapes,
[TensorShape((2, 4)) for _ in range(3)])
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| TPUEmbeddingTest |
python | google__pytype | pytype/tools/analyze_project/pytype_runner_test.py | {
"start": 809,
"end": 1002
} | class ____:
output: str
action: str
input: str
deps: Sequence[str]
imports: str
module: str
# number of lines in the build.ninja preamble
_PREAMBLE_LENGTH = 6
| ExpectedBuildStatement |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/cli/test_kubernetes_command.py | {
"start": 2749,
"end": 12918
} | class ____:
label_selector = "dag_id,task_id,try_number,airflow_version"
@classmethod
def setup_class(cls):
with conf_vars({("core", "executor"): "KubernetesExecutor"}):
importlib.reload(executor_loader)
importlib.reload(cli_parser)
cls.parser = cli_parser.get_parser()
@mock.patch("kubernetes.client.CoreV1Api.delete_namespaced_pod")
@mock.patch("airflow.providers.cncf.kubernetes.kube_client.config.load_incluster_config")
def test_delete_pod(self, load_incluster_config, delete_namespaced_pod):
kubernetes_command._delete_pod("dummy", "awesome-namespace")
delete_namespaced_pod.assert_called_with(body=mock.ANY, name="dummy", namespace="awesome-namespace")
load_incluster_config.assert_called_once()
@mock.patch("airflow.providers.cncf.kubernetes.cli.kubernetes_command._delete_pod")
@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_pod")
@mock.patch("airflow.providers.cncf.kubernetes.kube_client.config.load_incluster_config")
def test_running_pods_are_not_cleaned(self, load_incluster_config, list_namespaced_pod, delete_pod):
pod1 = MagicMock()
pod1.metadata.name = "dummy"
pod1.metadata.creation_timestamp = parse("2021-12-20T08:01:07Z")
pod1.status.phase = "Running"
pod1.status.reason = None
pods = MagicMock()
pods.metadata._continue = None
pods.items = [pod1]
list_namespaced_pod.return_value = pods
kubernetes_command.cleanup_pods(
self.parser.parse_args(["kubernetes", "cleanup-pods", "--namespace", "awesome-namespace"])
)
list_namespaced_pod.assert_called_once_with(
namespace="awesome-namespace", limit=500, label_selector=self.label_selector
)
delete_pod.assert_not_called()
load_incluster_config.assert_called_once()
@mock.patch("airflow.providers.cncf.kubernetes.cli.kubernetes_command._delete_pod")
@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_pod")
@mock.patch("airflow.providers.cncf.kubernetes.kube_client.config.load_incluster_config")
def test_cleanup_succeeded_pods(self, load_incluster_config, list_namespaced_pod, delete_pod):
pod1 = MagicMock()
pod1.metadata.name = "dummy"
pod1.metadata.creation_timestamp = parse("2021-12-20T08:01:07Z")
pod1.status.phase = "Succeeded"
pod1.status.reason = None
pods = MagicMock()
pods.metadata._continue = None
pods.items = [pod1]
list_namespaced_pod.return_value = pods
kubernetes_command.cleanup_pods(
self.parser.parse_args(["kubernetes", "cleanup-pods", "--namespace", "awesome-namespace"])
)
list_namespaced_pod.assert_called_once_with(
namespace="awesome-namespace", limit=500, label_selector=self.label_selector
)
delete_pod.assert_called_with("dummy", "awesome-namespace")
load_incluster_config.assert_called_once()
@mock.patch("airflow.providers.cncf.kubernetes.cli.kubernetes_command._delete_pod")
@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_pod")
@mock.patch("kubernetes.config.load_incluster_config")
def test_no_cleanup_failed_pods_wo_restart_policy_never(
self, load_incluster_config, list_namespaced_pod, delete_pod
):
pod1 = MagicMock()
pod1.metadata.name = "dummy2"
pod1.metadata.creation_timestamp = parse("2021-12-20T08:01:07Z")
pod1.status.phase = "Failed"
pod1.status.reason = None
pod1.spec.restart_policy = "Always"
pods = MagicMock()
pods.metadata._continue = None
pods.items = [pod1]
list_namespaced_pod.return_value = pods
kubernetes_command.cleanup_pods(
self.parser.parse_args(["kubernetes", "cleanup-pods", "--namespace", "awesome-namespace"])
)
list_namespaced_pod.assert_called_once_with(
namespace="awesome-namespace", limit=500, label_selector=self.label_selector
)
delete_pod.assert_not_called()
load_incluster_config.assert_called_once()
@mock.patch("airflow.providers.cncf.kubernetes.cli.kubernetes_command._delete_pod")
@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_pod")
@mock.patch("kubernetes.config.load_incluster_config")
def test_cleanup_failed_pods_w_restart_policy_never(
self, load_incluster_config, list_namespaced_pod, delete_pod
):
pod1 = MagicMock()
pod1.metadata.name = "dummy3"
pod1.metadata.creation_timestamp = parse("2021-12-20T08:01:07Z")
pod1.status.phase = "Failed"
pod1.status.reason = None
pod1.spec.restart_policy = "Never"
pods = MagicMock()
pods.metadata._continue = None
pods.items = [pod1]
list_namespaced_pod.return_value = pods
kubernetes_command.cleanup_pods(
self.parser.parse_args(["kubernetes", "cleanup-pods", "--namespace", "awesome-namespace"])
)
list_namespaced_pod.assert_called_once_with(
namespace="awesome-namespace", limit=500, label_selector=self.label_selector
)
delete_pod.assert_called_with("dummy3", "awesome-namespace")
load_incluster_config.assert_called_once()
@mock.patch("airflow.providers.cncf.kubernetes.cli.kubernetes_command._delete_pod")
@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_pod")
@mock.patch("kubernetes.config.load_incluster_config")
def test_cleanup_evicted_pods(self, load_incluster_config, list_namespaced_pod, delete_pod):
pod1 = MagicMock()
pod1.metadata.name = "dummy4"
pod1.metadata.creation_timestamp = parse("2021-12-20T08:01:07Z")
pod1.status.phase = "Failed"
pod1.status.reason = "Evicted"
pod1.spec.restart_policy = "Never"
pods = MagicMock()
pods.metadata._continue = None
pods.items = [pod1]
list_namespaced_pod.return_value = pods
kubernetes_command.cleanup_pods(
self.parser.parse_args(["kubernetes", "cleanup-pods", "--namespace", "awesome-namespace"])
)
list_namespaced_pod.assert_called_once_with(
namespace="awesome-namespace", limit=500, label_selector=self.label_selector
)
delete_pod.assert_called_with("dummy4", "awesome-namespace")
load_incluster_config.assert_called_once()
@mock.patch("airflow.providers.cncf.kubernetes.cli.kubernetes_command._delete_pod")
@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_pod")
@mock.patch("kubernetes.config.load_incluster_config")
def test_cleanup_pending_pods(self, load_incluster_config, list_namespaced_pod, delete_pod):
pod1 = MagicMock()
pod1.metadata.name = "dummy5"
pod1.metadata.creation_timestamp = parse("2021-12-20T08:01:07Z")
pod1.status.phase = "Pending"
pod1.status.reason = "Unschedulable"
pods = MagicMock()
pods.metadata._continue = None
pods.items = [pod1]
list_namespaced_pod.return_value = pods
kubernetes_command.cleanup_pods(
self.parser.parse_args(["kubernetes", "cleanup-pods", "--namespace", "awesome-namespace"])
)
list_namespaced_pod.assert_called_once_with(
namespace="awesome-namespace", limit=500, label_selector=self.label_selector
)
delete_pod.assert_called_with("dummy5", "awesome-namespace")
load_incluster_config.assert_called_once()
@mock.patch("airflow.providers.cncf.kubernetes.cli.kubernetes_command._delete_pod")
@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_pod")
@mock.patch("kubernetes.config.load_incluster_config")
def test_cleanup_api_exception_continue(self, load_incluster_config, list_namespaced_pod, delete_pod):
delete_pod.side_effect = kubernetes.client.rest.ApiException(status=0)
pod1 = MagicMock()
pod1.metadata.name = "dummy"
pod1.metadata.creation_timestamp = parse("2021-12-20T08:01:07Z")
pod1.status.phase = "Succeeded"
pod1.status.reason = None
pods = MagicMock()
pods.metadata._continue = None
pods.items = [pod1]
list_namespaced_pod.return_value = pods
kubernetes_command.cleanup_pods(
self.parser.parse_args(["kubernetes", "cleanup-pods", "--namespace", "awesome-namespace"])
)
list_namespaced_pod.assert_called_once_with(
namespace="awesome-namespace", limit=500, label_selector=self.label_selector
)
load_incluster_config.assert_called_once()
@mock.patch("airflow.providers.cncf.kubernetes.cli.kubernetes_command._delete_pod")
@mock.patch("kubernetes.client.CoreV1Api.list_namespaced_pod")
@mock.patch("kubernetes.config.load_incluster_config")
def test_list_pod_with_continue_token(self, load_incluster_config, list_namespaced_pod, delete_pod):
pod1 = MagicMock()
pod1.metadata.name = "dummy"
pod1.metadata.creation_timestamp = parse("2021-12-20T08:01:07Z")
pod1.status.phase = "Succeeded"
pod1.status.reason = None
pods = MagicMock()
pods.metadata._continue = "dummy-token"
pods.items = [pod1]
next_pods = MagicMock()
next_pods.metadata._continue = None
next_pods.items = [pod1]
list_namespaced_pod.side_effect = [pods, next_pods]
kubernetes_command.cleanup_pods(
self.parser.parse_args(["kubernetes", "cleanup-pods", "--namespace", "awesome-namespace"])
)
calls = [
call.first(namespace="awesome-namespace", limit=500, label_selector=self.label_selector),
call.second(
namespace="awesome-namespace",
limit=500,
label_selector=self.label_selector,
_continue="dummy-token",
),
]
list_namespaced_pod.assert_has_calls(calls)
delete_pod.assert_called_with("dummy", "awesome-namespace")
load_incluster_config.assert_called_once()
| TestCleanUpPodsCommand |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_caret.py | {
"start": 40,
"end": 5197
} | class ____(util.MdCase):
"""Test escaping cases for Caret with smart enabled."""
extension = [
'pymdownx.caret'
]
extension_configs = {
"pymdownx.caret": {
"smart_insert": True
}
}
def test_case_1(self):
"""Test case 1."""
self.check_markdown(
R"x^2^ + y^2^ = 4",
"<p>x<sup>2</sup> + y<sup>2</sup> = 4</p>",
True
)
def test_case_2(self):
"""Test case 2."""
self.check_markdown(
R"Text^superscript^",
"<p>Text<sup>superscript</sup></p>",
True
)
def test_case_3(self):
"""Test case 3."""
self.check_markdown(
R"Text^superscript failed^",
"<p>Text^superscript failed^</p>",
True
)
def test_case_4(self):
"""Test case 4."""
self.check_markdown(
R"Text^superscript\ success^",
"<p>Text<sup>superscript success</sup></p>",
True
)
def test_case_5(self):
"""Test case 5."""
self.check_markdown(
R"Test: ^^ Won't insert ^^",
"<p>Test: ^^ Won't insert ^^</p>",
True
)
def test_case_6(self):
"""Test case 6."""
self.check_markdown(
R"Test: ^^Will insert^^",
"<p>Test: <ins>Will insert</ins></p>",
True
)
def test_case_7(self):
"""Test case 7."""
self.check_markdown(
R"Test: \^\^Escaped\^\^",
"<p>Test: ^^Escaped^^</p>",
True
)
def test_case_8(self):
"""Test case 8."""
self.check_markdown(
R"Test: ^^This will all be inserted ^^because of the placement of the center carets.^^",
"<p>Test: <ins>This will all be inserted ^^because of the placement of the center carets.</ins></p>",
True
)
def test_case_9(self):
"""Test case 9."""
self.check_markdown(
R"Test: ^^This will all be inserted ^^ because of the placement of the center carets.^^",
"<p>Test: <ins>This will all be inserted ^^ because of the placement of the center carets.</ins></p>",
True
)
def test_case_10(self):
"""Test case 10."""
self.check_markdown(
R"Test: ^^This will NOT all be inserted^^ because of the placement of the center caret.^^",
"<p>Test: <ins>This will NOT all be inserted</ins> because of the placement of the center caret.^^</p>",
True
)
def test_case_11(self):
"""Test case 11."""
self.check_markdown(
R"Test: ^^This will all be inserted^ because of the token is less than that of the caret.^^",
"<p>Test: <ins>This will all be inserted^ because of the token is less than that of the caret.</ins></p>",
True
)
def test_complex_cases(self):
"""Test some complex cases."""
self.check_markdown(
R'''
^^^I'm\ insert\ and\ sup^ I am just insert.^^
^^^I'm\ insert\ and\ sup!^^\ I\ am\ just\ sup.^
^sup\ and\ ^^sup\ insert^^^ and ^sup^
^^insert and ^sup\ insert^^^ and ^sup^
^^^I'm\ sup\ and\ insert^ I am just insert.^^ ^sup^
^^^I'm\ insert\ and\ sup!^^\ I\ am\ just\ sup.^ ^sup^
^sup\ and\ ^^sup\ insert^^^ and not sup^
^^insert and ^sup\ insert^^^ and not sup^
^sup\ and\ ^^sup\ insert^^^
^^insert and ^sup\ insert^^^
^sup\ ^^sup\ insert^^\ sup^
^^^sup\ and\ insert^ insert^^: foo bar ^^insert^^
^^^sup\ and\ insert^^\ sup^ foo bar ^^insert^^
^sup\ and\ ^^sup\ insert^^^ ^^insert^^
^^insert and ^sup\ insert^^^ ^^insert^^
''',
'''
<p><ins><sup>I'm insert and sup</sup> I am just insert.</ins></p>
<p><sup><ins>I'm insert and sup!</ins> I am just sup.</sup></p>
<p><sup>sup and <ins>sup insert</ins></sup> and <sup>sup</sup></p>
<p><ins>insert and <sup>sup insert</sup></ins> and <sup>sup</sup></p>
<p><ins><sup>I'm sup and insert</sup> I am just insert.</ins> <sup>sup</sup></p>
<p><sup><ins>I'm insert and sup!</ins> I am just sup.</sup> <sup>sup</sup></p>
<p><sup>sup and <ins>sup insert</ins></sup> and not sup^</p>
<p><ins>insert and <sup>sup insert</sup></ins> and not sup^</p>
<p><sup>sup and <ins>sup insert</ins></sup></p>
<p><ins>insert and <sup>sup insert</sup></ins></p>
<p><sup>sup <ins>sup insert</ins> sup</sup></p>
<p><ins><sup>sup and insert</sup> insert</ins>: foo bar <ins>insert</ins></p>
<p><sup><ins>sup and insert</ins> sup</sup> foo bar <ins>insert</ins></p>
<p><sup>sup and <ins>sup insert</ins></sup> <ins>insert</ins></p>
<p><ins>insert and <sup>sup insert</sup></ins> <ins>insert</ins></p>
''',
True
)
| TestCaretSmart |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF049.py | {
"start": 149,
"end": 191
} | class ____(Enum): ...
@dataclass # Foobar
| E |
python | pandas-dev__pandas | asv_bench/benchmarks/inference.py | {
"start": 3952,
"end": 4355
} | class ____:
params = ([True, False], [50, 500, 5000, 100000])
param_names = ["cache", "count"]
def setup(self, cache, count):
rng = date_range(start="1/1/1971", periods=count)
self.unique_date_strings = rng.strftime("%Y-%m-%d").tolist()
def time_unique_date_strings(self, cache, count):
to_datetime(self.unique_date_strings, cache=cache)
| ToDatetimeCacheSmallCount |
python | joke2k__faker | faker/providers/isbn/__init__.py | {
"start": 131,
"end": 2738
} | class ____(BaseProvider):
"""Generates fake ISBNs.
See https://www.isbn-international.org/content/what-isbn for the
format of ISBNs.
See https://www.isbn-international.org/range_file_generation for the
list of rules pertaining to each prefix/registration group.
"""
rules: Dict[str, Dict[str, List[Tuple[str, str, int]]]] = {}
def _body(self) -> List[str]:
"""Generate the information required to create an ISBN-10 or
ISBN-13.
"""
ean: str = self.random_element(self.rules.keys())
reg_group: str = self.random_element(self.rules[ean].keys())
# Given the chosen ean/group, decide how long the
# registrant/publication string may be.
# We must allocate for the calculated check digit, so
# subtract 1
reg_pub_len: int = MAX_LENGTH - len(ean) - len(reg_group) - 1
# Generate a registrant/publication combination
reg_pub: str = self.numerify("#" * reg_pub_len)
# Use rules to separate the registrant from the publication
rules = self.rules[ean][reg_group]
registrant, publication = self._registrant_publication(reg_pub, rules)
return [ean, reg_group, registrant, publication]
@staticmethod
def _registrant_publication(reg_pub: str, rules: List[Tuple[str, str, int]]) -> Tuple[str, str]:
"""Separate the registration from the publication in a given
string.
:param reg_pub: A string of digits representing a registration
and publication.
:param rules: A list of registrant rules which designate where
to separate the values in the string.
:returns: A (registrant, publication) tuple of strings.
"""
for rule in rules:
if rule[0] <= reg_pub[:-1] <= rule[1]:
reg_len = rule[2]
break
else:
raise Exception(f"Registrant/Publication '{reg_pub}' not found in registrant rule list.")
registrant, publication = reg_pub[:reg_len], reg_pub[reg_len:]
return registrant, publication
def isbn13(self, separator: str = "-") -> str:
"""
:sample:
"""
ean, group, registrant, publication = self._body()
isbn = ISBN13(ean, group, registrant, publication)
return isbn.format(separator)
def isbn10(self, separator: str = "-") -> str:
"""
:sample:
"""
ean, group, registrant, publication = self._body()
isbn = ISBN10(ean, group, registrant, publication)
return isbn.format(separator)
| Provider |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 118304,
"end": 118974
} | class ____(GeneratedAirbyteDestination):
@public
def __init__(self, name: str, destination_path: str):
"""Airbyte Destination for Local Json.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/local-json
Args:
name (str): The name of the destination.
destination_path (str): Path to the directory where json files will be written. The files will be placed inside that local mount. For more information check out our docs
"""
self.destination_path = check.str_param(destination_path, "destination_path")
super().__init__("Local Json", name)
| LocalJsonDestination |
python | astropy__astropy | astropy/io/ascii/fixedwidth.py | {
"start": 15015,
"end": 15339
} | class ____(FixedWidthHeader):
"""Header reader for fixed width tables splitting on whitespace.
For fixed width tables with several header lines, there is typically
a white-space delimited format line, so splitting on white space is
needed.
"""
splitter_class = DefaultSplitter
| FixedWidthTwoLineHeader |
python | pytorch__pytorch | test/inductor/test_mkldnn_pattern_matcher.py | {
"start": 24249,
"end": 160488
} | class ____(TestPatternMatcherBase):
@reduced_f32_on_and_off()
def test_linear_unary(self, device="cpu"):
self.device = device
class M(torch.nn.Module):
def __init__(
self,
unary_fn,
in_features,
out_features,
bias,
**kwargs,
):
super().__init__()
self.linear = torch.nn.Linear(
in_features,
out_features,
bias,
**kwargs,
)
self.unary_fn = unary_fn
def forward(self, x):
x = self.linear(x)
return self.unary_fn(x)
dtypes = []
if is_mkldnn_bf16_supported(self.device):
dtypes.append(torch.bfloat16)
if is_mkldnn_fp16_supported(self.device):
dtypes.append(torch.float16)
if torch.backends.mkldnn.matmul.fp32_precision in ["bf16", "tf32"]:
dtypes.append(torch.float32)
options = itertools.product(unary_list, [True, False], dtypes)
for unary_fn, bias, dtype in options:
if (
dtype != torch.float32
and torch.backends.mkldnn.matmul.fp32_precision == "tf32"
):
continue
metrics.reset()
mod = M(unary_fn, 10, 30, bias=bias).eval()
# only fuse for linear when the dtype is bf16
v = torch.randn(2, 10)
def matcher_check_fn():
match_nodes = unary_list[unary_fn]
if dtype != torch.float32 and self._check_unary_is_decomposed(unary_fn):
# Has extra dtype conversion nodes for autocast.
match_nodes += 2
self.assertEqual(
counters["inductor"]["mkldnn_unary_fusion_matcher_nodes"],
0 if TEST_ACL else match_nodes,
)
self.assertEqual(
counters["inductor"]["mkldnn_linear_weight_pack_matcher_count"], 1
)
self._test_common(mod, (v,), matcher_check_fn, check_autocast=dtype)
# only generated 1 kernel for "to_dtype"
expected_kernel_count = 2 if TEST_ACL else 1
if dtype == torch.float32:
# In BF32, input is float32, will not generate kernel for "to_dtype"
expected_kernel_count -= 1
self.assertEqual(metrics.generated_kernel_count, expected_kernel_count)
@reduced_f32_on_and_off()
@unittest.skipIf(not TEST_MKL, "Test requires MKL")
def test_linear_fp32(self, device="cpu"):
self.device = device
class M(torch.nn.Module):
def __init__(self, bias):
super().__init__()
self.linear = torch.nn.Linear(10, 30, bias)
def forward(self, x):
return self.linear(x)
for bias in [True, False]:
mod = M(bias=bias).eval()
v = torch.randn(2, 10)
# packing pass.
def matcher_check_fn():
self.assertEqual(
counters["inductor"]["mkldnn_linear_weight_pack_matcher_count"], 1
)
self._test_common(mod, (v,), matcher_check_fn)
@unittest.skipIf(not TEST_MKL, "Test requires MKL")
def test_linear_input_non_contiguous_3D_wo_bias(self, device="cpu"):
self.device = device
# Activation is 3D, non-contiguous and without Bias
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(4096, 1024, bias=False)
def forward(self, x):
x = torch.ops.aten.permute.default(x, [0, 2, 1, 3])
x = torch.ops.aten.reshape.default(x, [4, 1, 4096])
return self.linear(x)
mod = M().eval()
v = torch.randn(4, 32, 1, 128)
dtypes = [torch.float]
if is_mkldnn_bf16_supported(self.device):
dtypes.append(torch.bfloat16)
if is_mkldnn_fp16_supported(self.device):
dtypes.append(torch.float16)
for dtype in dtypes:
torch._dynamo.reset()
autocast_enabled = dtype in [torch.bfloat16, torch.float16]
with (
torch.no_grad(),
torch.autocast(
device_type="cpu",
enabled=autocast_enabled,
dtype=dtype,
),
):
expected = mod(v)
actual, (source_code,) = run_and_get_code(
torch.compile(mod, fullgraph=True),
v,
)
self.assertIn(
"torch.ops.mkldnn._linear_pointwise.default"
if autocast_enabled
else "torch.ops.mkl._mkl_linear.default",
source_code,
)
torch.testing.assert_close(actual, expected, atol=1e-2, rtol=1e-2)
@skipIfXpu(
msg="Different with CPU, two linears will be concat on XPU for better performance"
)
def test_linear_add_bias(self, device="cpu"):
self.device = device
class M(torch.nn.Module):
def __init__(self, device, dtype, unary_fn, cast_bias):
super().__init__()
self.linear1 = torch.nn.Linear(10, 64, bias=False)
self.bias1 = torch.randn(64, device=device)
self.linear2 = torch.nn.Linear(10, 64, bias=False)
self.bias2 = torch.randn(64, device=device)
if cast_bias:
self.bias1 = self.bias1.to(dtype=dtype, device=device)
self.bias2 = self.bias2.to(dtype=dtype, device=device)
self.unary_fn = unary_fn
def forward(self, x):
a = self.linear1(x) + self.bias1
b = self.linear2(x) + self.bias2
return self.unary_fn(a), self.unary_fn(b)
dtypes = []
if is_mkldnn_bf16_supported(self.device):
dtypes.append(torch.bfloat16)
if is_mkldnn_fp16_supported(self.device):
dtypes.append(torch.float16)
options = itertools.product(unary_list, dtypes)
for unary_fn, dtype in options:
metrics.reset()
fold_mod = M(self.device, dtype, unary_fn, cast_bias=True).eval()
v = torch.randn(2, 10)
def folder_matcher_check_fn():
match_nodes = unary_list[unary_fn]
if self._check_unary_is_decomposed(unary_fn):
# Has extra dtype conversion nodes for autocast.
match_nodes += 2
# we have 2 linears, so we double the matcher_count/nodes
self.assertEqual(
counters["inductor"]["mkldnn_unary_fusion_matcher_count"],
0 if TEST_ACL else 2,
)
self.assertEqual(
counters["inductor"]["mkldnn_unary_fusion_matcher_nodes"],
0 if TEST_ACL else match_nodes * 2,
)
self.assertEqual(
counters["inductor"]["mkldnn_linear_weight_pack_matcher_count"], 2
)
self._test_common(
fold_mod,
(v,),
folder_matcher_check_fn,
check_autocast=dtype,
)
self.assertEqual(metrics.generated_kernel_count, 3 if TEST_ACL else 1)
# we won't fold the bias if bias is not same dtype with weight
# https://github.com/pytorch/pytorch/pull/129138
metrics.reset()
mod = M(self.device, dtype, unary_fn, cast_bias=False).eval()
def matcher_check_fn():
self.assertEqual(
counters["inductor"]["mkldnn_linear_weight_pack_matcher_count"], 2
)
self._test_common(mod, (v,), matcher_check_fn, check_autocast=dtype)
# 1 kernel for "to_lowp", 2 kernels for unary ops
self.assertEqual(metrics.generated_kernel_count, 3)
@reduced_f32_on_and_off()
def test_linear_binary(self, device="cpu"):
self.device = device
class M(torch.nn.Module):
def __init__(self, binary_fn, in_channels, out_channels, bias, **kwargs):
super().__init__()
self.linear = torch.nn.Linear(
in_channels, out_channels, bias=bias, **kwargs
)
self.binary_fn = binary_fn
def forward(self, x, y):
x = self.linear(x)
x = self.binary_fn(x, y.clone())
return x
dtypes = []
if is_mkldnn_bf16_supported(self.device):
dtypes.append(torch.bfloat16)
if is_mkldnn_fp16_supported(self.device):
dtypes.append(torch.float16)
if torch.backends.mkldnn.matmul.fp32_precision in ["bf16", "tf32"]:
dtypes.append(torch.float32)
options = itertools.product(
binary_list, [[2, 3, 10], [2, 10]], [True, False], dtypes
)
out_feature = 30
for binary_fn, input_shape, bias, dtype in options:
metrics.reset()
if (
dtype != torch.float32
and torch.backends.mkldnn.matmul.fp32_precision == "tf32"
):
continue
def matcher_check_fn():
self.assertEqual(
counters["inductor"][
"mkldnn_conv_binary_unary_fusion_matcher_nodes"
],
0 if TEST_ACL else 2,
)
reshape_linear_reshape_match_nodes = 3 if len(input_shape) == 3 else 0
self.assertEqual(
counters["inductor"]["mkldnn_reshape_linear_reshape_matcher_nodes"],
reshape_linear_reshape_match_nodes,
)
self.assertEqual(
counters["inductor"]["mkldnn_linear_weight_pack_matcher_count"], 1
)
mod = M(binary_fn, input_shape[-1], out_feature, bias).eval()
v = torch.randn(input_shape)
other = torch.randn(input_shape[:-1] + [out_feature]).to(dtype)
self._test_common(
mod,
(
v,
other,
),
matcher_check_fn,
check_autocast=dtype,
)
# only generated 1 kernel for "to_dtype"
expected_kernel_count = 2 if TEST_ACL else 1
if dtype == torch.float32:
# In BF32, input is float32, will not generate kernel for "to_dtype"
expected_kernel_count -= 1
self.assertEqual(metrics.generated_kernel_count, expected_kernel_count)
def test_linear_binary_broadcast_shapes(self, device="cpu"):
self.device = device
class M(torch.nn.Module):
def __init__(self, binary_fn, in_channels, out_channels, bias, **kwargs):
super().__init__()
self.linear = torch.nn.Linear(
in_channels, out_channels, bias=bias, **kwargs
)
self.binary_fn = binary_fn
def forward(self, x, y):
x = self.linear(x)
x = self.binary_fn(x, y.clone())
return x
dtypes = []
if is_mkldnn_bf16_supported(self.device):
dtypes.append(torch.bfloat16)
if is_mkldnn_fp16_supported(self.device):
dtypes.append(torch.float16)
options = itertools.product(
binary_list,
(
([2, 3, 10], [1, 1, 30]),
([2, 10], [1, 30]),
),
(True, False),
dtypes,
)
out_feature = 30
for binary_fn, (input_shape, other_shape), bias, dtype in options:
metrics.reset()
mod = M(binary_fn, input_shape[-1], out_feature, bias).eval()
v = torch.randn(input_shape)
other = torch.randn(other_shape).to(dtype)
def matcher_check_fn():
reshape_linear_reshape_match_nodes = 3 if len(input_shape) == 3 else 0
self.assertEqual(
counters["inductor"]["mkldnn_reshape_linear_reshape_matcher_nodes"],
reshape_linear_reshape_match_nodes,
)
self.assertEqual(
counters["inductor"][
"mkldnn_conv_binary_unary_fusion_matcher_nodes"
],
0 if TEST_ACL else 2,
)
self.assertEqual(
counters["inductor"]["mkldnn_linear_weight_pack_matcher_nodes"], 1
)
self._test_common(
mod,
(
v,
other,
),
matcher_check_fn,
check_autocast=dtype,
)
self.assertEqual(metrics.generated_kernel_count, 2 if TEST_ACL else 1)
@skipIfXpu(
msg="Different with CPU, two linears will be concat on XPU for better performance"
)
def test_multi_linear_share_same_input(self, device="cpu"):
self.device = device
# llama pattern.
class M(torch.nn.Module):
def __init__(
self,
):
super().__init__()
self.w1 = torch.nn.Linear(16, 16, bias=False)
self.w2 = torch.nn.Linear(16, 16, bias=False)
def forward(self, x):
return F.silu(self.w1(x)) * F.relu(self.w2(x))
dtypes = []
if is_mkldnn_bf16_supported(self.device):
dtypes.append(torch.bfloat16)
if is_mkldnn_fp16_supported(self.device):
dtypes.append(torch.float16)
def matcher_check_fn():
self.assertEqual(
counters["inductor"]["mkldnn_unary_fusion_matcher_nodes"],
0 if TEST_ACL else 7,
)
self.assertEqual(
counters["inductor"]["mkldnn_unary_fusion_matcher_count"],
0 if TEST_ACL else 2,
)
self.assertEqual(
counters["inductor"]["mkldnn_reshape_linear_reshape_matcher_nodes"], 6
)
self.assertEqual(
counters["inductor"]["mkldnn_linear_weight_pack_matcher_count"], 2
)
for dtype in dtypes:
mod = M().to(dtype).eval()
v = torch.randn(2, 4, 16).to(dtype)
self._test_common(mod, (v,), matcher_check_fn, rtol=1e-2, atol=1e-2)
def _qconv2d_test_helper(
self,
device="cpu",
int8_mixed_bf16=False,
quantization_with_autocast=False,
):
class M(torch.nn.Module):
def __init__(
self,
**kwargs,
):
super().__init__()
self.conv = torch.nn.Conv2d(3, 128, kernel_size=3, stride=1)
self.conv2 = torch.nn.Conv2d(128, 128, kernel_size=3, stride=1)
self.conv3 = torch.nn.Conv2d(
128, 128, kernel_size=3, stride=1, groups=4
)
def forward(self, x):
return self.conv3(self.conv2(self.conv(x)))
mod = M().eval().to(device=device)
v = (
torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False)
.add(1)
.to(device=device)
)
def matcher_check_fn():
# 1. Dequant-Conv2D pattern matched in QConv2D weight prepack * 1
# int8_mixed_fp32: [dequant_node, dequantize_per_channel, clone, convolution]
# int8_mixed_bf16: [dequant_node, optional(convert_element_type_4),
# dequantize_per_channel, optional(convert_element_type_3), clone, convolution]
self.assertEqual(
counters["inductor"]["qconv_weight_prepack_matcher_count"], 3
)
self.assertEqual(
counters["inductor"]["qconv_weight_prepack_matcher_nodes"],
(16 if quantization_with_autocast else 18) if int8_mixed_bf16 else 12,
)
self.assertEqual(
counters["inductor"]["qconv_unary_lower_count"], 0 if TEST_ACL else 3
)
self._test_common(
mod,
(v,),
matcher_check_fn,
check_quantization=True,
check_autocast=torch.bfloat16 if int8_mixed_bf16 else torch.float,
quantization_with_autocast=quantization_with_autocast,
)
if torch._inductor.config.cpp_wrapper:
self._test_code_common(
mod,
(v,),
[f"aoti_torch_{device}__qconv_pointwise_tensor"],
[],
check_quantization=True,
num_include_ops=[3],
)
else:
self._test_code_common(
mod,
(v,),
["torch.ops.onednn.qconv_pointwise.tensor"],
[],
check_quantization=True,
num_include_ops=[3],
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfRocm
def test_qconv2d_cpu(self):
r"""
This testcase will quantize a single Conv2d module.
"""
self._qconv2d_test_helper("cpu")
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_xpu(self):
r"""
This testcase will quantize a single Conv2d module.
"""
self._qconv2d_test_helper("xpu")
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfRocmArch(MI300_ARCH + MI350_ARCH)
def test_qconv2d_int8_mixed_bf16(self):
r"""
This testcase will quantize a single Conv2d module with int8_mixed_bf16 quantization.
"""
self._qconv2d_test_helper(int8_mixed_bf16=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfRocmArch(MI300_ARCH + MI350_ARCH)
def test_qconv2d_int8_mixed_bf16_use_autocast(self):
r"""
This testcase will quantize a single Conv2d module with int8_mixed_bf16 quantization.
"""
self._qconv2d_test_helper(int8_mixed_bf16=True, quantization_with_autocast=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_int8_mixed_bf16_xpu(self):
r"""
This testcase will quantize a single Conv2d module with int8_mixed_bf16 quantization.
"""
self._qconv2d_test_helper(device="xpu", int8_mixed_bf16=True)
def _qconv2d_unary_test_helper(
self,
device="cpu",
int8_mixed_bf16=False,
unary_op=torch.nn.ReLU(),
qconv_unary_matcher_nodes=None,
):
class M(torch.nn.Module):
def __init__(
self,
**kwargs,
):
super().__init__()
self.conv = torch.nn.Conv2d(3, 128, kernel_size=3, stride=1)
self.unary_fn = copy.deepcopy(unary_op)
self.conv2 = torch.nn.Conv2d(
128, 128, kernel_size=3, stride=1, bias=False
)
self.unary_fn2 = copy.deepcopy(unary_op)
def forward(self, x):
tmp = self.unary_fn(self.conv(x))
return self.unary_fn2(self.conv2(tmp))
mod = M().eval().to(device=device)
v = (
torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False)
.add(1)
.to(device=device)
)
def matcher_check_fn():
# 1. Dequant-Conv2D pattern matched in quantization weight prepack * 2
self.assertEqual(
counters["inductor"]["qconv_weight_prepack_matcher_count"], 2
)
# 2. QConv2D Unary fusion in post-grad fusion pass * 2
self.assertEqual(
counters["inductor"]["qconv_unary_matcher_count"],
0 if TEST_ACL else 2,
)
self.assertEqual(
counters["inductor"]["qconv_unary_lower_count"], 0 if TEST_ACL else 2
)
if qconv_unary_matcher_nodes:
self.assertEqual(
counters["inductor"]["qconv_unary_matcher_nodes"],
0 if TEST_ACL else qconv_unary_matcher_nodes,
)
self._test_common(
mod,
(v,),
check_quantization=True,
check_autocast=torch.bfloat16 if int8_mixed_bf16 else torch.float,
matcher_check_fn=matcher_check_fn,
)
if torch._inductor.config.cpp_wrapper:
self._test_code_common(
mod,
(v,),
[f"aoti_torch_{device}__qconv_pointwise_tensor"],
[],
check_quantization=True,
num_include_ops=[2],
)
else:
self._test_code_common(
mod,
(v,),
["torch.ops.onednn.qconv_pointwise.tensor"],
[],
check_quantization=True,
num_include_ops=[2],
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qconv2d_relu_cpu(self):
r"""
This testcase will quantize Conv2d->ReLU pattern.
"""
self._qconv2d_unary_test_helper(device="cpu")
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_relu_xpu(self):
r"""
This testcase will quantize Conv2d->ReLU pattern.
"""
self._qconv2d_unary_test_helper(device="xpu")
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qconv2d_relu_int8_mixed_bf16_xpu(self):
r"""
This testcase will quantize Conv2d->ReLU pattern with int8_mixed_bf16 quantization.
"""
self._qconv2d_unary_test_helper(int8_mixed_bf16=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qconv2d_relu6_cpu(self):
r"""
This testcase will quantize Conv2d->ReLU6 pattern.
"""
self._qconv2d_unary_test_helper(device="cpu", unary_op=torch.nn.ReLU6())
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_relu6_xpu(self):
r"""
This testcase will quantize Conv2d->ReLU6 pattern.
"""
self._qconv2d_unary_test_helper(device="xpu", unary_op=torch.nn.ReLU6())
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qconv2d_hardtanh_cpu(self):
r"""
This testcase will quantize Conv2d->Hardtanh pattern.
"""
self._qconv2d_unary_test_helper(device="cpu", unary_op=torch.nn.Hardtanh())
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_hardtanh_xpu(self):
r"""
This testcase will quantize Conv2d->Hardtanh pattern.
"""
self._qconv2d_unary_test_helper(device="xpu", unary_op=torch.nn.Hardtanh())
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qconv2d_hardtanh_int8_mixed_bf16_cpu(self):
r"""
This testcase will quantize Conv2d->Hardtanh pattern.
Match.nodes:
[qconv2d_pointwise_default, convert_element_type, clamp_min, clamp_max, convert_element_type, quantize_per_tensor]
[qconv2d_pointwise_default, convert_element_type, clamp_min, clamp_max, convert_element_type]
"""
self._qconv2d_unary_test_helper(
unary_op=torch.nn.Hardtanh(),
int8_mixed_bf16=True,
qconv_unary_matcher_nodes=11,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_hardtanh_int8_mixed_bf16_xpu(self):
r"""
This testcase will quantize Conv2d->Hardtanh pattern.
Match.nodes:
[qconv2d_pointwise_default, convert_element_type, clamp_min, clamp_max, convert_element_type, quantize_per_tensor]
[qconv2d_pointwise_default, convert_element_type, clamp_min, clamp_max, convert_element_type]
"""
self._qconv2d_unary_test_helper(
device="xpu",
unary_op=torch.nn.Hardtanh(),
int8_mixed_bf16=True,
qconv_unary_matcher_nodes=11,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qconv2d_hardswish_cpu(self):
r"""
This testcase will quantize Conv2d->Hardswish pattern.
"""
self._qconv2d_unary_test_helper(device="cpu", unary_op=torch.nn.Hardswish())
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_hardswish_xpu(self):
r"""
This testcase will quantize Conv2d->Hardswish pattern.
"""
self._qconv2d_unary_test_helper(device="xpu", unary_op=torch.nn.Hardswish())
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qconv2d_hardswish_int8_mixed_bf16_cpu(self):
r"""
This testcase will quantize Conv2d->Hardswish pattern.
Match.nodes:
[qconv2d_pointwise_default, convert_element_type, add, clamp_min,
clamp_max, mul, div, convert_element_type, quantize_per_tensor]
[qconv2d_pointwise_default, convert_element_type, add, clamp_min, clamp_max, mul, div, convert_element_type]
"""
self._qconv2d_unary_test_helper(
unary_op=torch.nn.Hardswish(),
int8_mixed_bf16=True,
qconv_unary_matcher_nodes=17,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_hardswish_int8_mixed_bf16_xpu(self):
r"""
This testcase will quantize Conv2d->Hardswish pattern.
Match.nodes:
[qconv2d_pointwise_default, convert_element_type, add, clamp_min,
clamp_max, mul, div, convert_element_type, quantize_per_tensor]
[qconv2d_pointwise_default, convert_element_type, add, clamp_min, clamp_max, mul, div, convert_element_type]
"""
self._qconv2d_unary_test_helper(
device="xpu",
unary_op=torch.nn.Hardswish(),
int8_mixed_bf16=True,
qconv_unary_matcher_nodes=17,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qconv2d_silu_cpu(self):
r"""
This testcase will quantize Conv2d->SiLU pattern.
"""
self._qconv2d_unary_test_helper(device="cpu", unary_op=torch.nn.SiLU())
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_silu_xpu(self):
r"""
This testcase will quantize Conv2d->SiLU pattern.
"""
self._qconv2d_unary_test_helper(device="xpu", unary_op=torch.nn.SiLU())
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qconv2d_silu_int8_mixed_bf16_cpu(self):
r"""
This testcase will quantize Conv2d->SiLU pattern.
Match.nodes:
[qconv2d_pointwise_default, convert_element_type, sigmoid, mul,
convert_element_type, quantize_per_tensor]
[qconv2d_pointwise_default, convert_element_type, sigmoid, mul, convert_element_type]
"""
self._qconv2d_unary_test_helper(
unary_op=torch.nn.SiLU(),
int8_mixed_bf16=True,
qconv_unary_matcher_nodes=11,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_silu_int8_mixed_bf16_xpu(self):
r"""
This testcase will quantize Conv2d->SiLU pattern.
Match.nodes:
[qconv2d_pointwise_default, convert_element_type, sigmoid, mul,
convert_element_type, quantize_per_tensor]
[qconv2d_pointwise_default, convert_element_type, sigmoid, mul, convert_element_type]
"""
self._qconv2d_unary_test_helper(
device="xpu",
unary_op=torch.nn.SiLU(),
int8_mixed_bf16=True,
qconv_unary_matcher_nodes=11,
)
    def _qconv2d_add_test_helper(
        self, device="cpu", use_relu=False, int8_mixed_bf16=False
    ):
        r"""
        This testcase will quantize a Conv2d->Add pattern as:
                 X
               /   \
        Conv1(X)   Conv2(X)
               \   /
                Add
                 |
          Optional(relu)
                 |
                 Y

        The module repeats the pattern twice (conv3/conv4 + add), so the
        weight-prepack matcher is expected to fire 4 times and the binary
        fusion matcher 2 times (unless TEST_ACL disables the fusion path).
        Both functional and inplace add variants are exercised.
        """

        class M(torch.nn.Module):
            def __init__(
                self,
                add_fn,
                use_relu,
                **kwargs,
            ):
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.conv2 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.add_fn = add_fn
                self.relu = torch.nn.ReLU()
                # Second conv/add/(relu) stage so the pattern is matched twice.
                self.conv3 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1, bias=False)
                self.conv4 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1, bias=False)
                self.add_fn2 = add_fn
                self.relu2 = torch.nn.ReLU()
                self.use_relu = use_relu

            def forward(self, x):
                x1 = self.conv1(x)
                x2 = self.conv2(x)
                tmp = self.add_fn(x1, x2)
                if self.use_relu:
                    tmp = self.relu(tmp)
                tmp1 = self.conv3(tmp)
                tmp2 = self.conv4(tmp)
                res = self.add_fn2(tmp1, tmp2)
                if self.use_relu:
                    res = self.relu2(res)
                return res

        for add_fn in quantization_add_fn_list + quantization_inplace_add_fn_list:
            mod = M(add_fn, use_relu).eval().to(device=device)
            v = (
                torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False)
                .add(1)
                .to(device=device)
            )

            def matcher_check_fn():
                # 1. Dequant-Conv2D pattern matched in quantization weight prepack * 4
                self.assertEqual(
                    counters["inductor"]["qconv_weight_prepack_matcher_count"], 4
                )
                # 2. Qconv2d Binary Unary fusion in post-grad fusion pass * 2
                self.assertEqual(
                    counters["inductor"]["qconv2d_binary_matcher_count"],
                    0 if TEST_ACL else 2,
                )
                self.assertEqual(
                    counters["inductor"]["qconv2d_binary_lower_count"],
                    0 if TEST_ACL else 2,
                )

            self._test_common(
                mod,
                (v,),
                matcher_check_fn,
                check_quantization=True,
                check_autocast=torch.bfloat16 if int8_mixed_bf16 else torch.float,
            )
            # Also check that the generated code contains the fused kernels
            # (names differ between cpp-wrapper/AOTI and the default path).
            if not TEST_ACL:
                if torch._inductor.config.cpp_wrapper:
                    self._test_code_common(
                        mod,
                        (v,),
                        [
                            f"aoti_torch_{device}__qconv_pointwise_tensor",
                            f"aoti_torch_{device}__qconv2d_pointwise_binary_tensor",
                        ],
                        [],
                        check_quantization=True,
                        num_include_ops=[2, 2],
                    )
                else:
                    self._test_code_common(
                        mod,
                        (v,),
                        [
                            "torch.ops.onednn.qconv_pointwise.tensor",
                            "torch.ops.onednn.qconv2d_pointwise.binary_tensor",
                        ],
                        [],
                        check_quantization=True,
                        num_include_ops=[2, 2],
                    )
    def _qconv2d_add_test_helper2(
        self, device="cpu", use_relu=False, int8_mixed_bf16=False
    ):
        r"""
        This testcase will quantize two Conv2d->Add patterns as:

              Conv(X)  extra input
                   \   /
                    Add
                     |
              Optional(relu)
                     |
                     Y

        , and

              extra input  Conv(X)
                   \   /
                    Add
                     |
              Optional(relu)
                     |
                     Y

        i.e. the add's second operand is a plain graph input rather than
        another conv output, in both operand orders (``swap_inputs``).
        """

        class M(torch.nn.Module):
            def __init__(
                self,
                add_fn,
                use_relu,
                swap_inputs,
                **kwargs,
            ):
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.add_fn = add_fn
                self.relu = torch.nn.ReLU()
                self.conv2 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1, bias=False)
                self.add_fn2 = add_fn
                self.relu2 = torch.nn.ReLU()
                self.use_relu = use_relu
                self.swap_inputs = swap_inputs

            def forward(self, x, x2, x3):
                x1 = self.conv1(x)
                # swap_inputs flips which operand of add is the conv output.
                if self.swap_inputs:
                    tmp = self.add_fn(x2, x1)
                else:
                    tmp = self.add_fn(x1, x2)
                if self.use_relu:
                    tmp = self.relu(tmp)
                tmp1 = self.conv2(tmp)
                if self.swap_inputs:
                    res = self.add_fn2(x3, tmp1)
                else:
                    res = self.add_fn2(tmp1, x3)
                if self.use_relu:
                    res = self.relu2(res)
                return res

        for add_fn, swap_inputs in itertools.product(
            quantization_add_fn_list + quantization_inplace_add_fn_list, [False, True]
        ):
            mod = M(add_fn, use_relu, swap_inputs).eval().to(device=device)
            x = torch.randn(
                (1, 3, 8, 8), dtype=torch.float32, requires_grad=False, device=device
            )
            # x2/x3 match the conv output shapes of the two stages.
            x2 = torch.randn(
                (1, 6, 6, 6), dtype=torch.float32, requires_grad=False, device=device
            )
            x3 = torch.randn(
                (1, 6, 4, 4), dtype=torch.float32, requires_grad=False, device=device
            )

            def matcher_check_fn():
                # 1. Dequant-Conv2D pattern matched in quantization weight prepack * 2
                self.assertEqual(
                    counters["inductor"]["qconv_weight_prepack_matcher_count"], 2
                )
                # 2. Qconv2d Binary Unary fusion in post-grad fusion pass * 2
                self.assertEqual(
                    counters["inductor"]["qconv2d_binary_matcher_count"],
                    0 if TEST_ACL else 2,
                )
                self.assertEqual(
                    counters["inductor"]["qconv2d_binary_lower_count"],
                    0 if TEST_ACL else 2,
                )

            self._test_common(
                mod,
                (x, x2, x3),
                matcher_check_fn,
                check_quantization=True,
                check_autocast=torch.bfloat16 if int8_mixed_bf16 else torch.float,
            )
            # Verify the fused binary op appears in the generated code.
            if not TEST_ACL:
                if torch._inductor.config.cpp_wrapper:
                    self._test_code_common(
                        mod,
                        (x, x2, x3),
                        [f"aoti_torch_{device}__qconv2d_pointwise_binary_tensor"],
                        [],
                        check_quantization=True,
                        num_include_ops=[2],
                    )
                else:
                    self._test_code_common(
                        mod,
                        (x, x2, x3),
                        ["torch.ops.onednn.qconv2d_pointwise.binary_tensor"],
                        [],
                        check_quantization=True,
                        num_include_ops=[2],
                    )
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qconv2d_add_cpu(self):
self._qconv2d_add_test_helper()
self._qconv2d_add_test_helper2()
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_add_xpu(self):
self._qconv2d_add_test_helper(device="xpu")
self._qconv2d_add_test_helper2(device="xpu")
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qconv2d_add_int8_mixed_bf16(self):
self._qconv2d_add_test_helper(int8_mixed_bf16=True)
self._qconv2d_add_test_helper2(int8_mixed_bf16=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_add_int8_mixed_bf16_xpu(self):
self._qconv2d_add_test_helper(device="xpu", int8_mixed_bf16=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qconv2d_add_relu_cpu(self):
self._qconv2d_add_test_helper(use_relu=True)
self._qconv2d_add_test_helper2(use_relu=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_add_relu_xpu(self):
self._qconv2d_add_test_helper(device="xpu", use_relu=True)
self._qconv2d_add_test_helper2(device="xpu", use_relu=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qconv2d_add_relu_int8_mixed_bf16(self):
self._qconv2d_add_test_helper(use_relu=True, int8_mixed_bf16=True)
self._qconv2d_add_test_helper2(use_relu=True, int8_mixed_bf16=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qconv2d_add_relu_int8_mixed_bf16_xpu(self):
self._qconv2d_add_test_helper(device="xpu", use_relu=True, int8_mixed_bf16=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qconv2d_add_broadcast_shapes_cpu(self):
r"""
This testcase will quantize Conv2d->add pattern using broadcast shape inputs.
Conv2d->Add fusion will fail for the broadcast shape inputs case.
"""
class M(torch.nn.Module):
def __init__(self, use_bias):
super().__init__()
self.conv = torch.nn.Conv2d(32, 32, kernel_size=3, stride=1)
def forward(self, x1, x2):
return torch.add(self.conv(x1), x2)
bias_list = [True, False]
for bias in bias_list:
mod = M(bias).eval()
x1 = torch.randn((2, 32, 9, 9))
x2 = torch.randn((2, 32, 1, 1))
def matcher_check_fn():
# 1. Dequant-Conv2D pattern matched in quantization weight prepack * 1
self.assertEqual(
counters["inductor"]["qconv_weight_prepack_matcher_count"], 1
)
# 2. Qconv2d Binary Unary fusion in post-grad fusion pass * 0
self.assertEqual(
counters["inductor"]["qconv2d_binary_matcher_count"], 0
)
self._test_common(
mod,
(x1, x2),
matcher_check_fn,
check_quantization=True,
)
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    def test_qconv2d_with_concat_cpu(self):
        # Quantizes a branching conv graph whose outputs are concatenated
        # along the channel dim before a final conv; checks that all four
        # convs are prepacked and three unary fusions happen (fused count
        # drops to 0 under TEST_ACL, but lowering still covers all 4 convs).
        channel_1 = 32
        channel_2 = 16
        channel_3 = 8
        # Input channels of the final conv: cat of [conv1, conv2, conv3] outputs.
        channel_4 = int(channel_2 * 2 + channel_3)

        class Model(torch.nn.Module):
            def __init__(
                self,
            ):
                super().__init__()
                self.conv1 = torch.nn.Conv2d(
                    channel_1, channel_2, 1, stride=1, dilation=1, padding=0
                )
                self.conv2 = torch.nn.Conv2d(
                    channel_1, channel_2, 1, stride=1, dilation=1, padding=0
                )
                self.conv3 = torch.nn.Conv2d(
                    channel_2, channel_3, 3, stride=1, dilation=1, padding=1
                )
                self.conv = torch.nn.Conv2d(
                    channel_4, channel_2, 1, stride=1, dilation=1, padding=0
                )

            def forward(self, x: torch.Tensor):
                x1 = self.conv1(x)
                x2 = self.conv2(x)
                x3 = self.conv3(x2)
                res = torch.cat([x1, x2, x3], dim=1)
                res = self.conv(res)
                return res

        mod = Model().eval()
        v = torch.randn(
            (8, channel_1, 40, 40), dtype=torch.float32, requires_grad=False
        )

        def matcher_check_fn():
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_count"], 4
            )
            self.assertEqual(
                counters["inductor"]["qconv_unary_matcher_count"],
                0 if TEST_ACL else 3,
            )
            self.assertEqual(
                counters["inductor"]["qconv_unary_lower_count"], 0 if TEST_ACL else 4
            )

        self._test_common(
            mod,
            (v,),
            matcher_check_fn,
            check_quantization=True,
        )
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    def test_qconv2d_add_2(self):
        r"""
        This testcase prevents this pattern be matched as a conv_binary fusion by mistake.
                Conv(X)  3
                   \   /
                    Add
        We see this pattern in Mobilenet v3 large which add is decomposed from torch.nn.Hardswish or torch.nn.Hardsigmoid.
        """

        class M(torch.nn.Module):
            def __init__(
                self,
                post_op,
            ):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.post_op = post_op

            def forward(self, x):
                # Hardswish/Hardsigmoid decompose into an add-with-scalar,
                # which must NOT be treated as a conv+add binary fusion.
                return self.post_op(self.conv(x))

        for post_op in [
            torch.nn.Hardswish(inplace=True),
            torch.nn.Hardsigmoid(inplace=True),
        ]:
            mod = M(post_op).eval()
            v = torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False).add(
                1
            )

            def matcher_check_fn():
                # Shouldn't hit conv binary fusion
                self.assertEqual(
                    counters["inductor"]["qconv2d_binary_matcher_count"], 0
                )

            self._test_common(
                mod,
                (v,),
                matcher_check_fn,
                check_quantization=True,
            )
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    def test_qconv2d_add_3(self):
        r"""
        This testcase will test below model:
                 x
               /   \
          conv1   maxpool
              \   /   \
               add   conv2
                 \   /
                  cat
        Based on default recipe of x86InductorQuantizer, we will see this pattern after convert:
          qconv1  maxpool
             \      |
              \     q1
               \   /  \
                \ dq1  qconv2
                 \ /
                 add
                  |
                  q2
        Since q1 has 2 users and qconv2 is not ancestor node of qconv1, we shouldn't fuse:
                     int8
                      /
             qconv1 dq1
                \   /
                 add
                  |
                  q2
                  |
                 int8
        Instead we can match and fuse this pattern into qconv_binary:
            qconv1  fp32
                \   /
                 add
                  |
                 fp32
        """

        class M(torch.nn.Module):
            def __init__(
                self,
            ):
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 3, kernel_size=3, stride=1)
                self.conv2 = torch.nn.Conv2d(3, 3, kernel_size=1, stride=1)
                self.maxpool = torch.nn.MaxPool2d(
                    kernel_size=3, stride=1, padding=0, dilation=1
                )

            def forward(self, x):
                tmp1 = self.conv1(x)
                tmp2 = self.maxpool(x)
                # tmp2 feeds both the add and conv2, creating the 2-user q1 node.
                add = torch.add(tmp1, tmp2)
                tmp3 = self.conv2(tmp2)
                return torch.cat((add, tmp3), dim=1)

        mod = M().eval()
        v = torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False).add(1)

        def matcher_check_fn():
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_matcher_count"],
                0 if TEST_ACL else 1,
            )
            # The matched qconv binary pattern should have 2 nodes [qconv, add]
            # instead of 11 which has dequant in binary input and output quant
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_matcher_nodes"],
                0 if TEST_ACL else 2,
            )
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_lower_count"],
                0 if TEST_ACL else 1,
            )

        self._test_common(
            mod,
            (v,),
            matcher_check_fn,
            check_quantization=True,
        )
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    @skipIfRocm
    def test_qat_qconv2d(self):
        r"""
        This testcase will quantize a single Conv2d module with qat flow.

        The module is Conv2d->BatchNorm2d in train mode; QAT convert folds
        the BN into the conv, leaving one prepack match and one unary fusion.
        """

        class M(torch.nn.Module):
            def __init__(
                self,
                **kwargs,
            ):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 128, kernel_size=3, stride=1)
                self.bn = torch.nn.BatchNorm2d(128)

            def forward(self, x):
                return self.bn(self.conv(x))

        mod = M().train()
        v = torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=True).add(1)

        def matcher_check_fn():
            # 1. Dequant-conv pattern matched in quantization weight prepack * 1
            #    [dequantize_per_tensor, dequantize_per_channel, clone, convolution]
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_count"], 1
            )
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_nodes"], 4
            )
            # 2. QConv2D Unary fusion in post-grad fusion pass * 1
            #    [qconv2d_pointwise_default, quantize_per_tensor]
            self.assertEqual(
                counters["inductor"]["qconv_unary_matcher_count"],
                0 if TEST_ACL else 1,
            )
            self.assertEqual(
                counters["inductor"]["qconv_unary_matcher_nodes"],
                0 if TEST_ACL else 2,
            )
            self.assertEqual(
                counters["inductor"]["qconv_unary_lower_count"], 0 if TEST_ACL else 1
            )

        self._test_common(
            mod,
            (v,),
            matcher_check_fn,
            check_quantization=True,
            is_qat=True,
        )
    def _qat_qconv2d_unary_cpu_test_helper(
        self,
        unary_op=torch.nn.ReLU(),
    ):
        # Shared driver for the QAT Conv2d->BN->unary tests: builds a
        # two-stage Conv/BN/unary module, runs the QAT flow, and checks
        # that both convs are prepacked and both unary ops fused.
        class M(torch.nn.Module):
            def __init__(
                self,
                **kwargs,
            ):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, kernel_size=3, stride=1)
                # deepcopy so each stage owns an independent module instance.
                self.unary_fn = copy.deepcopy(unary_op)
                self.bn = torch.nn.BatchNorm2d(3)
                self.conv2 = torch.nn.Conv2d(3, 3, kernel_size=3, stride=1)
                self.unary_fn2 = copy.deepcopy(unary_op)
                self.bn2 = torch.nn.BatchNorm2d(3)

            def forward(self, x):
                tmp = self.unary_fn(self.bn(self.conv(x)))
                return self.unary_fn2(self.bn2(self.conv2(tmp)))

        mod = M()
        v = torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=True).add(1)

        def matcher_check_fn():
            # 1. Dequant-conv pattern matched in quantization weight prepack * 1
            #    [convert_element_type_1, sub, mul_1, dequantize_per_channel, clone, convolution]
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_count"], 2
            )
            # 2. QConv2D Unary fusion in post-grad fusion pass * 1
            #    [qconv2d_pointwise_default, relu, div_1, round_2, add_1, clamp_min_1, clamp_max_1, convert_element_type_2]
            self.assertEqual(
                counters["inductor"]["qconv_unary_matcher_count"],
                0 if TEST_ACL else 2,
            )
            self.assertEqual(
                counters["inductor"]["qconv_unary_lower_count"], 0 if TEST_ACL else 2
            )

        self._test_common(
            mod,
            (v,),
            matcher_check_fn,
            check_quantization=True,
            is_qat=True,
        )
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qat_qconv2d_relu(self):
r"""
This testcase will quantize Conv2d->ReLU pattern with qat flow.
"""
self._qat_qconv2d_unary_cpu_test_helper()
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qat_qconv2d_relu6(self):
r"""
This testcase will quantize Conv2d->ReLU6 pattern with qat flow.
"""
self._qat_qconv2d_unary_cpu_test_helper(unary_op=torch.nn.ReLU6())
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qat_qconv2d_hardtanh(self):
r"""
This testcase will quantize Conv2d->Hardtanh pattern with qat flow.
"""
self._qat_qconv2d_unary_cpu_test_helper(unary_op=torch.nn.Hardtanh())
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qat_qconv2d_silu(self):
r"""
This testcase will quantize Conv2d->SiLU pattern with qat flow.
"""
self._qat_qconv2d_unary_cpu_test_helper(unary_op=torch.nn.SiLU())
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qat_qconv2d_hardswish(self):
r"""
This testcase will quantize Conv2d->Hardswish pattern with qat flow.
"""
self._qat_qconv2d_unary_cpu_test_helper(unary_op=torch.nn.Hardswish())
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    @skipIfRocm
    def test_qat_qconv2d_add(self):
        r"""
        This testcase will quantize a Conv2d->Add pattern as:
                 X
               /   \
        Conv1(X)   Conv2(X)
               \   /
                Add
                 |
                 Y
        using the QAT flow (Conv+BN pairs are folded during convert).
        """

        class M(torch.nn.Module):
            def __init__(
                self,
                **kwargs,
            ):
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.bn1 = torch.nn.BatchNorm2d(6)
                self.conv2 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.bn2 = torch.nn.BatchNorm2d(6)

            def forward(self, x):
                x1 = self.bn1(self.conv1(x))
                x2 = self.bn2(self.conv2(x))
                return x1 + x2

        mod = M().train()
        v = torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=True).add(1)

        def matcher_check_fn():
            # 1. Dequant-conv pattern matched in quantization weight prepack * 2
            #    [dequantize_per_tensor, dequantize_per_channel, clone, convolution]
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_count"], 2
            )
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_nodes"], 8
            )
            # 2. Qconv2d Binary fusion in post-grad fusion pass * 1
            #    [qconv2d_pointwise_default_1, dequantize_per_tensor, add_3, quantize_per_tensor]
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_matcher_count"],
                0 if TEST_ACL else 1,
            )
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_matcher_nodes"],
                0 if TEST_ACL else 4,
            )
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_lower_count"],
                0 if TEST_ACL else 1,
            )

        self._test_common(
            mod,
            (v,),
            matcher_check_fn,
            check_quantization=True,
            is_qat=True,
        )
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    @skipIfRocm
    def test_qat_qconv2d_add_relu(self):
        r"""
        This testcase will quantize a Conv2d->Add->ReLU pattern as:
                 X
               /   \
        Conv1(X)   Conv2(X)
               \   /
                Add
                 |
                ReLU
                 |
                 Y
        using the QAT flow (Conv+BN pairs are folded during convert).
        """

        class M(torch.nn.Module):
            def __init__(
                self,
                **kwargs,
            ):
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.bn1 = torch.nn.BatchNorm2d(6)
                self.conv2 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.bn2 = torch.nn.BatchNorm2d(6)
                self.relu = torch.nn.ReLU()

            def forward(self, x):
                x1 = self.bn1(self.conv1(x))
                x2 = self.bn2(self.conv2(x))
                return self.relu(x1 + x2)

        mod = M().train()
        v = torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=True).add(1)

        def matcher_check_fn():
            # 1. Dequant-conv pattern matched in quantization weight prepack * 2
            #    [dequantize_per_tensor, dequantize_per_channel, clone, convolution]
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_count"], 2
            )
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_nodes"], 8
            )
            # 2. Qconv2d Binary fusion in post-grad fusion pass * 1
            #    [qconv2d_pointwise_default_1, dequantize_per_tensor, add_3, relu, quantize_per_tensor]
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_matcher_count"],
                0 if TEST_ACL else 1,
            )
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_matcher_nodes"],
                0 if TEST_ACL else 5,
            )
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_lower_count"],
                0 if TEST_ACL else 1,
            )

        self._test_common(
            mod,
            (v,),
            matcher_check_fn,
            check_quantization=True,
            is_qat=True,
        )
    def _test_qconv2d_dequant_promotion_helper(self, device="cpu"):
        r"""
        This testcase tests if dequant node before conv2d is promoted correctly:
                 X
                 |
             Conv1(X)
               /   \
        Conv2(X)   Conv3(X)
               \   /
                Add
                 |
                 Y

        conv1's output feeds two convs, so its dequant node must be
        duplicated (promoted) once so each consumer gets its own copy.
        """

        class M(torch.nn.Module):
            def __init__(
                self,
                **kwargs,
            ):
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.conv2 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1)
                self.conv3 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1)

            def forward(self, x):
                temp = self.conv1(x)
                temp = self.conv2(temp) + self.conv3(temp)
                return temp

        mod = M().eval().to(device=device)
        v = (
            torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False)
            .add(1)
            .to(device=device)
        )

        def matcher_check_fn():
            # 1. Dequant pattern matcher for dequant promotion * 1
            #    [dequantize_per_tensor]
            self.assertEqual(counters["inductor"]["dequant_promotion_matcher_count"], 1)
            self.assertEqual(counters["inductor"]["dequant_promotion_matcher_nodes"], 1)
            # 2. Dequant-conv pattern matched in quantization weight prepack * 3
            #    [dequantize_per_tensor, dequantize_per_channel, clone, convolution]
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_count"], 3
            )
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_nodes"], 12
            )
            # 3. Qconv2d Binary fusion in post-grad fusion pass * 1
            #    [qconv2d_pointwise_default_1, add_3]
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_matcher_count"],
                0 if TEST_ACL else 1,
            )
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_matcher_nodes"],
                0 if TEST_ACL else 2,
            )
            self.assertEqual(
                counters["inductor"]["qconv2d_binary_lower_count"],
                0 if TEST_ACL else 1,
            )

        self._test_common(
            mod,
            (v,),
            matcher_check_fn,
            check_quantization=True,
        )
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfRocm
def test_qconv2d_dequant_promotion_cpu(self):
self._test_qconv2d_dequant_promotion_helper()
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfRocm
@skipIfNoXPU
def test_qconv2d_dequant_promotion_xpu(self):
self._test_qconv2d_dequant_promotion_helper(device="xpu")
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    def test_qconv1d_relu_cpu(self):
        r"""
        This testcase will quantize Conv1d->ReLU pattern.

        Two Conv1d->ReLU stages (second conv bias-free) so the prepack and
        unary-fusion matchers are each expected to fire twice.
        """
        device = "cpu"
        unary_op = torch.nn.ReLU()

        class M(torch.nn.Module):
            def __init__(
                self,
            ):
                super().__init__()
                self.conv = torch.nn.Conv1d(3, 128, kernel_size=3, stride=1)
                self.unary_fn = copy.deepcopy(unary_op)
                self.conv2 = torch.nn.Conv1d(
                    128, 128, kernel_size=3, stride=1, bias=False
                )
                self.unary_fn2 = copy.deepcopy(unary_op)

            def forward(self, x):
                tmp = self.unary_fn(self.conv(x))
                return self.unary_fn2(self.conv2(tmp))

        mod = M().eval().to(device=device)
        v = (
            torch.randn((1, 3, 8), dtype=torch.float32, requires_grad=False)
            .add(1)
            .to(device=device)
        )

        def matcher_check_fn():
            # 1. Dequant-Conv2D pattern matched in quantization weight prepack * 2
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_count"], 2
            )
            # 2. QConv2D Unary fusion in post-grad fusion pass * 2
            self.assertEqual(
                counters["inductor"]["qconv_unary_matcher_count"],
                0 if TEST_ACL else 2,
            )
            self.assertEqual(
                counters["inductor"]["qconv_unary_lower_count"], 0 if TEST_ACL else 2
            )

        self._test_common(
            mod,
            (v,),
            check_quantization=True,
            matcher_check_fn=matcher_check_fn,
        )
    def _qlinear_test_helper(
        self,
        inputs,
        device="cpu",
        int8_mixed_bf16=False,
        do_permute=False,
        matcher_check_fn=None,
        bias=True,
        is_dynamic=False,
        is_qat=False,
        quantization_with_autocast=False,
    ):
        # Shared driver for the qlinear tests: quantizes a two-Linear module
        # and checks the weight-prepack matcher.
        # inputs: tuple of tensors fed to the module (moved to `device`).
        # do_permute: insert a permute+reshape so the linear input is
        #   non-contiguous with dim > 2.
        # matcher_check_fn: optional override of the default prepack check.
        class M(torch.nn.Module):
            def __init__(self, use_bias, do_permute=False):
                super().__init__()
                self.linear = torch.nn.Linear(4, 3, use_bias)
                self.linear2 = torch.nn.Linear(3, 4, use_bias)
                self.do_permute = do_permute

            def forward(self, x):
                if self.do_permute:
                    x = torch.reshape(torch.permute(x, (0, 2, 3, 1)), (2, 12, 4))
                return self.linear2(self.linear(x))

        mod = M(bias, do_permute=do_permute).eval().to(device=device)
        assert isinstance(inputs, tuple)

        def __convert_tensor_to_device(input, device):
            return input.to(device=device) if isinstance(input, torch.Tensor) else input

        inputs = tuple(__convert_tensor_to_device(input, device) for input in inputs)

        def _default_matcher_check_fn():
            # Both linears should be matched by the weight-prepack pass.
            self.assertEqual(
                counters["inductor"]["qlinear_weight_prepack_matcher_count"], 2
            )

        self._test_common(
            mod,
            inputs,
            matcher_check_fn=(
                matcher_check_fn
                if matcher_check_fn is not None
                else _default_matcher_check_fn
            ),
            check_autocast=torch.bfloat16 if int8_mixed_bf16 else torch.float,
            check_quantization=True,
            is_qat=is_qat,
            is_dynamic=is_dynamic,
            quantization_with_autocast=quantization_with_autocast,
        )
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qlinear_cpu(self):
r"""
This testcase will quantize a single Linear Module.
"""
for bias in [True, False]:
self._qlinear_test_helper((torch.randn((2, 4)),), bias=bias)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_xpu(self):
r"""
This testcase will quantize a single Linear Module.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 4)).to(device="xpu"),), device="xpu", bias=bias
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_dynamic_qlinear_cpu(self):
r"""
This testcase will quantize a single Linear Module.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 4)),), bias=bias, is_dynamic=True
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_dynamic_qlinear_qat_cpu(self):
r"""
This testcase will quantize a single Linear Module.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 4)),), bias=bias, is_dynamic=True, is_qat=True
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_dynamic_qlinear_input_dim_exceeds_2(self):
r"""
This testcase will quantize a single Linear Module.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 3, 4)),), bias=bias, is_dynamic=True
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_int8_mixed_bf16(self):
r"""
This testcase will quantize a single Linear Module with int8_mixed_bf16 quantization.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 4)),), int8_mixed_bf16=True, bias=bias
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_int8_mixed_bf16_use_autocast(self):
r"""
This testcase will quantize a single Linear Module with int8_mixed_bf16 quantization.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 4)),),
int8_mixed_bf16=True,
bias=bias,
quantization_with_autocast=True,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoXPU
def test_qlinear_int8_mixed_bf16_xpu(self):
r"""
This testcase will quantize a single Linear Module with int8_mixed_bf16 quantization.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 4)).to(device="xpu"),),
device="xpu",
int8_mixed_bf16=True,
bias=bias,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qlinear_input_dim_exceeds_2(self):
r"""
This testcase will quantize a single Linear Module.
"""
for bias in [True, False]:
self._qlinear_test_helper((torch.randn((2, 3, 4)),), bias=bias)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_input_dim_exceeds_2_xpu(self):
r"""
This testcase will quantize a single Linear Module.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 3, 4)).to(device="xpu"),), device="xpu", bias=bias
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_int8_mixed_bf16_input_dim_exceeds_2(self):
r"""
This testcase will quantize a single Linear Module with int8_mixed_bf16 quantization.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 3, 4)),), int8_mixed_bf16=True, bias=bias
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_int8_mixed_bf16_input_dim_exceeds_2_use_autocast(self):
r"""
This testcase will quantize a single Linear Module with int8_mixed_bf16 quantization.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 3, 4)),),
int8_mixed_bf16=True,
bias=bias,
quantization_with_autocast=True,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_int8_mixed_bf16_input_dim_exceeds_2_xpu(self):
r"""
This testcase will quantize a single Linear Module with int8_mixed_bf16 quantization.
"""
for bias in [True, False]:
self._qlinear_test_helper(
(torch.randn((2, 3, 4)).to(device="xpu"),),
device="xpu",
int8_mixed_bf16=True,
bias=bias,
)
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    def test_qlinear_input_dim_exceeds_2_and_not_contiguous(self):
        r"""
        This testcase will quantize a single Linear Module.
        * Input dim exceeds 2
        * Input not contiguous
        """
        for bias in [True, False]:

            def matcher_check_fn():
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_count"], 2
                )
                # The bias adds one node to the matched prepack pattern.
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_nodes"],
                    13 if bias else 12,
                )

            self._qlinear_test_helper(
                (torch.randn((2, 4, 3, 4)),),
                do_permute=True,
                matcher_check_fn=matcher_check_fn,
                bias=bias,
            )
    @skipIfNoDynamoSupport
    @skipIfNoONEDNNBF16
    @skipIfNoONEDNN
    def test_qlinear_int8_mixed_bf16_input_dim_exceeds_2_and_not_contiguous(self):
        r"""
        This testcase will quantize a single Linear Module for int8_bf16.
        * Input dim exceeds 2
        * Input not contiguous
        """
        for bias in [True, False]:

            def matcher_check_fn():
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_count"], 2
                )
                # bf16 conversions enlarge the pattern vs the fp32 case;
                # the bias adds one more node.
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_nodes"],
                    17 if bias else 16,
                )

            self._qlinear_test_helper(
                (torch.randn((2, 4, 3, 4)),),
                int8_mixed_bf16=True,
                do_permute=True,
                matcher_check_fn=matcher_check_fn,
                bias=bias,
            )
    @skipIfNoDynamoSupport
    @skipIfNoONEDNNBF16
    @skipIfNoONEDNN
    def test_qlinear_int8_mixed_bf16_input_dim_exceeds_2_and_not_contiguous_use_autocast(
        self,
    ):
        r"""
        This testcase will quantize a single Linear Module for int8_bf16.
        * Input dim exceeds 2
        * Input not contiguous
        * Uses autocast during quantization
        """
        for bias in [True, False]:

            def matcher_check_fn():
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_count"], 2
                )
                # Autocast yields one fewer node than the explicit-convert
                # bf16 path; the bias adds one more node.
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_nodes"],
                    16 if bias else 15,
                )

            self._qlinear_test_helper(
                (torch.randn((2, 4, 3, 4)),),
                int8_mixed_bf16=True,
                do_permute=True,
                matcher_check_fn=matcher_check_fn,
                bias=bias,
                quantization_with_autocast=True,
            )
    @skipIfNoDynamoSupport
    @skipIfNoONEDNNBF16
    @skipIfNoONEDNN
    @skipIfNoXPU
    def test_qlinear_int8_mixed_bf16_input_dim_exceeds_2_and_not_contiguous_xpu(self):
        r"""
        This testcase will quantize a single Linear Module for int8_bf16.
        * Input dim exceeds 2
        * Input not contiguous
        * Runs on XPU
        """
        for bias in [True, False]:

            def matcher_check_fn():
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_count"], 2
                )
                # Same node counts as the CPU bf16 case.
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_nodes"],
                    17 if bias else 16,
                )

            self._qlinear_test_helper(
                (torch.randn((2, 4, 3, 4)).to(device="xpu"),),
                device="xpu",
                int8_mixed_bf16=True,
                do_permute=True,
                matcher_check_fn=matcher_check_fn,
                bias=bias,
            )
    def _qlinear_unary_test_helper(
        self, inputs, unary_op=torch.nn.ReLU(), device="cpu", int8_mixed_bf16=False
    ):
        # Shared driver for the Linear->unary tests: two Linear->unary stages,
        # run with and without bias, checking both the prepack matcher (2)
        # and the unary fusion matcher (2, or 0 under TEST_ACL).
        class M(torch.nn.Module):
            def __init__(self, use_bias):
                super().__init__()
                self.linear = torch.nn.Linear(4, 4, use_bias)
                # deepcopy so each stage owns an independent module instance.
                self.unary_fn = copy.deepcopy(unary_op)
                self.linear2 = torch.nn.Linear(4, 4, use_bias)
                self.unary_fn2 = copy.deepcopy(unary_op)

            def forward(self, x):
                tmp = self.unary_fn(self.linear(x))
                return self.unary_fn2(self.linear2(tmp))

        bias_list = [True, False]
        for bias in bias_list:
            mod = M(bias).eval().to(device=device)

            def matcher_check_fn():
                # 1. dequant-linear pattern matched in quantization weight prepack
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_count"], 2
                )
                # 2. QLinear Unary fusion in post-grad fusion pass
                self.assertEqual(
                    counters["inductor"]["qlinear_unary_matcher_count"],
                    0 if TEST_ACL else 2,
                )
                self.assertEqual(
                    counters["inductor"]["qlinear_unary_lower_count"],
                    0 if TEST_ACL else 2,
                )

            self._test_common(
                mod,
                inputs,
                matcher_check_fn,
                check_autocast=torch.bfloat16 if int8_mixed_bf16 else torch.float,
                check_quantization=True,
            )
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qlinear_relu_cpu(self):
r"""
This testcase will quantize a Linear->ReLU pattern.
"""
self._qlinear_unary_test_helper((torch.randn((2, 4)),))
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_relu_xpu(self):
r"""
This testcase will quantize a Linear->ReLU pattern.
"""
self._qlinear_unary_test_helper(
(torch.randn((2, 4)).to(device="xpu"),), device="xpu"
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_relu_int8_mixed_bf16(self):
r"""
This testcase will quantize a Linear->ReLU pattern with int8_mixed_bf16 quantization.
"""
self._qlinear_unary_test_helper((torch.randn((2, 4)),), int8_mixed_bf16=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_relu_int8_mixed_bf16_xpu(self):
r"""
This testcase will quantize a Linear->ReLU pattern with int8_mixed_bf16 quantization.
"""
self._qlinear_unary_test_helper(
(torch.randn((2, 4)).to(device="xpu"),), device="xpu", int8_mixed_bf16=True
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qlinear_relu_input_dim_exceeds_2(self):
r"""
This testcase will quantize a Linear->ReLU pattern.
"""
self._qlinear_unary_test_helper((torch.randn((2, 3, 4)),))
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_relu_input_dim_exceeds_2_xpu(self):
r"""
This testcase will quantize a Linear->ReLU pattern.
"""
self._qlinear_unary_test_helper(
(torch.randn((2, 3, 4)).to(device="xpu"),), device="xpu"
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_relu_int8_mixed_bf16_input_dim_exceeds_2(self):
r"""
This testcase will quantize a Linear->ReLU pattern with int8_mixed_bf16 quantization.
"""
self._qlinear_unary_test_helper((torch.randn((2, 3, 4)),), int8_mixed_bf16=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_relu_int8_mixed_bf16_input_dim_exceeds_2_xpu(self):
r"""
This testcase will quantize a Linear->ReLU pattern with int8_mixed_bf16 quantization.
"""
self._qlinear_unary_test_helper(
(torch.randn((2, 3, 4)).to(device="xpu"),),
device="xpu",
int8_mixed_bf16=True,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qlinear_gelu_cpu(self):
r"""
This testcase will quantize a Linear->GELU pattern.
"""
for gelu in [torch.nn.GELU("none"), torch.nn.GELU("tanh")]:
self._qlinear_unary_test_helper((torch.randn((2, 4)),), gelu)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_gelu_xpu(self):
r"""
This testcase will quantize a Linear->GELU pattern.
"""
for gelu in [torch.nn.GELU("none"), torch.nn.GELU("tanh")]:
self._qlinear_unary_test_helper(
(torch.randn((2, 4)).to(device="xpu"),), gelu, device="xpu"
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_gelu_int8_mixed_bf16(self):
r"""
This testcase will quantize a Linear->GELU pattern with int8_mixed_bf16 quantization.
"""
for gelu in [torch.nn.GELU("none"), torch.nn.GELU("tanh")]:
self._qlinear_unary_test_helper(
(torch.randn((2, 4)),), gelu, int8_mixed_bf16=True
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_gelu_int8_mixed_bf16_xpu(self):
r"""
This testcase will quantize a Linear->GELU pattern with int8_mixed_bf16 quantization.
"""
for gelu in [torch.nn.GELU("none"), torch.nn.GELU("tanh")]:
self._qlinear_unary_test_helper(
(torch.randn((2, 4)).to(device="xpu"),),
gelu,
device="xpu",
int8_mixed_bf16=True,
)
    def _qlinear_add_test_helper(
        self,
        device="cpu",
        use_relu=False,
        int8_mixed_bf16=False,
        is_qat=True,
        is_dynamic=True,
    ):
        r"""
        This testcase will quantize two consecutive Linear->Add(->relu) patterns as:

                  X
                /   \
         linear(X)  linear(X)
                \   /
                 Add
                  |
            Optional(relu)
                /   \
         linear(X)  linear(X)
                \   /
                 Add
                  |
            Optional(relu)
                  |
                  Y

        Args:
            device: "cpu" or "xpu".
            use_relu: append a ReLU after each Add when True.
            int8_mixed_bf16: run under bf16 autocast when True.
            is_qat, is_dynamic: quantization flavor forwarded to _test_common.
        """
        def fake_quant(x):
            # to produce a float32 result as extra input
            qlib = torch.ops.quantized_decomposed
            # CPU activations are quantized to uint8, XPU to int8.
            if device == "cpu":
                qmin, qmax, dtype = 0, 255, torch.uint8
            else:
                qmin, qmax, dtype = -128, 127, torch.int8
            x = qlib.quantize_per_tensor.default(x, 0.0166785, 42, qmin, qmax, dtype)
            x = qlib.dequantize_per_tensor.default(x, 0.0166785, 42, qmin, qmax, dtype)
            return x
        class M(torch.nn.Module):
            def __init__(
                self,
                add_fn,
                use_relu,
                fake_quant_before_extra_input,
            ):
                super().__init__()
                self.linear1 = torch.nn.Linear(4, 4)
                self.linear2 = torch.nn.Linear(4, 4)
                self.add_fn = add_fn
                self.relu = torch.nn.ReLU()
                self.linear3 = torch.nn.Linear(4, 4)
                self.linear4 = torch.nn.Linear(4, 4)
                self.add_fn2 = add_fn
                self.relu2 = torch.nn.ReLU()
                self.use_relu = use_relu
                self.fake_quant_before_extra_input = fake_quant_before_extra_input
            def forward(self, x):
                x1 = self.linear1(x)
                x2 = self.linear2(x)
                if self.fake_quant_before_extra_input:
                    x2 = fake_quant(x2)
                tmp = self.add_fn(x1, x2)
                if self.use_relu:
                    tmp = self.relu(tmp)
                tmp1 = self.linear3(tmp)
                tmp2 = self.linear4(tmp)
                if self.fake_quant_before_extra_input:
                    tmp2 = fake_quant(tmp2)
                res = self.add_fn2(tmp1, tmp2)
                if self.use_relu:
                    res = self.relu2(res)
                return res
        # Cover out-of-place and in-place adds with either operand order.
        add_fn_list = [
            lambda x, y: x + y,
            lambda x, y: y + x,
            lambda x, y: x.add_(y),
            lambda x, y: y.add_(x),
        ]
        fake_quant_x2_list = [False, True] if int8_mixed_bf16 else [False]
        shape_list = [(4, 4), [4, 4, 4]]
        cases = itertools.product(add_fn_list, fake_quant_x2_list, shape_list)
        for add_fn, fq_x2, shape in cases:
            mod = M(add_fn, use_relu, fq_x2).eval().to(device=device)
            v = torch.randn(
                shape, dtype=torch.float32, requires_grad=False, device=device
            ).add(1)
            def matcher_check_fn():
                # 1. Dequant-linear pattern matched in quantization weight prepack * 4
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_count"], 4
                )
                # pattern = [dequant_per_tensor, (convert_dtype), dequant_per_channel, (convert_dtype), permute, addmm]
                nodes_per_match = 6 if int8_mixed_bf16 else 4
                if len(shape) == 3:
                    # pattern = [dequant_per_tensor, (convert_dtype), (view), \
                    # dequant_per_channel, (convert_dtype), (view), permute, addmm]
                    nodes_per_match += 2
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_nodes"],
                    4 * nodes_per_match,
                )
                # 2. Qlinear Binary Unary fusion in post-grad fusion pass * 2
                # (the ACL backend performs none of these fusions, hence 0 there)
                self.assertEqual(
                    counters["inductor"]["qlinear_binary_matcher_count"],
                    0 if TEST_ACL else 2,
                )
                # Two linear-binary patterns are matched
                # matched patter1 = [qlinear, add, (convert dtype), (relu), quantize_per_tensor]
                # matched patter2 = [qlinear, add, (convert dtype), (relu)]
                # If add_fn is x.add_(y), x is bf16 and y is fp32, there is a to_bf16 node after binary
                to_bf16_after_binary = 2 * (add_fn == add_fn_list[2] and fq_x2)
                expected_matcher_nodes = (
                    (4 if is_dynamic else 5) + 2 * use_relu + to_bf16_after_binary
                )
                self.assertEqual(
                    counters["inductor"]["qlinear_binary_matcher_nodes"],
                    0 if TEST_ACL else expected_matcher_nodes,
                )
                self.assertEqual(
                    counters["inductor"]["qlinear_binary_lower_count"],
                    0 if TEST_ACL else 2,
                )
            self._test_common(
                mod,
                (v,),
                matcher_check_fn,
                check_quantization=True,
                check_autocast=torch.bfloat16 if int8_mixed_bf16 else torch.float,
                is_qat=is_qat,
                is_dynamic=is_dynamic,
            )
            # ACL never emits the fused ops checked below; skip the codegen check.
            if TEST_ACL:
                continue
            if torch._inductor.config.cpp_wrapper:
                # For CPP wrapper
                self._test_code_common(
                    mod,
                    (v,),
                    [
                        f"aoti_torch_{device}__qlinear_pointwise_tensor",
                        f"aoti_torch_{device}__qlinear_pointwise_binary_tensor",
                    ],
                    [],
                    check_quantization=True,
                    num_include_ops=[2, 2],
                )
            else:
                # For python wrapper
                self._test_code_common(
                    mod,
                    (v,),
                    [
                        "torch.ops.onednn.qlinear_pointwise.tensor",
                        "torch.ops.onednn.qlinear_pointwise.binary",
                    ],
                    [],
                    check_quantization=True,
                    num_include_ops=[2, 2],
                )
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@parametrize("use_relu", [True, False])
@parametrize("is_qat", [True, False])
@parametrize("is_dynamic", [True, False])
def test_qlinear_add_cpu(self, use_relu, is_qat, is_dynamic):
self._qlinear_add_test_helper(
use_relu=use_relu, is_qat=is_qat, is_dynamic=is_dynamic
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
@config.patch({"fx_graph_cache": False})
@parametrize("use_relu", [True])
@parametrize("is_qat", [False])
@parametrize("is_dynamic", [False])
def test_qlinear_add_xpu(self, use_relu, is_qat, is_dynamic):
self._qlinear_add_test_helper(
device="xpu", use_relu=use_relu, is_qat=is_qat, is_dynamic=is_dynamic
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@parametrize("use_relu", [True, False])
@parametrize("is_qat", [True, False])
@parametrize("is_dynamic", [True, False])
def test_qlinear_add_int8_mixed_bf16(self, use_relu, is_qat, is_dynamic):
self._qlinear_add_test_helper(
int8_mixed_bf16=True,
use_relu=use_relu,
is_qat=is_qat,
is_dynamic=is_dynamic,
)
@skipIfNoXPU
@parametrize("use_relu", [True, False])
@parametrize("is_qat", [False])
@parametrize("is_dynamic", [False])
def test_qlinear_add_int8_mixed_bf16_xpu(self, use_relu, is_qat, is_dynamic):
self._qlinear_add_test_helper(
device="xpu",
int8_mixed_bf16=True,
use_relu=use_relu,
is_qat=is_qat,
is_dynamic=is_dynamic,
)
    def _test_qlinear_fp8_inductor_cpu_helper(self, qlinear_op, post_op="none"):
        """Compare eager vs. torch.compile output of an fp8 (e4m3) onednn
        qlinear op, optionally with a binary "add" post op.

        Args:
            qlinear_op: the onednn qlinear overload under test
                (qlinear_pointwise.default or qlinear_pointwise.binary).
            post_op: "none" or "add"; selects which call signature is built.
        """
        dtype = torch.float8_e4m3fn
        qlinear_prepack = torch.ops.onednn.qlinear_prepack
        post_op_algo = "none"
        unary_post_op_args = ()
        batch_size = 1
        output_dtype = torch.float8_e4m3fn
        y_scale, y_zp = 0.07, 0
        ic = 4
        oc = 16
        torch._dynamo.reset()
        used_y_scale = y_scale
        used_y_zp = y_zp
        x = torch.rand(batch_size, ic)
        w = torch.rand(oc, ic)
        # fp8 "quantization" here is a plain dtype cast; zero points stay 0.
        qx = x.to(dtype)
        qw = w.to(dtype)
        x_scale = 0.5
        w_scales = torch.randn(oc)
        b = torch.rand(oc)
        x_zp = 0
        w_zps = torch.zeros_like(w_scales, dtype=torch.int)
        if post_op == "none":
            class Mod(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    # Weight is prepacked once at module construction.
                    self.qw_packed = qlinear_prepack(qw, x.shape)
                def forward(self, qx):
                    qy = qlinear_op(
                        qx,
                        x_scale,
                        x_zp,
                        self.qw_packed,
                        w_scales,
                        w_zps,
                        b,
                        used_y_scale,
                        used_y_zp,
                        output_dtype,
                        post_op,
                        unary_post_op_args,
                        post_op_algo,
                    )
                    return qy
        elif post_op == "add":
            x2 = torch.rand(batch_size, oc)
            binary_alpha = 1.0  # we only support alpha=1.0 now
            class Mod(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.qw_packed = qlinear_prepack(qw, x.shape)
                def forward(self, qx):
                    # Binary overload: extra input x2 plus its scale/zp (1.0, 0)
                    # and the binary/unary post-op descriptors.
                    qy = qlinear_op(
                        qx,
                        x_scale,
                        x_zp,
                        self.qw_packed,
                        w_scales,
                        w_zps,
                        x2,
                        b,
                        used_y_scale,
                        used_y_zp,
                        output_dtype,
                        1.0,
                        0,
                        "add",
                        binary_alpha,
                        "none",
                        unary_post_op_args,
                        post_op_algo,
                    )
                    return qy
        with torch.no_grad():
            model = Mod()
            y_refe = model(qx)
            y_test = torch.compile(model)(qx)
            # Compare in float32: fp8 tensors do not support equality directly.
            self.assertEqual(y_refe.float(), y_test.float())
@skipIfNoONEDNN
def test_qlinear_fp8_inductor_cpu(self):
qlinear_op = torch.ops.onednn.qlinear_pointwise.default
self._test_qlinear_fp8_inductor_cpu_helper(qlinear_op, "none")
@skipIfNoONEDNN
def test_qlinear_add_fp8_inductor_cpu(self):
qlinear_op = torch.ops.onednn.qlinear_pointwise.binary
self._test_qlinear_fp8_inductor_cpu_helper(qlinear_op, "add")
    def _qlinear_dequant_promotion_test_helper(
        self,
        inputs,
        device="cpu",
        int8_mixed_bf16=False,
        is_dynamic=False,
        matcher_check_fn=None,
    ):
        """Quantize Linear1 -> (Linear2 + Linear3) and verify that the dequant
        node shared by Linear2/Linear3 is promoted (duplicated) so each
        consumer can fuse independently.

        Args:
            inputs: tuple of example tensors.
            device: "cpu" or "xpu".
            int8_mixed_bf16: run under bf16 autocast when True.
            is_dynamic: use dynamic quantization when True.
            matcher_check_fn: optional override for the default counter checks.
        """
        class M(torch.nn.Module):
            def __init__(
                self,
                **kwargs,
            ):
                super().__init__()
                self.linear1 = torch.nn.Linear(4, 4)
                self.linear2 = torch.nn.Linear(4, 4)
                self.linear3 = torch.nn.Linear(4, 4)
            def forward(self, x):
                temp = self.linear1(x)
                temp = self.linear2(temp) + self.linear3(temp)
                return temp
        mod = M().eval().to(device=device)
        def default_matcher_check_fn():
            # 1. Dequant pattern matcher for dequant promotion * 1
            self.assertEqual(counters["inductor"]["dequant_promotion_matcher_count"], 1)
            # 2. dequant-linear pattern matched in quantization weight prepack * 3
            self.assertEqual(
                counters["inductor"]["qlinear_weight_prepack_matcher_count"], 3
            )
            # 3. QLinear Unary fusion in post-grad fusion pass * 1
            # (ACL does not perform the fusion, hence 0 there)
            self.assertEqual(
                counters["inductor"]["qlinear_unary_matcher_count"],
                0 if TEST_ACL else 1,
            )
        self._test_common(
            mod,
            inputs,
            matcher_check_fn=(
                matcher_check_fn
                if matcher_check_fn is not None
                else default_matcher_check_fn
            ),
            check_autocast=torch.bfloat16 if int8_mixed_bf16 else torch.float,
            check_quantization=True,
            is_dynamic=is_dynamic,
        )
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qlinear_dequant_promotion_cpu(self):
r"""
This testcase test if dequant node before linear is promoted correctly:
X
|
Linear1(X)
/ \
Linear2(X) Linear3(X)
\ /
Add
|
Y
"""
self._qlinear_dequant_promotion_test_helper((torch.randn((2, 4)),))
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_dequant_promotion_xpu(self):
r"""
This testcase test if dequant node before linear is promoted correctly:
X
|
Linear1(X)
/ \
Linear2(X) Linear3(X)
\ /
Add
|
Y
"""
self._qlinear_dequant_promotion_test_helper(
(torch.randn((2, 4)).to(device="xpu"),), device="xpu"
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_dequant_promotion_int8_mixed_bf16(self):
r"""
Test with int8_mixed_bf16 quantization.
This testcase test if dequant node before linear is promoted correctly:
X
|
Linear1(X)
/ \
Linear2(X) Linear3(X)
\ /
Add
|
Y
"""
self._qlinear_dequant_promotion_test_helper(
(torch.randn((2, 4)),), int8_mixed_bf16=True
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_dequant_promotion_int8_mixed_bf16_xpu(self):
r"""
Test with int8_mixed_bf16 quantization.
This testcase test if dequant node before linear is promoted correctly:
X
|
Linear1(X)
/ \
Linear2(X) Linear3(X)
\ /
Add
|
Y
"""
self._qlinear_dequant_promotion_test_helper(
(torch.randn((2, 4)).to(device="xpu"),), device="xpu", int8_mixed_bf16=True
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qlinear_dequant_promotion_cpu_input_dim_exceeds_2(self):
r"""
This testcase test if dequant node before linear is promoted correctly:
X
|
Linear1(X)
/ \
Linear2(X) Linear3(X)
\ /
Add
|
Y
"""
self._qlinear_dequant_promotion_test_helper((torch.randn((2, 3, 4)),))
@skipIfNoDynamoSupport
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_dequant_promotion_input_dim_exceeds_2_xpu(self):
r"""
This testcase test if dequant node before linear is promoted correctly:
X
|
Linear1(X)
/ \
Linear2(X) Linear3(X)
\ /
Add
|
Y
"""
self._qlinear_dequant_promotion_test_helper(
(torch.randn((2, 3, 4)).to(device="xpu"),), device="xpu"
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
def test_qlinear_dequant_promotion_int8_mixed_bf16_input_dim_exceeds_2(self):
r"""
Test with int8_mixed_bf16 quantization.
This testcase test if dequant node before linear is promoted correctly:
X
|
Linear1(X)
/ \
Linear2(X) Linear3(X)
\ /
Add
|
Y
"""
self._qlinear_dequant_promotion_test_helper(
(torch.randn((2, 3, 4)),), int8_mixed_bf16=True
)
@skipIfNoDynamoSupport
@skipIfNoONEDNNBF16
@skipIfNoONEDNN
@skipIfNoXPU
def test_qlinear_dequant_promotion_int8_mixed_bf16_input_dim_exceeds_2_xpu(self):
r"""
Test with int8_mixed_bf16 quantization.
This testcase test if dequant node before linear is promoted correctly:
X
|
Linear1(X)
/ \
Linear2(X) Linear3(X)
\ /
Add
|
Y
"""
self._qlinear_dequant_promotion_test_helper(
(torch.randn((2, 3, 4)).to(device="xpu"),),
device="xpu",
int8_mixed_bf16=True,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_qlinear_dequant_promotion_dynamic_cpu(self):
r"""
This testcase test if dequant node before linear is promoted correctly:
X
|
Linear1(X)
/ \
Linear2(X) Linear3(X)
\ /
Add
|
Y
"""
def matcher_check_fn():
# 1. Dequant pattern matcher for dequant promotion * 1
self.assertEqual(counters["inductor"]["dequant_promotion_matcher_count"], 1)
# 2. dequant-linear pattern matched in quantization weight prepack * 3
self.assertEqual(
counters["inductor"]["qlinear_weight_prepack_matcher_count"], 3
)
self._qlinear_dequant_promotion_test_helper(
(torch.randn((2, 4)),),
matcher_check_fn=matcher_check_fn,
is_dynamic=True,
)
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    @skipIfNoXPU
    @config.patch({"fx_graph_cache": False})
    def test_qlinear_mul_xpu(self):
        r"""
        This testcase will quantize a Linear->Mul pattern.
        Only the weight-prepack rewrite is expected; the mul is not fused.
        """
        class M(torch.nn.Module):
            def __init__(self, use_bias):
                super().__init__()
                self.linear = torch.nn.Linear(4, 5, use_bias)
            def forward(self, x1, x2):
                return torch.mul(self.linear(x1), x2)
        # Cover both biased and bias-free Linear.
        bias_list = [True, False]
        for bias in bias_list:
            mod = M(bias).eval().to(device="xpu")
            x1 = torch.randn((2, 4)).to(device="xpu")
            x2 = torch.randn((2, 5)).to(device="xpu")
            def matcher_check_fn():
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_count"], 1
                )
            self._test_common(
                mod,
                (x1, x2),
                check_quantization=True,
                matcher_check_fn=matcher_check_fn,
            )
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    def test_qlinear_mul_cpu(self):
        r"""
        This testcase will quantize a Linear->Mul pattern.
        Only the weight-prepack rewrite is expected; the mul is not fused.
        """
        class M(torch.nn.Module):
            def __init__(self, use_bias):
                super().__init__()
                self.linear = torch.nn.Linear(4, 5, use_bias)
            def forward(self, x1, x2):
                return torch.mul(self.linear(x1), x2)
        # Cover both biased and bias-free Linear.
        bias_list = [True, False]
        for bias in bias_list:
            mod = M(bias).eval()
            x1 = torch.randn((2, 4))
            x2 = torch.randn((2, 5))
            def matcher_check_fn():
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_count"], 1
                )
            self._test_common(
                mod,
                (x1, x2),
                matcher_check_fn,
                check_quantization=True,
            )
    @skipIfNoDynamoSupport
    @skipIfNoONEDNN
    @skipIfNoXPU
    def test_qlinear_mul(self):
        r"""
        This testcase will quantize a Linear->Mul pattern.
        NOTE(review): this is a near-duplicate of test_qlinear_mul_xpu above
        (only the @config.patch({"fx_graph_cache": False}) differs); presumably
        one of the two was meant to be removed — confirm.
        """
        class M(torch.nn.Module):
            def __init__(self, use_bias):
                super().__init__()
                self.linear = torch.nn.Linear(4, 5, use_bias)
            def forward(self, x1, x2):
                return torch.mul(self.linear(x1), x2)
        bias_list = [True, False]
        for bias in bias_list:
            mod = M(bias).eval().to(device="xpu")
            x1 = torch.randn((2, 4)).to(device="xpu")
            x2 = torch.randn((2, 5)).to(device="xpu")
            def matcher_check_fn():
                self.assertEqual(
                    counters["inductor"]["qlinear_weight_prepack_matcher_count"], 1
                )
            self._test_common(
                mod,
                (x1, x2),
                check_quantization=True,
                matcher_check_fn=matcher_check_fn,
            )
    @skipIfNoDynamoSupport
    def test_qmaxpool2d(self):
        r"""
        This testcase will quantize Conv2d->ReLU->MaxPool2d pattern.
        MaxPool2d is expected to be lowered as an int8 op (qmaxpool2d) so the
        conv output does not need to be dequantized before pooling.
        """
        class M(torch.nn.Module):
            def __init__(
                self,
                kwargs,
            ):
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    3, 64, 7, bias=True, stride=2, padding=3, dilation=1
                )
                self.relu = torch.nn.ReLU()
                self.maxpool = torch.nn.MaxPool2d(3, **kwargs)
            def forward(self, x):
                return self.maxpool(self.relu(self.conv(x)))
        # Progressively more explicit MaxPool2d kwargs; all are equivalent
        # pooling configs, exercising the matcher against default arguments.
        kwargs_list = [
            {"stride": 2},
            {"stride": 2, "padding": 1},
            {"stride": 2, "padding": 1, "dilation": 1},
            {"stride": 2, "padding": 1, "dilation": 1, "ceil_mode": False},
        ]
        for kwargs in kwargs_list:
            mod = M(kwargs).eval()
            v = torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False).add(
                1
            )
            def matcher_check_fn():
                # ACL performs none of these rewrites, hence 0 there.
                self.assertEqual(
                    counters["inductor"]["qmaxpool2d_matcher_count"],
                    0 if TEST_ACL else 1,
                )
                self.assertEqual(
                    counters["inductor"]["qconv_weight_prepack_matcher_count"], 1
                )
                self.assertEqual(
                    counters["inductor"]["qconv_unary_matcher_count"],
                    0 if TEST_ACL else 1,
                )
                self.assertEqual(
                    counters["inductor"]["qconv_unary_lower_count"],
                    0 if TEST_ACL else 1,
                )
            self._test_common(
                mod,
                (v,),
                matcher_check_fn,
                check_quantization=True,
            )
    @skipIfNoDynamoSupport
    def test_qflatten(self):
        r"""
        This testcase will quantize Conv2d->AdaptiveAvgPool2d->flatten->cat pattern.
        The flatten/reshape is expected to be matched as a quantized reshape
        (qreshape) so it operates on int8 data directly.
        """
        class M(torch.nn.Module):
            def __init__(
                self,
            ):
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    3, 64, 7, bias=True, stride=2, padding=3, dilation=1
                )
                self.relu = torch.nn.ReLU()
                self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1))
            def forward(self, x):
                return torch.cat(
                    [
                        torch.flatten(
                            self.adaptive_avg_pool2d(self.relu(self.conv(x))), 1
                        )
                    ]
                )
        mod = M().eval()
        v = torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False).add(1)
        def matcher_check_fn():
            # ACL performs none of these rewrites, hence 0 there.
            self.assertEqual(
                counters["inductor"]["qreshape_matcher_count"], 0 if TEST_ACL else 1
            )
        self._test_common(
            mod,
            (v,),
            matcher_check_fn,
            check_quantization=True,
        )
    @skipIfNoDynamoSupport
    def test_qcat(self):
        r"""
        This testcase will quantize cat based pattern:

                  X
                /   \
         Conv1(X)   Pow(X)
              \        \
               \      Conv2(...)
                \     /
                  Cat
                   |
                   Y

        The concat of two quantized conv outputs should be matched as a
        quantized cat (qcat) so no dequantize is needed before it.
        """
        class M(torch.nn.Module):
            def __init__(
                self,
            ):
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    3, 64, 7, bias=True, stride=2, padding=3, dilation=1
                )
                self.conv2 = torch.nn.Conv2d(
                    3, 64, 7, bias=True, stride=2, padding=3, dilation=1
                )
            def forward(self, x):
                temp1 = self.conv(x)
                temp2 = self.conv2(torch.pow(x, 2))
                return torch.cat((temp1, temp2), 1)
        mod = M().eval()
        v = torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False).add(1)
        def matcher_check_fn():
            # ACL performs none of these rewrites, hence 0 there.
            self.assertEqual(
                counters["inductor"]["qcat_matcher_count"], 0 if TEST_ACL else 1
            )
            self.assertEqual(
                counters["inductor"]["qconv_weight_prepack_matcher_count"], 2
            )
            self.assertEqual(
                counters["inductor"]["qconv_unary_matcher_count"],
                0 if TEST_ACL else 2,
            )
            self.assertEqual(
                counters["inductor"]["qconv_unary_lower_count"], 0 if TEST_ACL else 2
            )
        self._test_common(
            mod,
            (v,),
            matcher_check_fn,
            check_quantization=True,
        )
    # https://github.com/pytorch/pytorch/issues/99841.
    def test_hardtanh_pattern_fallback(self):
        """clamp_min/clamp_max after ConvTranspose2d: fuses when the bounds are
        scalars, and must fall back gracefully when they are tensors (or when
        min > max)."""
        class Model(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv_transpose = torch.nn.ConvTranspose2d(
                    in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x, min_value, max_value):
                conv_transpose_output = self.conv_transpose(x)
                clamp_min_output = torch.clamp_min(conv_transpose_output, min_value)
                clamp_max_output = torch.clamp_max(clamp_min_output, max_value)
                return clamp_max_output
        # check works for min_value > max_value.
        min_values = [3, torch.randn(1, 32, 28, 28)]
        max_values = [0, torch.randn(1, 32, 28, 28)]
        v = torch.randn(1, 3, 28, 28)
        def matcher_check_fn():
            # ACL performs no unary fusion, hence 0 there.
            self.assertEqual(
                counters["inductor"]["mkldnn_unary_fusion_matcher_nodes"],
                0 if TEST_ACL else 3,
            )
            self.assertEqual(
                counters["inductor"]["mkldnn_conv_weight_pack_matcher_count"], 1
            )
        for min_value, max_value in zip(min_values, max_values):
            mod = Model().eval()
            self._test_common(mod, (v, min_value, max_value), matcher_check_fn)
    def test_leaky_relu_pattern_fallback(self):
        """A hand-written leaky-ReLU (where(x > 0, x, x * slope)) after conv:
        fuses when the slope is a scalar, falls back when it is a tensor."""
        class Model(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x, negative_slope):
                conv_out = self.conv(x)
                return torch.where(conv_out > 0, conv_out, conv_out * negative_slope)
        # Scalar slope (fusable) and tensor slope (must fall back).
        negative_slopes = [0.1, torch.randn(1, 32, 28, 28)]
        def matcher_check_fn():
            # ACL performs no unary fusion, hence 0 there.
            self.assertEqual(
                counters["inductor"]["mkldnn_unary_fusion_matcher_nodes"],
                0 if TEST_ACL else 4,
            )
            self.assertEqual(
                counters["inductor"]["mkldnn_conv_weight_pack_matcher_count"], 1
            )
        with torch.no_grad():
            v = torch.randn(1, 3, 28, 28)
            for negative_slope in negative_slopes:
                mod = Model().eval()
                self._test_common(mod, (v, negative_slope), matcher_check_fn)
    # https://github.com/pytorch/pytorch/issues/99838.
    def test_conv2d_add_scalar(self):
        """conv + scalar constant: the add is folded into the conv bias
        (binary_folding) rather than fused as a binary post-op."""
        class Model(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x):
                out_conv = self.conv(x)
                out = torch.add(out_conv, 1.0)
                return out
        def matcher_check_fn():
            self.assertEqual(counters["inductor"]["binary_folding"], 1)
            self.assertEqual(
                counters["inductor"]["mkldnn_conv_weight_pack_matcher_count"], 1
            )
        with torch.no_grad():
            mod = Model().eval()
            v = torch.randn(1, 3, 28, 28)
            self._test_common(mod, (v,), matcher_check_fn)
    @xfailIfACL
    def test_conv2d_binary_inplace_fusion_pass_cpu(
        self, include_ops=None, exclude_ops=None
    ):
        """Cases where conv+add CAN be fused in place: the generated code must
        contain mkldnn._convolution_pointwise_.binary (in-place variant) and
        not the out-of-place one."""
        # The extra input is consumed only by the add (via a relu copy), so the
        # buffer can be written in place.
        class Model_v1(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x, other):
                conv_out = self.conv(x)
                return torch.add(conv_out, other.relu())
        # The added buffer (pow_out) is produced inside the graph and has no
        # later readers after the add, so in-place fusion is legal.
        class Model_v2(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1
                )
                self.conv2 = torch.nn.Conv2d(
                    in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1
                )
                self.conv3 = torch.nn.Conv2d(
                    in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x, _):
                conv_out1 = self.conv(x)
                pow_out = torch.pow(conv_out1, 2)
                conv_out2 = self.conv2(pow_out)
                conv_out3 = self.conv3(conv_out2)
                res = torch.add(conv_out3, pow_out)
                return res
        input = torch.randn(1, 3, 28, 28).to(memory_format=torch.channels_last)
        others = [
            torch.randn(1, 32, 28, 28).to(memory_format=torch.channels_last),
            torch.randn(1, 32, 28, 28).to(memory_format=torch.channels_last),
        ]
        mod_v1 = Model_v1().to(memory_format=torch.channels_last).eval()
        mod_v2 = Model_v2().to(memory_format=torch.channels_last).eval()
        if include_ops is None:
            include_ops = ["mkldnn._convolution_pointwise_.binary"]
        if exclude_ops is None:
            exclude_ops = ["mkldnn._convolution_pointwise.binary"]
        for other, mod in zip(others, [mod_v1, mod_v2]):
            self._test_code_common(mod, (input, other), include_ops, exclude_ops)
    @xfailIfACL
    def test_conv2d_binary_inplace_fusion_failed_cpu(
        self, include_ops=None, exclude_ops=None
    ):
        """Cases where conv+add must NOT be fused in place: the generated code
        must contain the out-of-place mkldnn._convolution_pointwise.binary and
        not the in-place variant."""
        # Written buffer is graph input, we can't fuse inplace.
        class Model_v1(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x, other):
                conv_out = self.conv(x)
                return torch.add(conv_out, other)
        # Written buffer is an alias tensor, we can't fuse inplace.
        class Model_v2(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x, other):
                conv_out = self.conv(x)
                return torch.add(conv_out, other[1:2, :, :, :]), other
        # Written buffer (pow_out) has another reader (other2) after the add,
        # so it cannot be clobbered in place.
        class Model_v3(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1
                )
                self.conv2 = torch.nn.Conv2d(
                    in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x, _):
                pow_out = torch.pow(self.conv(x), 2)
                other2 = F.relu(pow_out)
                conv_out2 = self.conv2(pow_out)
                res = torch.add(conv_out2, pow_out)
                res = res + other2
                return res
        # Written buffer is an ReinterpretView, we can't fuse inplace.
        class Model_v4(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 32, 3, padding=1, bias=True)
                self.linear = torch.nn.Linear(32 * 28, 32 * 28)
                self.relu = torch.nn.ReLU()
            def forward(self, x, y):
                x = self.conv(self.relu(x))
                y = self.linear(y)
                y = torch.cat((y, y + 1), 1)
                y = torch.ops.aten.permute.default(y, [0, 2, 1]).reshape(1, 32, 28, 28)
                return x + y
        # The added buffer (x1) is also the conv input, so it is still live
        # when the add runs and cannot be overwritten.
        class Model_v5(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(32, 32, 3, padding=1, bias=True)
                self.relu = torch.nn.ReLU()
            def forward(self, _, x):
                x1 = self.relu(x)
                return self.conv(x1) + x1
        input = torch.randn(1, 3, 28, 28).to(memory_format=torch.channels_last)
        # One "other" operand per model, matched positionally below.
        others = [
            torch.randn(1, 32, 28, 28).to(memory_format=torch.channels_last),
            torch.randn(2, 32, 28, 28).to(memory_format=torch.channels_last),
            torch.randn(1, 32, 28, 28).to(memory_format=torch.channels_last),
            torch.randn(1, 14, 32 * 28),
            torch.randn(1, 32, 28, 28).to(memory_format=torch.channels_last),
        ]
        mod_v1 = Model_v1().to(memory_format=torch.channels_last).eval()
        mod_v2 = Model_v2().to(memory_format=torch.channels_last).eval()
        mod_v3 = Model_v3().to(memory_format=torch.channels_last).eval()
        mod_v4 = Model_v4().to(memory_format=torch.channels_last).eval()
        mod_v5 = Model_v5().to(memory_format=torch.channels_last).eval()
        if include_ops is None:
            include_ops = ["mkldnn._convolution_pointwise.binary"]
        if exclude_ops is None:
            exclude_ops = ["mkldnn._convolution_pointwise_.binary"]
        for other, mod in zip(others, [mod_v1, mod_v2, mod_v3, mod_v4, mod_v5]):
            self._test_code_common(mod, (input, other), include_ops, exclude_ops)
    def test_conv2d_binary_fusion_failed(self):
        """Cases where conv+add must not be fused as a binary post-op at all:
        alpha != 1, other operand shape mismatch, both add inputs being the
        same tensor, and mixed-dtype add operands."""
        # we don't support alpha !=1 case or other has different size with conv's output.
        class Model(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x, other, alpha):
                conv_out = self.conv(x)
                return torch.add(conv_out, other, alpha=alpha)
        # https://github.com/pytorch/pytorch/issues/100802.
        # we can't do the fusion when add's inputs are same tensor.
        class Model2(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x):
                out = self.conv(x)
                out = torch.add(out, out)
                return out
        # https://github.com/pytorch/pytorch/issues/101374.
        # we can't do the fusion when add's inputs are mixed dtype.
        class Model3(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(
                    in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1
                )
            def forward(self, x):
                temp = self.conv(x)
                other = torch.ones(temp.shape, dtype=torch.double)
                out = torch.add(temp, other)
                return out
        input = torch.randn(1, 3, 28, 28).to(memory_format=torch.channels_last)
        # First other matches the conv output shape (but alpha=0.1);
        # second has a mismatched shape (with alpha=1.0).
        others = [
            torch.randn(1, 32, 28, 28).to(memory_format=torch.channels_last),
            torch.randn(32, 28, 28),
        ]
        include_ops = ["mkldnn._convolution_pointwise"]
        exclude_ops = [
            "mkldnn._convolution_pointwise.binary",
            "mkldnn._convolution_pointwise_.binary",
        ]
        # case1
        for other, alpha in zip(others, [0.1, 1.0]):
            mod = Model().to(memory_format=torch.channels_last).eval()
            self._test_code_common(mod, (input, other, alpha), include_ops, exclude_ops)
        # case2:
        mod = Model2().to(memory_format=torch.channels_last).eval()
        self._test_code_common(mod, (input,), include_ops, exclude_ops)
        # case3:
        mod = Model3().to(memory_format=torch.channels_last).eval()
        self._test_code_common(mod, (input,), include_ops, exclude_ops)
    @xfailIfACL
    def test_reproduce_99842_issue(self):
        """Regression test for https://github.com/pytorch/pytorch/issues/99842:
        conv output added to a freshly materialized ones() tensor must still
        fuse into the in-place binary conv post-op."""
        class Model(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
            def forward(self, input_tensor):
                x = self.conv(input_tensor)
                x = F.relu(x + torch.ones(x.size()))
                return x
        input = torch.randn(1, 3, 14, 14)
        mod = Model().eval()
        include_ops = ["mkldnn._convolution_pointwise_.binary"]
        self._test_code_common(mod, (input,), include_ops, [])
    def test_reproduce_113440_issue_1(self):
        """Regression test for https://github.com/pytorch/pytorch/issues/113440:
        two chained conv-pair/in-place-add/ReLU stages must compile and run
        twice without error (no correctness assertion, crash-repro only)."""
        class Mod(torch.nn.Module):
            def __init__(
                self,
                add_fn,
                **kwargs,
            ):
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.conv2 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.add_fn = add_fn
                self.relu = torch.nn.ReLU(inplace=True)
                self.conv3 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1)
                self.conv4 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1)
                self.add_fn2 = add_fn
                self.relu2 = torch.nn.ReLU(inplace=True)
                self.use_relu = True
            def forward(self, x):
                x1 = self.conv1(x)
                x2 = self.conv2(x)
                tmp = self.add_fn(x1, x2)
                if self.use_relu:
                    tmp = self.relu(tmp)
                tmp1 = self.conv3(tmp)
                tmp2 = self.conv4(tmp)
                res = self.add_fn2(tmp1, tmp2)
                if self.use_relu:
                    res = self.relu2(res)
                return res
        with torch.no_grad():
            example_inputs = (
                torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False).add(
                    1
                ),
            )
            # NOTE(review): result unused — presumably leftover from debugging
            # the original issue; confirm whether this call can be removed.
            example_inputs[0].get_device()
            m = Mod(
                lambda x, y: x.add_(y),
            ).eval()
            om = torch.compile(m)
            # Run twice: the second call exercises the cached compiled graph.
            om(*example_inputs)
            om(*example_inputs)
    def test_reproduce_113440_issue_2(self):
        """Second regression repro for
        https://github.com/pytorch/pytorch/issues/113440 (crash-repro only).

        NOTE(review): conv4-conv7, add_fn2/add_fn3 and relu3 are constructed
        but never used in forward — presumably intentional, to reproduce the
        original issue's module structure; confirm before cleaning up.
        """
        class Mod(torch.nn.Module):
            def __init__(
                self,
                add_fn,
                **kwargs,
            ):
                super().__init__()
                self.conv1 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.conv2 = torch.nn.Conv2d(3, 6, kernel_size=3, stride=1)
                self.add_fn = add_fn
                self.relu = torch.nn.ReLU(inplace=True)
                self.conv3 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1)
                self.conv4 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1)
                self.add_fn2 = add_fn
                self.relu2 = torch.nn.ReLU(inplace=True)
                self.conv5 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1)
                self.conv6 = torch.nn.Conv2d(6, 6, kernel_size=3, stride=1)
                self.conv7 = torch.nn.Conv2d(6, 6, kernel_size=1, stride=1)
                self.add_fn3 = add_fn
                self.relu3 = torch.nn.ReLU(inplace=True)
                self.use_relu = True
            def forward(self, x):
                x1 = self.conv1(x)
                x2 = self.conv2(x)
                tmp = self.add_fn(x1, x2)
                if self.use_relu:
                    tmp = self.relu(tmp)
                tmp1 = self.conv3(tmp)
                res = self.relu2(tmp1)
                return res
        with torch.no_grad():
            example_inputs = (
                torch.randn((1, 3, 8, 8), dtype=torch.float32, requires_grad=False).add(
                    1
                ),
            )
            m = Mod(
                lambda x, y: x.add_(y),
            ).eval()
            om = torch.compile(m)
            # Run twice: the second call exercises the cached compiled graph.
            om(*example_inputs)
            om(*example_inputs)
@unittest.skipIf(not TEST_MKL, "Test requires MKL")
@xfailIfACL
@torch._dynamo.config.patch("inline_inbuilt_nn_modules", True)
def test_reproduce_121253_issue_addmm_fusion_check(self):
class Mod(torch.nn.Module):
def __init__(self, weight, bias, beta, alpha):
super().__init__()
self.weight = weight
self.bias = bias
self.beta = beta
self.alpha = alpha
def forward(self, x):
return torch.addmm(
self.bias, x, self.weight, beta=self.beta, alpha=self.alpha
)
dtypes = [torch.float32]
if torch.ops.mkldnn._is_mkldnn_bf16_supported():
dtypes.append(torch.bfloat16)
for dtype in dtypes:
linear_op = (
"mkl._mkl_linear"
if dtype == torch.float32
else "mkldnn._linear_pointwise"
)
for beta, alpha in zip([1.0, 0.1, 0.0], [1.0, 0.1, 1.0]):
weight = torch.nn.Parameter(torch.randn(64, 64, dtype=dtype))
bias = torch.nn.Parameter(torch.randn(64, dtype=dtype))
mod = Mod(weight, bias, beta, alpha).to(dtype).eval()
with torch.no_grad():
x = torch.randn(1, 64, dtype=dtype)
include_ops = []
exclude_ops = []
if (beta != 1.0 and beta != 0.0) or alpha != 1.0:
exclude_ops = [linear_op]
else:
include_ops = [linear_op]
self._test_code_common(mod, (x,), include_ops, exclude_ops)
@skipIfNoDynamoSupport
def test_woq_int8(self):
class M(torch.nn.Module):
def __init__(self, is_permute):
super().__init__()
self.is_permute = is_permute
def forward(self, x, weight, scales):
if self.is_permute:
weight = weight.t()
m = torch.mm(
x.reshape(-1, x.shape[-1]),
weight.to(x.dtype),
)
y = m * scales.to(m.dtype)
y = y.reshape(*x.shape[:-1], y.shape[-1])
return y
else:
return (
torch.nn.functional.linear(x, weight.to(dtype=x.dtype)) * scales
)
x_shape = (1, 1, 256)
s_shape = 12
x_strides = [
(256, 256, 1), # linear dispatching to mm
(256, 32, 1), # linear dispatching to bmm
]
is_permutes = [False, True]
for x_stride, is_permute in itertools.product(x_strides, is_permutes):
mod = M(is_permute=is_permute).eval()
x = torch.randn(x_shape, dtype=torch.bfloat16).as_strided(x_shape, x_stride)
w_shape = (12, 256)
w = torch.randint(-128, 127, w_shape, dtype=torch.int8)
s = torch.randn(s_shape, dtype=torch.bfloat16)
def matcher_check_fn():
self.assertEqual(
counters["inductor"]["woq_matcher_count"], 0 if TEST_ACL else 1
)
self._test_common(
mod,
(x, w, s),
matcher_check_fn,
check_quantization=False,
atol=0.001,
rtol=0.07,
)
@skipIfNoDynamoSupport
def test_woq_int4_cpu(self):
class M(torch.nn.Module):
def __init__(self, in_feature, out_feature, group_size):
super().__init__()
self.weight = torch.randint(
0, 255, (out_feature, in_feature // 2), dtype=torch.uint8
)
self.group_size = group_size
self.qScaleAndZeros = torch.rand(
(in_feature // group_size, out_feature, 2), dtype=torch.bfloat16
)
def forward(self, x):
if x.ndim > 2:
x = x.reshape(-1, x.shape[-1])
y = torch.ops.aten._weight_int4pack_mm_for_cpu.default(
x, self.weight, self.group_size, self.qScaleAndZeros
)
return y.reshape(*x.shape[:-1], y.shape[-1])
return torch.ops.aten._weight_int4pack_mm_for_cpu.default(
x, self.weight, self.group_size, self.qScaleAndZeros
)
bs = 4
seq = 8
x_dim_list = [2, 3]
in_feature_list = [256, 512]
out_feature_list = [256, 512]
group_size_list = [64, 128]
cases = itertools.product(
x_dim_list, in_feature_list, out_feature_list, group_size_list
)
for x_dim, in_feature, out_feature, group_size in cases:
x_shape = (seq, in_feature) if x_dim == 2 else (bs, seq, in_feature)
x = torch.randn(x_shape, dtype=torch.bfloat16)
m = M(in_feature, out_feature, group_size).eval()
def matcher_check_fn():
self.assertEqual(
counters["inductor"]["woq_matcher_count"], 0 if TEST_ACL else 1
)
include_ops = [
"aoti_torch_cpu__weight_int4pack_mm_cpu_tensor"
if torch._inductor.config.cpp_wrapper
else "torch.ops.quantized.int4mm_packed_weight_cpu.default"
]
self._test_code_common(
m,
(x,),
include_ops,
["torch.ops.aten._weight_int4pack_mm_for_cpu.default"],
)
def _test_linear_dynamic_fp16_helper(self, use_relu: bool):
class M(torch.nn.Module):
def __init__(self, bias: bool, use_relu: bool):
super().__init__()
self.linear = torch.nn.Linear(256, 256, bias=bias)
self.relu = torch.nn.ReLU()
self.use_relu = use_relu
def forward(self, x):
if self.use_relu:
return self.relu(self.linear(x))
return self.linear(x)
quantizer = X86InductorQuantizer().set_global(
xiq.get_default_x86_inductor_quantization_config()
)
quantizer.set_module_type_qconfig(
torch.nn.Linear, xiq.get_x86_inductor_linear_dynamic_fp16_config()
)
bias_list = [True, False]
input_ndim_list = [2, 3]
x_contig_list = [True, False]
cases = itertools.product(bias_list, input_ndim_list, x_contig_list)
for bias, input_ndim, x_contig in cases:
x_shape = (4, 256) if input_ndim == 2 else (4, 1, 256)
x = torch.randn(x_shape)
if not x_contig:
x = x[0::2, ...]
mod = M(bias, use_relu).eval()
def matcher_check_fn():
self.assertEqual(
counters["inductor"]["qlinear_weight_prepack_matcher_count"], 1
)
# Matched nodes:
# (1) w to fp16, (2) w to fp32, (3) permute w, (4) mm/addmm/bmm
# If x.ndim == 3 and x is contiguous, two view nodes are added.
# If x.ndim == 3 and x is not contiguous, two expand nodes and one add node are added.
nodes_count = 4
if input_ndim > 2:
if x_contig:
nodes_count += 2
else:
nodes_count += 3 if bias else 2
if use_relu:
nodes_count += 1
self.assertEqual(
counters["inductor"]["qlinear_weight_prepack_matcher_nodes"],
nodes_count,
)
self._test_common(
mod,
(x,),
atol=1e-2,
rtol=1e-2,
matcher_check_fn=matcher_check_fn,
check_quantization=True,
quantizer=quantizer,
)
linear_op_str = (
"torch.ops.onednn.linear_relu_dynamic_fp16.default"
if use_relu
else "torch.ops.onednn.linear_dynamic_fp16.default"
)
self._test_code_common(
mod,
(x,),
[linear_op_str],
["torch.ops.aten.addmm.default", "torch.ops.aten.mm.default"],
check_quantization=True,
quantizer=quantizer,
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_linear_dynamic_fp16(self):
self._test_linear_dynamic_fp16_helper(use_relu=False)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
def test_linear_relu_dynamic_fp16(self):
self._test_linear_dynamic_fp16_helper(use_relu=True)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
# TODO: investigate options of torch.compile in fbcode
@unittest.skipIf(IS_FBCODE, "Failing in fbcode")
@parametrize("has_bias", [True, False])
@parametrize("dtype", [torch.float, torch.bfloat16])
@parametrize("per_channel_quant", [True, False])
@parametrize("dynamic", [True, False])
def test_smooth_quant_with_int_mm(
self, has_bias, dtype, per_channel_quant, dynamic
):
r"""
This testcase check if we can match the SmoothQuant int8 linear pattern from Torchao.
The pattern is:
(no bias) reshape -> _int_mm -> convert_element_type -> (expand -> mul) -> mul -> reshape
or
(with bias) pattern_no_bias -> add -> reshape -> reshape
"""
if dtype == torch.bfloat16 and not torch.ops.mkldnn._is_mkldnn_bf16_supported():
return
M = 16
in_feature = 32
out_feature = 64
q_min, q_max = -32, 31
class Mod(torch.nn.Module):
def __init__(
self, dtype: torch.dtype, has_bias: bool, per_channel_quant: bool
):
super().__init__()
self.dtype = dtype
self.has_bias = has_bias
self.b = torch.randint(
q_min, q_max, [in_feature, out_feature], dtype=torch.int8
)
self.per_channel_quant = per_channel_quant
a_scale_per_tensor = torch.rand([1], dtype=dtype) * 0.01 + 0.01
a_scale_per_channel = torch.rand([M, 1], dtype=dtype) * 0.01 + 0.01
self.a_scale = (
a_scale_per_channel
if self.per_channel_quant
else a_scale_per_tensor
)
self.b_scale = torch.rand([out_feature]) * 0.01 + 0.01
self.b_scale = self.b_scale.to(dtype)
self.bias = torch.rand([out_feature], dtype=dtype) if has_bias else None
def forward(self, a):
out_shape = a.shape[:-1] + (self.b.size(-1),)
a_reshaped = a.reshape(-1, a.size(-1))
c = torch._int_mm(a_reshaped, self.b)
c = c.to(self.dtype)
c_shape = c.shape
a_scale = self.a_scale.expand(c.shape)
c = c * a_scale
c = c * self.b_scale
if self.has_bias:
c = c.reshape([1, *list(c_shape)])
c = c + self.bias
c = c.reshape(c_shape)
c = c.reshape(out_shape)
return c
mod = Mod(dtype, has_bias, per_channel_quant).eval()
a = torch.randint(q_min, q_max, [1, M, in_feature], dtype=torch.int8)
def matcher_check_fn():
self.assertEqual(
counters["inductor"]["qlinear_weight_prepack_matcher_count"], 1
)
if dynamic:
nodes_count = 10 if has_bias else 7
else:
nodes_count = 7 if has_bias else 6
if counters["inductor"]["removed_pointless_view_pair"] == 0:
# Removing pointless view pairs affect how the pattern
# for this test is matched.
self.assertEqual(
counters["inductor"]["qlinear_weight_prepack_matcher_nodes"],
nodes_count,
)
self._test_common(
mod,
(a,),
matcher_check_fn=matcher_check_fn,
check_autocast=dtype,
compile_options={"dynamic": dynamic},
)
@skipIfNoDynamoSupport
@skipIfNoONEDNN
# TODO: investigate options of torch.compile in fbcode
@unittest.skipIf(IS_FBCODE, "Failing in fbcode")
@parametrize("has_bias", [True, False])
@parametrize("dtype", [torch.float, torch.bfloat16])
@parametrize("dynamic", [True, False])
@parametrize("reshape_a", [True, False])
@parametrize(
"M",
[
1,
32,
],
)
@parametrize("inplace_add", [True, False])
@parametrize("expand_a_scale", [True, False])
def test_da8w8_sym_act_sym_wgt_with_int_mm(
self, has_bias, dtype, dynamic, reshape_a, M, inplace_add, expand_a_scale
):
r"""
This testcase check if we can match the int8_dynamic_activation_int8_weight int8 linear pattern from torchao,
when activation is symmetrically quantized dynamically & weights are symmetrically quantized (statically)
The pattern is:
(no bias) _int_mm -> convert_element_type -> ([expand_a] -> mul) -> mul
or
(with bias) pattern_no_bias -> add
Expansion of the scale of activation is optional.
The pattern depiction doesn't mean that convert_element_type output is fed into expand_a as input,
but simply that activation scale may be applied after an expand operation on it.
"""
if dtype == torch.bfloat16 and not torch.ops.mkldnn._is_mkldnn_bf16_supported():
return
in_feature = 32
out_feature = 64
q_min, q_max = -32, 31
# we only test for qlinear_binary in this case
test_for_pointwise_binary = bool(
M == 1
and inplace_add
and not expand_a_scale
and not dynamic
and not has_bias
)
if test_for_pointwise_binary and not IS_X86:
self.skipTest("Some UTs are only supported on x86_64 CPUs")
class Mod(torch.nn.Module):
def __init__(self, dtype: torch.dtype, has_bias: bool):
super().__init__()
self.dtype = dtype
self.has_bias = has_bias
self.b = torch.randint(
q_min, q_max, [in_feature, out_feature], dtype=torch.int8
)
self.a_scale = torch.rand([M, 1], dtype=dtype) * 0.01 + 0.01
self.b_scale = torch.rand([out_feature]) * 0.01 + 0.01
self.b_scale = self.b_scale.to(dtype)
self.bias = torch.rand([out_feature], dtype=dtype) if has_bias else None
self.additive = torch.rand([M, out_feature], dtype=dtype)
def forward(self, a):
if reshape_a:
a_reshaped = a.reshape(-1, a.size(-1))
else:
a_reshaped = a
c = torch._int_mm(a_reshaped, self.b)
c = c.to(self.dtype)
if expand_a_scale:
a_scale = self.a_scale.expand(c.shape)
else:
a_scale = self.a_scale
c = c * a_scale
c = c * self.b_scale
if self.has_bias:
c = c + self.bias
elif inplace_add and test_for_pointwise_binary:
# When M is 1, dynamic shapes are enabled with torch.compile, has_bias is False,
# expand_a_scale is False and inplace_add is true,
# the output's outermost dim's stride can't be determined due to some Inductor bug.
c.add_(self.additive)
return c
mod = Mod(dtype, has_bias).eval()
a = torch.randint(q_min, q_max, [M, in_feature], dtype=torch.int8)
def matcher_check_fn():
self.assertEqual(
counters["inductor"]["qlinear_weight_prepack_matcher_count"], 1
)
self._test_common(
mod,
(a,),
matcher_check_fn,
check_autocast=dtype,
compile_options={"dynamic": dynamic},
)
if test_for_pointwise_binary:
self.assertEqual(counters["inductor"]["qlinear_binary_matcher_count"], 1)
| TestPatternMatcher |
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 6319,
"end": 6985
} | class ____:
"""Table 3.41 Entries in a file specification dictionary."""
Type = "/Type"
FS = "/FS" # The name of the file system to be used to interpret this file specification
F = "/F" # A file specification string of the form described in §3.10.1
UF = "/UF" # A Unicode string of the file as described in §3.10.1
DOS = "/DOS"
Mac = "/Mac"
Unix = "/Unix"
ID = "/ID"
V = "/V"
EF = "/EF" # dictionary, containing a subset of the keys F, UF, DOS, Mac, and Unix
RF = "/RF" # dictionary, containing arrays of /EmbeddedFile
DESC = "/Desc" # description of the file
Cl = "/Cl"
| FileSpecificationDictionaryEntries |
python | plotly__plotly.py | plotly/graph_objs/barpolar/hoverlabel/_font.py | {
"start": 233,
"end": 17148
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "barpolar.hoverlabel"
_path_str = "barpolar.hoverlabel.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.barpolar.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.barpolar.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.hoverlabel.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | encode__django-rest-framework | tests/test_atomic_requests.py | {
"start": 654,
"end": 788
} | class ____(APIView):
def post(self, request, *args, **kwargs):
BasicModel.objects.create()
raise Exception
| ErrorView |
python | pytorch__pytorch | test/jit/test_autodiff.py | {
"start": 270,
"end": 5244
} | class ____(JitTestCase):
def test_undefined_tensor_lists(self):
def fn(tensor_list: List[torch.Tensor], add_tensor):
cat = torch.cat(tensor_list, dim=1)
r = torch.sin(cat + add_tensor)
return r
fn_s = torch.jit.script(fn)
a = torch.rand((3, 6), requires_grad=True)
b = torch.rand((3, 10), requires_grad=True)
x = [a, b]
y = torch.rand((3, 16), requires_grad=True)
ret = fn_s(x, y)
ret.sum().backward()
ret = fn_s(x, y)
ret.sum().backward()
ret = fn_s(x, y)
s = ret.sum()
# backward_fn expects 2 inputs: (grad_output, current_grad_r)
# current_grad_r is provided because we need to add this contribution
# to grad_r when we return it.
backward_fn = s.grad_fn.next_functions[0][0]
# check behavior with defined tensor
grad_out = torch.rand((3, 16))
grad_inputs = backward_fn(grad_out, None)
# expect 3 tensors: grad_y, grad_a, grad_b
self.assertEqual(3, len(grad_inputs))
for x in grad_inputs:
self.assertTrue(isinstance(x, torch.Tensor))
# now test with undefined grad_out
grad_inputs = backward_fn(None, None)
# expect all of them to be None
self.assertEqual(3, len(grad_inputs))
for x in grad_inputs:
if x is not None:
self.assertEqual(0, torch.max(torch.abs(x)).item())
def test_requires_grad_outputs(self):
# outputs should require_grad only if eager outputs would require_grad.
def fn(a, b, c):
return a.relu() + b.relu(), c.relu()
a = torch.rand((10, 10), requires_grad=False)
b = torch.rand((10, 10), requires_grad=False)
c = torch.rand((10, 10), requires_grad=True)
fn_s = torch.jit.script(fn)
for _ in range(4):
x, y = fn_s(a, b, c)
self.assertFalse(x.requires_grad)
self.assertTrue(y.requires_grad)
def test_requires_grad_outputs_profiled_twice(self):
# the value "r" is used twice, by gammaln and by entr, so it is profiled twice.
# So during autodiff graph formation the profile nodes are unmerged because
# they are aliasing. Then the DifferentiableGraph doesn't have a profile
# node on the output. The requires_grad info should then be added onto the
# output value (otherwise autodiff will make the output require_grad).
# Note: this relies on gammaln and entr not having autodiff implementations.
def fn(a, b, c):
r = a.relu().relu()
return torch.special.gammaln(r), torch.special.entr(r), c.cos().relu()
fn_s = torch.jit.script(fn)
a = torch.rand((10, 10), requires_grad=False)
b = torch.rand((10, 10), requires_grad=False)
c = torch.rand((10, 10), requires_grad=True)
for _ in range(4):
x_s, y_s, z_s = fn_s(a, b, c)
x, y, z = fn(a, b, c)
self.assertEqual(x_s.requires_grad, x.requires_grad)
self.assertEqual(y_s.requires_grad, y.requires_grad)
self.assertEqual(z_s.requires_grad, z.requires_grad)
def test_requires_grad_outputs_side_effects(self):
# same as above, but also add a CallFunction in between.
@torch.jit.ignore
def python_fn(x):
return x.relu()
def fn(a, b, c):
r = a.relu().relu()
z = python_fn(r)
return torch.relu(r), torch.nn.functional.gelu(r), c.cos().relu()
fn_s = torch.jit.script(fn)
a = torch.rand((10, 10), requires_grad=False)
b = torch.rand((10, 10), requires_grad=False)
c = torch.rand((10, 10), requires_grad=True)
for _ in range(4):
x_s, y_s, z_s = fn_s(a, b, c)
x, y, z = fn(a, b, c)
self.assertEqual(x_s.requires_grad, x.requires_grad)
self.assertEqual(y_s.requires_grad, y.requires_grad)
self.assertEqual(z_s.requires_grad, z.requires_grad)
def test_autodiff_requires_grad_nograd(self):
@torch.jit.ignore
def python_fn(x):
return x.relu()
def fn(a, b, c):
x = a.sin().relu()
y = python_fn(b)
with torch.no_grad():
z = x + c
return x, y, z
fn_s = torch.jit.script(fn)
a = torch.rand((10, 10), requires_grad=True)
b = torch.rand((10, 10), requires_grad=True)
c = torch.rand((10, 10), requires_grad=True)
for _ in range(4):
x_s, y_s, z_s = fn_s(a, b, c)
x, y, z = fn(a, b, c)
self.assertEqual(x_s.requires_grad, x.requires_grad)
self.assertEqual(y_s.requires_grad, y.requires_grad)
self.assertEqual(z_s.requires_grad, z.requires_grad)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestAutodiffJit |
python | pytorch__pytorch | benchmarks/functional_autograd_benchmark/torchaudio_models.py | {
"start": 14581,
"end": 16540
} | class ____(nn.Module):
"""Container module with an encoder, a recurrent or transformer module, and a decoder."""
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super().__init__()
try:
from torch.nn import TransformerEncoder, TransformerEncoderLayer
except Exception as e:
raise ImportError(
"TransformerEncoder module does not exist in PyTorch 1.1 or lower."
) from e
self.model_type = "Transformer"
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
def init_weights(self):
initrange = 0.1
nn.init.uniform_(self.encoder.weight, -initrange, initrange)
# Not sure how this works in the original code
# nn.init.zeros_(self.decoder)
nn.init.uniform_(self.decoder.weight, -initrange, initrange)
def forward(self, src, has_mask=True):
if has_mask:
device = src.device
# This will be created once during warmup
if self.src_mask is None or self.src_mask.size(0) != len(src):
mask = nn.Transformer.generate_square_subsequent_mask(len(src)).to(
device
)
self.src_mask = mask
else:
self.src_mask = None
src = self.encoder(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, self.src_mask)
output = self.decoder(output)
return F.log_softmax(output, dim=-1)
# From https://github.com/pytorch/text/tree/master/torchtext/nn/modules
| TransformerModel |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 7144,
"end": 7550
} | class ____(_TestDCTBase):
def test_definition_matlab(self):
# Test correspondence with MATLAB (orthornomal mode).
dt = np.result_type(np.float32, self.rdt)
for xr, yr in zip(X, Y):
x = np.array(xr, dtype=dt)
y = dct(x, norm="ortho", type=2)
assert_equal(y.dtype, dt)
assert_array_almost_equal(y, yr, decimal=self.dec)
| _TestDCTIIBase |
python | Lightning-AI__lightning | tests/tests_pytorch/helpers/simple_models.py | {
"start": 2803,
"end": 4439
} | class ____(LightningModule):
def __init__(self):
super().__init__()
setattr(self, "layer_0", nn.Linear(16, 64))
setattr(self, "layer_0a", torch.nn.ReLU())
for i in range(1, 3):
setattr(self, f"layer_{i}", nn.Linear(64, 64))
setattr(self, f"layer_{i}a", torch.nn.ReLU())
setattr(self, "layer_end", nn.Linear(64, 1))
self.train_mse = MeanSquaredError()
self.valid_mse = MeanSquaredError()
self.test_mse = MeanSquaredError()
def forward(self, x):
x = self.layer_0(x)
x = self.layer_0a(x)
x = self.layer_1(x)
x = self.layer_1a(x)
x = self.layer_2(x)
x = self.layer_2a(x)
return self.layer_end(x)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=0.01)
return [optimizer], []
def training_step(self, batch, batch_idx):
x, y = batch
out = self.forward(x)
loss = F.mse_loss(out, y)
self.log("train_loss", loss, prog_bar=False)
self.log("train_MSE", self.train_mse(out, y), prog_bar=True)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
x, y = batch
out = self.forward(x)
self.log("val_loss", F.mse_loss(out, y), prog_bar=False)
self.log("val_MSE", self.valid_mse(out, y), prog_bar=True)
def test_step(self, batch, batch_idx):
x, y = batch
out = self.forward(x)
self.log("test_loss", F.mse_loss(out, y), prog_bar=False)
self.log("test_MSE", self.test_mse(out, y), prog_bar=True)
| RegressionModel |
python | ray-project__ray | rllib/offline/offline_policy_evaluation_runner.py | {
"start": 2261,
"end": 6472
} | class ____(OfflinePreLearner):
def __call__(self, batch: Dict[str, numpy.ndarray]) -> Dict[str, numpy.ndarray]:
# If we directly read in episodes we just convert to list.
if self.input_read_episodes:
# Import `msgpack` for decoding.
import msgpack
import msgpack_numpy as mnp
# Read the episodes and decode them.
episodes: List[SingleAgentEpisode] = [
SingleAgentEpisode.from_state(
msgpack.unpackb(state, object_hook=mnp.decode)
)
for state in batch["item"]
]
# Ensure that all episodes are done and no duplicates are in the batch.
episodes = self._validate_episodes(episodes)
# Add the episodes to the buffer.
self.episode_buffer.add(episodes)
# TODO (simon): Refactor into a single code block for both cases.
episodes = self.episode_buffer.sample(
num_items=self.config.train_batch_size_per_learner,
batch_length_T=(
self.config.model_config.get("max_seq_len", 0)
if self._module.is_stateful()
else None
),
n_step=self.config.get("n_step", 1) or 1,
# TODO (simon): This can be removed as soon as DreamerV3 has been
# cleaned up, i.e. can use episode samples for training.
sample_episodes=True,
to_numpy=True,
)
# Else, if we have old stack `SampleBatch`es.
elif self.input_read_sample_batches:
episodes: List[
SingleAgentEpisode
] = OfflinePreLearner._map_sample_batch_to_episode(
self._is_multi_agent,
batch,
to_numpy=True,
schema=SCHEMA | self.config.input_read_schema,
input_compress_columns=self.config.input_compress_columns,
)[
"episodes"
]
# Ensure that all episodes are done and no duplicates are in the batch.
episodes = self._validate_episodes(episodes)
# Add the episodes to the buffer.
self.episode_buffer.add(episodes)
# Sample steps from the buffer.
episodes = self.episode_buffer.sample(
num_items=self.config.train_batch_size_per_learner,
batch_length_T=(
self.config.model_config.get("max_seq_len", 0)
if self._module.is_stateful()
else None
),
n_step=self.config.get("n_step", 1) or 1,
# TODO (simon): This can be removed as soon as DreamerV3 has been
# cleaned up, i.e. can use episode samples for training.
sample_episodes=True,
to_numpy=True,
)
# Otherwise we map the batch to episodes.
else:
episodes: List[SingleAgentEpisode] = self._map_to_episodes(
self._is_multi_agent,
batch,
schema=SCHEMA | self.config.input_read_schema,
to_numpy=False,
input_compress_columns=self.config.input_compress_columns,
observation_space=self.observation_space,
action_space=self.action_space,
)["episodes"]
episode_dicts = []
for episode in episodes:
# Note, we expect users to provide terminated episodes in `SingleAgentEpisode`
# or `SampleBatch` format. Otherwise computation of episode returns will be
# biased.
episode_dict = {}
episode_dict[Columns.OBS] = episode.get_observations(slice(0, len(episode)))
episode_dict[Columns.ACTIONS] = episode.get_actions()
episode_dict[Columns.REWARDS] = episode.get_rewards()
episode_dict[Columns.ACTION_LOGP] = episode.get_extra_model_outputs(
key=Columns.ACTION_LOGP
)
episode_dicts.append(episode_dict)
return {"episodes": episode_dicts}
| OfflinePolicyPreEvaluator |
python | astropy__astropy | astropy/extern/ply/yacc.py | {
"start": 4886,
"end": 5349
} | class ____(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
# Null logger is used when no output is generated. Does nothing.
| PlyLogger |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/auth/managers/simple/test_user.py | {
"start": 822,
"end": 1118
} | class ____:
def test_get_id(self, test_admin):
assert test_admin.get_id() == "test"
def test_get_name(self, test_admin):
assert test_admin.get_name() == "test"
def test_get_role(self, test_admin):
assert test_admin.get_role() == "admin"
| TestSimpleAuthManagerUser |
python | pdm-project__pdm | tests/cli/conftest.py | {
"start": 1713,
"end": 3753
} | class ____:
mock_pypi: MagicMock
uploaded: list[Any]
@pytest.fixture
# @pytest.mark.usefixtures("mock_run_gpg", "prepare_packages")
def mock_publish(mock_pypi, uploaded) -> PublishMock:
return PublishMock(
mock_pypi=mock_pypi,
uploaded=uploaded,
)
@pytest.fixture
def _echo(project):
"""
Provides an echo.py script producing cross-platform expectable outputs
"""
(project.root / "echo.py").write_text(
textwrap.dedent(
"""\
import os, sys, io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, newline='\\n')
name = sys.argv[1]
vars = " ".join([f"{v}={os.getenv(v)}" for v in sys.argv[2:]])
print(f"{name} CALLED with {vars}" if vars else f"{name} CALLED")
"""
)
)
@pytest.fixture(name="keyring")
def keyring_fixture(mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch) -> Keyring:
from unearth.auth import AuthInfo, KeyringBaseProvider
class MockKeyringProvider(KeyringBaseProvider):
def __init__(self) -> None:
self._store: dict[str, dict[str, str]] = {}
def save_auth_info(self, url: str, username: str, password: str) -> None:
self._store.setdefault(url, {})[username] = password
def get_auth_info(self, url: str, username: str | None) -> AuthInfo | None:
d = self._store.get(url, {})
if username is not None and username in d:
return username, d[username]
if username is None and d:
return next(iter(d.items()))
return None
def delete_auth_info(self, url: str, username: str) -> None:
self._store.get(url, {}).pop(username, None)
provider = MockKeyringProvider()
mocker.patch("unearth.auth.get_keyring_provider", return_value=provider)
monkeypatch.setattr(keyring, "provider", provider)
monkeypatch.setattr(keyring, "enabled", True)
keyring.get_auth_info.cache_clear()
return keyring
| PublishMock |
python | anthropics__anthropic-sdk-python | src/anthropic/types/web_search_tool_request_error_param.py | {
"start": 230,
"end": 498
} | class ____(TypedDict, total=False):
error_code: Required[
Literal["invalid_tool_input", "unavailable", "max_uses_exceeded", "too_many_requests", "query_too_long"]
]
type: Required[Literal["web_search_tool_result_error"]]
| WebSearchToolRequestErrorParam |
python | huggingface__transformers | src/transformers/models/kyutai_speech_to_text/modeling_kyutai_speech_to_text.py | {
"start": 11197,
"end": 14233
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: KyutaiSpeechToTextConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[KyutaiSpeechToTextConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
| KyutaiSpeechToTextRotaryEmbedding |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/c_tests.py | {
"start": 4164,
"end": 5042
} | class ____(Task.Task):
color = 'PINK'
def run(self):
txt = self.inputs[0].read(flags='rb').decode('latin-1')
if txt.find('LiTTleEnDian') > -1:
self.generator.tmp.append('little')
elif txt.find('BIGenDianSyS') > -1:
self.generator.tmp.append('big')
else:
return -1
@feature('grep_for_endianness')
@after_method('apply_link')
def grep_for_endianness_fun(self):
self.create_task('grep_for_endianness', self.link_task.outputs[0])
@conf
def check_endianness(self):
tmp = []
def check_msg(self):
return tmp[0]
self.check(
fragment=ENDIAN_FRAGMENT,
features='c cshlib grep_for_endianness',
msg='Checking for endianness',
define='ENDIANNESS',
tmp=tmp,
okmsg=check_msg,
confcache=None
)
return tmp[0]
| grep_for_endianness |
python | jschneier__django-storages | tests/test_utils.py | {
"start": 436,
"end": 1636
} | class ____(TestCase):
def test_clean_name(self):
"""Test the base case of clean_name."""
path = utils.clean_name("path/to/somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_clean_name_pathlib(self):
"""Test for pathlib.Path handling."""
path = pathlib.Path("path/to/anywhere")
self.assertEqual(utils.clean_name(path), "path/to/anywhere")
path = pathlib.PurePath("path/to/anywhere")
self.assertEqual(utils.clean_name(path), "path/to/anywhere")
def test_clean_name_normalize(self):
"""
Test the normalization of clean_name
"""
path = utils.clean_name("path/to/../somewhere")
self.assertEqual(path, "path/somewhere")
def test_clean_name_trailing_slash(self):
"""Test the clean_name when the path has a trailing slash."""
path = utils.clean_name("path/to/somewhere/")
self.assertEqual(path, "path/to/somewhere/")
def test_clean_name_windows(self):
"""Test the clean_name when the path has a trailing slash."""
path = utils.clean_name("path\\to\\somewhere")
self.assertEqual(path, "path/to/somewhere")
| CleanNameTests |
python | kamyu104__LeetCode-Solutions | Python/three-divisors.py | {
"start": 35,
"end": 330
} | class ____(object):
def isThree(self, n):
"""
:type n: int
:rtype: bool
"""
cnt = 0
i = 1
while i*i <= n and cnt <= 3:
if n%i == 0:
cnt += 1 if i*i == n else 2
i += 1
return cnt == 3
| Solution |
python | django__django | tests/check_framework/test_model_checks.py | {
"start": 5171,
"end": 9243
} | class ____(SimpleTestCase):
def test_collision_in_same_model(self):
index = models.Index(fields=["id"], name="foo")
class Model(models.Model):
class Meta:
indexes = [index, index]
self.assertEqual(
checks.run_checks(app_configs=self.apps.get_app_configs()),
[
Error(
"index name 'foo' is not unique for model check_framework.Model.",
id="models.E029",
),
],
)
def test_collision_in_different_models(self):
index = models.Index(fields=["id"], name="foo")
class Model1(models.Model):
class Meta:
indexes = [index]
class Model2(models.Model):
class Meta:
indexes = [index]
self.assertEqual(
checks.run_checks(app_configs=self.apps.get_app_configs()),
[
Error(
"index name 'foo' is not unique among models: "
"check_framework.Model1, check_framework.Model2.",
id="models.E030",
),
],
)
def test_collision_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
indexes = [models.Index(fields=["id"], name="foo")]
abstract = True
class Model1(AbstractModel):
pass
class Model2(AbstractModel):
pass
self.assertEqual(
checks.run_checks(app_configs=self.apps.get_app_configs()),
[
Error(
"index name 'foo' is not unique among models: "
"check_framework.Model1, check_framework.Model2.",
id="models.E030",
),
],
)
def test_no_collision_abstract_model_interpolation(self):
class AbstractModel(models.Model):
name = models.CharField(max_length=20)
class Meta:
indexes = [
models.Index(fields=["name"], name="%(app_label)s_%(class)s_foo")
]
abstract = True
class Model1(AbstractModel):
pass
class Model2(AbstractModel):
pass
self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [])
@modify_settings(INSTALLED_APPS={"append": "basic"})
@isolate_apps("basic", "check_framework", kwarg_name="apps")
def test_collision_across_apps(self, apps):
index = models.Index(fields=["id"], name="foo")
class Model1(models.Model):
class Meta:
app_label = "basic"
indexes = [index]
class Model2(models.Model):
class Meta:
app_label = "check_framework"
indexes = [index]
self.assertEqual(
checks.run_checks(app_configs=apps.get_app_configs()),
[
Error(
"index name 'foo' is not unique among models: basic.Model1, "
"check_framework.Model2.",
id="models.E030",
),
],
)
@modify_settings(INSTALLED_APPS={"append": "basic"})
@isolate_apps("basic", "check_framework", kwarg_name="apps")
def test_no_collision_across_apps_interpolation(self, apps):
index = models.Index(fields=["id"], name="%(app_label)s_%(class)s_foo")
class Model1(models.Model):
class Meta:
app_label = "basic"
constraints = [index]
class Model2(models.Model):
class Meta:
app_label = "check_framework"
constraints = [index]
self.assertEqual(checks.run_checks(app_configs=apps.get_app_configs()), [])
@isolate_apps("check_framework", attr_name="apps")
@override_system_checks([checks.model_checks.check_all_models])
@skipUnlessDBFeature("supports_table_check_constraints")
| IndexNameTests |
python | simonw__datasette | datasette/permissions.py | {
"start": 293,
"end": 847
} | class ____:
"""Context manager to temporarily skip permission checks.
This is not a stable API and may change in future releases.
Usage:
with SkipPermissions():
# Permission checks are skipped within this block
response = await datasette.client.get("/protected")
"""
def __enter__(self):
self.token = _skip_permission_checks.set(True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
_skip_permission_checks.reset(self.token)
return False
| SkipPermissions |
python | sphinx-doc__sphinx | tests/roots/test-add_enumerable_node/enumerable_node.py | {
"start": 72,
"end": 242
} | class ____(nodes.figure):
pass
def visit_my_figure(self, node):
self.visit_figure(node)
def depart_my_figure(self, node):
self.depart_figure(node)
| my_figure |
python | prabhupant__python-ds | data_structures/graphs/cycle_in_undirected_graph_iterative.py | {
"start": 654,
"end": 1677
} | class ____:
def __init__(self, vertices):
self.graph = defaultdict(list)
self.vertices = vertices
def add_edge(self, u, v):
self.graph[u].append(v)
self.graph[v].append(u)
def dfs(self):
visited = [False] * self.vertices
parent = [-1] * self.vertices
stack = []
for v in range(self.vertices):
if visited[v] == False:
visited[v] = True
stack.append(v)
while stack:
s = stack.pop()
for i in self.graph[s]:
if visited[i] == False:
parent[i] = s
stack.append(i)
visited[i] = True
elif parent[s] != i:
return "Contains Cycle"
return "No cycle"
g = Graph(5)
g.add_edge(1, 0)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(0, 3)
g.add_edge(3, 4)
print(g.dfs())
| Graph |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 9161,
"end": 9334
} | class ____(A):
# This should generate error because list[int] is narrower
# than Iterable[int].
def test(self, t: list[int]) -> Sequence[str]: ...
| NarrowerArgument |
python | google__python-fire | fire/test_components.py | {
"start": 5219,
"end": 5434
} | class ____:
def __eq__(self, other):
raise ValueError('Instances of this class cannot be compared.')
def __ne__(self, other):
raise ValueError('Instances of this class cannot be compared.')
| NonComparable |
python | pytorch__pytorch | test/mobile/model_test/math_ops.py | {
"start": 13775,
"end": 14384
} | class ____(torch.nn.Module):
def forward(self):
return self.spectral_ops()
def spectral_ops(self):
a = torch.randn(10)
b = torch.randn(10, 8, 4, 2)
return len(
torch.stft(a, 8),
torch.stft(a, torch.tensor(8)),
torch.istft(b, 8),
torch.bartlett_window(2, dtype=torch.float),
torch.blackman_window(2, dtype=torch.float),
torch.hamming_window(4, dtype=torch.float),
torch.hann_window(4, dtype=torch.float),
torch.kaiser_window(4, dtype=torch.float),
)
| SpectralOpsModule |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_bytes_returned.py | {
"start": 891,
"end": 1075
} | class ____:
""" __bytes__ returns node which does not have 'value' in AST """
def __bytes__(self): # [invalid-bytes-returned]
return lambda: b"some bytes"
| ThirdBadBytes |
python | numba__numba | numba/core/imputils.py | {
"start": 275,
"end": 3816
} | class ____(object):
"""
A registry of function and attribute implementations.
"""
def __init__(self, name='unspecified'):
self.name = name
self.functions = []
self.getattrs = []
self.setattrs = []
self.casts = []
self.constants = []
def lower(self, func, *argtys):
"""
Decorate an implementation of *func* for the given argument types.
*func* may be an actual global function object, or any
pseudo-function supported by Numba, such as "getitem".
The decorated implementation has the signature
(context, builder, sig, args).
"""
def decorate(impl):
self.functions.append((impl, func, argtys))
return impl
return decorate
def _decorate_attr(self, impl, ty, attr, impl_list, decorator):
real_impl = decorator(impl, ty, attr)
impl_list.append((real_impl, attr, real_impl.signature))
return impl
def lower_getattr(self, ty, attr):
"""
Decorate an implementation of __getattr__ for type *ty* and
the attribute *attr*.
The decorated implementation will have the signature
(context, builder, typ, val).
"""
def decorate(impl):
return self._decorate_attr(impl, ty, attr, self.getattrs,
_decorate_getattr)
return decorate
def lower_getattr_generic(self, ty):
"""
Decorate the fallback implementation of __getattr__ for type *ty*.
The decorated implementation will have the signature
(context, builder, typ, val, attr). The implementation is
called for attributes which haven't been explicitly registered
with lower_getattr().
"""
return self.lower_getattr(ty, None)
def lower_setattr(self, ty, attr):
"""
Decorate an implementation of __setattr__ for type *ty* and
the attribute *attr*.
The decorated implementation will have the signature
(context, builder, sig, args).
"""
def decorate(impl):
return self._decorate_attr(impl, ty, attr, self.setattrs,
_decorate_setattr)
return decorate
def lower_setattr_generic(self, ty):
"""
Decorate the fallback implementation of __setattr__ for type *ty*.
The decorated implementation will have the signature
(context, builder, sig, args, attr). The implementation is
called for attributes which haven't been explicitly registered
with lower_setattr().
"""
return self.lower_setattr(ty, None)
def lower_cast(self, fromty, toty):
"""
Decorate the implementation of implicit conversion between
*fromty* and *toty*.
The decorated implementation will have the signature
(context, builder, fromty, toty, val).
"""
def decorate(impl):
self.casts.append((impl, (fromty, toty)))
return impl
return decorate
def lower_constant(self, ty):
"""
Decorate the implementation for creating a constant of type *ty*.
The decorated implementation will have the signature
(context, builder, ty, pyval).
"""
def decorate(impl):
self.constants.append((impl, (ty,)))
return impl
return decorate
def __repr__(self):
return f"Lowering Registry<{self.name}>"
| Registry |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/key_binding/key_bindings.py | {
"start": 16403,
"end": 18132
} | class ____(_Proxy):
"""
Wraps around a `KeyBindings`. Disable/enable all the key bindings according to
the given (additional) filter.::
@Condition
def setting_is_true():
return True # or False
registry = ConditionalKeyBindings(key_bindings, setting_is_true)
When new key bindings are added to this object. They are also
enable/disabled according to the given `filter`.
:param registries: List of :class:`.KeyBindings` objects.
:param filter: :class:`~prompt_toolkit.filters.Filter` object.
"""
def __init__(
self, key_bindings: KeyBindingsBase, filter: FilterOrBool = True
) -> None:
_Proxy.__init__(self)
self.key_bindings = key_bindings
self.filter = to_filter(filter)
def _update_cache(self) -> None:
"If the original key bindings was changed. Update our copy version."
expected_version = self.key_bindings._version
if self._last_version != expected_version:
bindings2 = KeyBindings()
# Copy all bindings from `self.key_bindings`, adding our condition.
for b in self.key_bindings.bindings:
bindings2.bindings.append(
Binding(
keys=b.keys,
handler=b.handler,
filter=self.filter & b.filter,
eager=b.eager,
is_global=b.is_global,
save_before=b.save_before,
record_in_macro=b.record_in_macro,
)
)
self._bindings2 = bindings2
self._last_version = expected_version
| ConditionalKeyBindings |
python | pandas-dev__pandas | asv_bench/benchmarks/io/csv.py | {
"start": 3128,
"end": 4224
} | class ____(BaseIO):
fname = "__test__.csv"
params = ([1000, 10000], ["D", "h"])
param_names = ["nobs", "freq"]
def setup(self, nobs, freq):
rng = period_range(start="2000-01-01", periods=nobs, freq=freq)
self.data = DataFrame(rng)
if freq == "D":
self.default_fmt = "%Y-%m-%d"
elif freq == "h":
self.default_fmt = "%Y-%m-%d %H:00"
def time_frame_period_formatting_default(self, nobs, freq):
self.data.to_csv(self.fname)
def time_frame_period_formatting_default_explicit(self, nobs, freq):
self.data.to_csv(self.fname, date_format=self.default_fmt)
def time_frame_period_formatting(self, nobs, freq):
# Nb: `date_format` is not actually taken into account here today, so the
# performance is currently identical to `time_frame_period_formatting_default`
# above. This timer is therefore expected to degrade when GH#51621 is fixed.
# (Remove this comment when GH#51621 is fixed.)
self.data.to_csv(self.fname, date_format="%Y-%m-%d___%H:%M:%S")
| ToCSVPeriod |
python | django-haystack__django-haystack | test_haystack/test_query.py | {
"start": 36829,
"end": 38021
} | class ____(TestCase):
fixtures = ["base_data"]
def setUp(self):
super().setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.cpkmmsi = CharPKMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.cpkmmsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
self.msqs = SearchQuerySet()
# Stow.
reset_search_queries()
def tearDown(self):
# Restore.
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_pickling(self):
results = self.msqs.all()
for res in results:
# Make sure the cache is full.
pass
in_a_pickle = pickle.dumps(results)
like_a_cuke = pickle.loads(in_a_pickle)
self.assertEqual(len(like_a_cuke), len(results))
self.assertEqual(like_a_cuke[0].id, results[0].id)
| PickleSearchQuerySetTestCase |
python | pydantic__pydantic | pydantic/mypy.py | {
"start": 3266,
"end": 5666
} | class ____(Plugin):
"""The Pydantic mypy plugin."""
def __init__(self, options: Options) -> None:
self.plugin_config = PydanticPluginConfig(options)
self._plugin_data = self.plugin_config.to_data()
super().__init__(options)
def get_base_class_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
"""Update Pydantic model class."""
sym = self.lookup_fully_qualified(fullname)
if sym and isinstance(sym.node, TypeInfo): # pragma: no branch
# No branching may occur if the mypy cache has not been cleared
if sym.node.has_base(BASEMODEL_FULLNAME):
return self._pydantic_model_class_maker_callback
return None
def get_metaclass_hook(self, fullname: str) -> Callable[[ClassDefContext], None] | None:
"""Update Pydantic `ModelMetaclass` definition."""
if fullname == MODEL_METACLASS_FULLNAME:
return self._pydantic_model_metaclass_marker_callback
return None
def get_method_hook(self, fullname: str) -> Callable[[MethodContext], Type] | None:
"""Adjust return type of `from_orm` method call."""
if fullname.endswith('.from_orm'):
return from_attributes_callback
return None
def report_config_data(self, ctx: ReportConfigContext) -> dict[str, Any]:
"""Return all plugin config data.
Used by mypy to determine if cache needs to be discarded.
"""
return self._plugin_data
def _pydantic_model_class_maker_callback(self, ctx: ClassDefContext) -> None:
transformer = PydanticModelTransformer(ctx.cls, ctx.reason, ctx.api, self.plugin_config)
transformer.transform()
def _pydantic_model_metaclass_marker_callback(self, ctx: ClassDefContext) -> None:
"""Reset dataclass_transform_spec attribute of ModelMetaclass.
Let the plugin handle it. This behavior can be disabled
if 'debug_dataclass_transform' is set to True', for testing purposes.
"""
if self.plugin_config.debug_dataclass_transform:
return
info_metaclass = ctx.cls.info.declared_metaclass
assert info_metaclass, "callback not passed from 'get_metaclass_hook'"
if getattr(info_metaclass.type, 'dataclass_transform_spec', None):
info_metaclass.type.dataclass_transform_spec = None
| PydanticPlugin |
python | catalyst-team__catalyst | catalyst/contrib/layers/pooling.py | {
"start": 5023,
"end": 5861
} | class ____(nn.Module):
"""@TODO: Docs (add `Example`). Contribution is welcome."""
def __init__(self, in_features, activation_fn="Sigmoid"):
"""@TODO: Docs. Contribution is welcome."""
super().__init__()
self.avg = GlobalAvgPool2d()
self.max = GlobalMaxPool2d()
self.attn = GlobalAttnPool2d(in_features, activation_fn)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward call."""
return torch.cat([self.avg(x), self.max(x), self.attn(x)], 1)
@staticmethod
def out_features(in_features):
"""Returns number of channels produced by the pooling.
Args:
in_features: number of channels in the input sample
Returns:
number of output features
"""
return in_features * 3
| GlobalConcatAttnPool2d |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/errors.py | {
"start": 17455,
"end": 18871
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "UnsupportedOperationError"
def __init__(self, message: Optional[str] = None):
super().__init__()
self.message = check.str_param(message, "message") or "Unsupported operation."
types = [
GrapheneAssetNotFoundError,
GrapheneConflictingExecutionParamsError,
GrapheneConfigTypeNotFoundError,
GrapheneDagsterTypeNotFoundError,
GrapheneError,
GrapheneInvalidOutputError,
GrapheneInvalidPipelineRunsFilterError,
GrapheneInvalidStepError,
GrapheneInvalidSubsetError,
GrapheneModeNotFoundError,
GrapheneNoModeProvidedError,
GraphenePartitionSetNotFoundError,
GraphenePipelineNotFoundError,
GraphenePipelineRunConflict,
GrapheneRunConflict,
GraphenePipelineRunNotFoundError,
GraphenePipelineSnapshotNotFoundError,
GraphenePresetNotFoundError,
GraphenePythonError,
GrapheneUnauthorizedError,
GrapheneReloadNotSupported,
GrapheneRepositoryLocationNotFound,
GrapheneRepositoryNotFoundError,
GrapheneResourceNotFoundError,
GrapheneRunGroupNotFoundError,
GrapheneRunNotFoundError,
GrapheneScheduleNotFoundError,
GrapheneSchedulerNotDefinedError,
GrapheneSensorNotFoundError,
GrapheneUnsupportedOperationError,
GrapheneDuplicateDynamicPartitionError,
]
| GrapheneUnsupportedOperationError |
python | ray-project__ray | python/ray/util/collective/tests/single_node_cpu_tests/test_gloo_group_isolation.py | {
"start": 232,
"end": 1724
} | class ____:
def __init__(self):
pass
def init_gloo_group(
self, world_size: int, rank: int, group_name: str, gloo_timeout: int = 30000
):
col.init_collective_group(
world_size, rank, Backend.GLOO, group_name, gloo_timeout
)
return True
def get_gloo_timeout(self, group_name: str) -> int:
g = col.get_group_handle(group_name)
# Check if the group is initialized correctly
assert isinstance(g, GLOOGroup)
return g._gloo_context.getTimeout()
def test_two_groups_in_one_cluster(ray_start_single_node):
name1 = "name_1"
name2 = "name_2"
time1 = 40000
time2 = 60000
w1 = Worker.remote()
ret1 = w1.init_gloo_group.remote(1, 0, name1, time1)
w2 = Worker.remote()
ret2 = w2.init_gloo_group.remote(1, 0, name2, time2)
assert ray.get(ret1)
assert ray.get(ret2)
assert ray.get(w1.get_gloo_timeout.remote(name1)) == time1
assert ray.get(w2.get_gloo_timeout.remote(name2)) == time2
def test_failure_when_initializing(shutdown_only):
# job1
ray.init()
w1 = Worker.remote()
ret1 = w1.init_gloo_group.remote(2, 0, "name_1")
ray.wait([ret1], timeout=1)
time.sleep(5)
ray.shutdown()
# job2
ray.init()
w2 = Worker.remote()
ret2 = w2.init_gloo_group.remote(1, 0, "name_1")
assert ray.get(ret2)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", "-x", __file__]))
| Worker |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/assets.py | {
"start": 442,
"end": 651
} | class ____(graphene.ObjectType):
assets = non_null_list(GrapheneAssetRecord)
cursor = graphene.Field(graphene.String)
class Meta:
name = "AssetRecordConnection"
| GrapheneAssetRecordConnection |
python | python-markdown__markdown | markdown/inlinepatterns.py | {
"start": 13072,
"end": 13342
} | class ____(Pattern): # pragma: no cover
""" Return a simple text of `group(2)` of a Pattern. """
def handleMatch(self, m: re.Match[str]) -> str:
""" Return string content of `group(2)` of a matching pattern. """
return m.group(2)
| SimpleTextPattern |
python | scipy__scipy | scipy/_lib/_array_api.py | {
"start": 24837,
"end": 37757
} | class ____:
cpu: bool | None # None if not applicable
gpu: bool | None
warnings: list[str] = dataclasses.field(default_factory=list)
def _render(self, value):
if value is None:
return "n/a"
if not value:
return "⛔"
if self.warnings:
res = "⚠️ " + '; '.join(self.warnings)
assert len(res) <= 20, "Warnings too long"
return res
return "✅"
def __str__(self):
cpu = self._render(self.cpu)
gpu = self._render(self.gpu)
return f"{cpu:20} {gpu:20}"
def _make_sphinx_capabilities(
# lists of tuples [(module name, reason), ...]
skip_backends=(), xfail_backends=(),
# @pytest.mark.skip/xfail_xp_backends kwargs
cpu_only=False, np_only=False, out_of_scope=False, exceptions=(),
# xpx.lazy_xp_backends kwargs
allow_dask_compute=False, jax_jit=True,
# list of tuples [(module name, reason), ...]
warnings = (),
# unused in documentation
reason=None,
):
if out_of_scope:
return {"out_of_scope": True}
exceptions = set(exceptions)
# Default capabilities
capabilities = {
"numpy": _XPSphinxCapability(cpu=True, gpu=None),
"array_api_strict": _XPSphinxCapability(cpu=True, gpu=None),
"cupy": _XPSphinxCapability(cpu=None, gpu=True),
"torch": _XPSphinxCapability(cpu=True, gpu=True),
"jax.numpy": _XPSphinxCapability(cpu=True, gpu=True,
warnings=[] if jax_jit else ["no JIT"]),
# Note: Dask+CuPy is currently untested and unsupported
"dask.array": _XPSphinxCapability(cpu=True, gpu=None,
warnings=["computes graph"] if allow_dask_compute else []),
}
# documentation doesn't display the reason
for module, _ in list(skip_backends) + list(xfail_backends):
backend = capabilities[module]
if backend.cpu is not None:
backend.cpu = False
if backend.gpu is not None:
backend.gpu = False
for module, backend in capabilities.items():
if np_only and module not in exceptions | {"numpy"}:
if backend.cpu is not None:
backend.cpu = False
if backend.gpu is not None:
backend.gpu = False
elif cpu_only and module not in exceptions and backend.gpu is not None:
backend.gpu = False
for module, warning in warnings:
backend = capabilities[module]
backend.warnings.append(warning)
return capabilities
def _make_capabilities_note(fun_name, capabilities, extra_note=None):
if "out_of_scope" in capabilities:
# It will be better to link to a section of the dev-arrayapi docs
# that explains what is and isn't in-scope, but such a section
# doesn't exist yet. Using :ref:`dev-arrayapi` as a placeholder.
note = f"""
**Array API Standard Support**
`{fun_name}` is not in-scope for support of Python Array API Standard compatible
backends other than NumPy.
See :ref:`dev-arrayapi` for more information.
"""
return textwrap.dedent(note)
# Note: deliberately not documenting array-api-strict
note = f"""
**Array API Standard Support**
`{fun_name}` has experimental support for Python Array API Standard compatible
backends in addition to NumPy. Please consider testing these features
by setting an environment variable ``SCIPY_ARRAY_API=1`` and providing
CuPy, PyTorch, JAX, or Dask arrays as array arguments. The following
combinations of backend and device (or other capability) are supported.
==================== ==================== ====================
Library CPU GPU
==================== ==================== ====================
NumPy {capabilities['numpy'] }
CuPy {capabilities['cupy'] }
PyTorch {capabilities['torch'] }
JAX {capabilities['jax.numpy'] }
Dask {capabilities['dask.array'] }
==================== ==================== ====================
""" + (extra_note or "") + " See :ref:`dev-arrayapi` for more information."
return textwrap.dedent(note)
def xp_capabilities(
*,
# Alternative capabilities table.
# Used only for testing this decorator.
capabilities_table=None,
# Generate pytest.mark.skip/xfail_xp_backends.
# See documentation in conftest.py.
# lists of tuples [(module name, reason), ...]
skip_backends=(), xfail_backends=(),
cpu_only=False, np_only=False, reason=None,
out_of_scope=False, exceptions=(),
# lists of tuples [(module name, reason), ...]
warnings=(),
# xpx.testing.lazy_xp_function kwargs.
# Refer to array-api-extra documentation.
allow_dask_compute=False, jax_jit=True,
# Extra note to inject into the docstring
extra_note=None,
):
"""Decorator for a function that states its support among various
Array API compatible backends.
This decorator has two effects:
1. It allows tagging tests with ``@make_xp_test_case`` or
``make_xp_pytest_param`` (see below) to automatically generate
SKIP/XFAIL markers and perform additional backend-specific
testing, such as extra validation for Dask and JAX;
2. It automatically adds a note to the function's docstring, containing
a table matching what has been tested.
See Also
--------
make_xp_test_case
make_xp_pytest_param
array_api_extra.testing.lazy_xp_function
"""
capabilities_table = (xp_capabilities_table if capabilities_table is None
else capabilities_table)
if out_of_scope:
np_only = True
capabilities = dict(
skip_backends=skip_backends,
xfail_backends=xfail_backends,
cpu_only=cpu_only,
np_only=np_only,
out_of_scope=out_of_scope,
reason=reason,
exceptions=exceptions,
allow_dask_compute=allow_dask_compute,
jax_jit=jax_jit,
warnings=warnings,
)
sphinx_capabilities = _make_sphinx_capabilities(**capabilities)
def decorator(f):
# Don't use a wrapper, as in some cases @xp_capabilities is
# applied to a ufunc
capabilities_table[f] = capabilities
note = _make_capabilities_note(f.__name__, sphinx_capabilities, extra_note)
doc = FunctionDoc(f)
doc['Notes'].append(note)
doc = str(doc).split("\n", 1)[1].lstrip(" \n") # remove signature
try:
f.__doc__ = doc
except AttributeError:
# Can't update __doc__ on ufuncs if SciPy
# was compiled against NumPy < 2.2.
pass
return f
return decorator
def make_xp_test_case(*funcs, capabilities_table=None):
capabilities_table = (xp_capabilities_table if capabilities_table is None
else capabilities_table)
"""Generate pytest decorator for a test function that tests functionality
of one or more Array API compatible functions.
Read the parameters of the ``@xp_capabilities`` decorator applied to the
listed functions and:
- Generate the ``@pytest.mark.skip_xp_backends`` and
``@pytest.mark.xfail_xp_backends`` decorators
for the decorated test function
- Tag the function with `xpx.testing.lazy_xp_function`
Example::
@make_xp_test_case(f1)
def test_f1(xp):
...
@make_xp_test_case(f2)
def test_f2(xp):
...
@make_xp_test_case(f1, f2)
def test_f1_and_f2(xp):
...
The above is equivalent to::
@pytest.mark.skip_xp_backends(...)
@pytest.mark.skip_xp_backends(...)
@pytest.mark.xfail_xp_backends(...)
@pytest.mark.xfail_xp_backends(...)
def test_f1(xp):
...
etc., where the arguments of ``skip_xp_backends`` and ``xfail_xp_backends`` are
determined by the ``@xp_capabilities`` decorator applied to the functions.
See Also
--------
xp_capabilities
make_xp_pytest_marks
make_xp_pytest_param
array_api_extra.testing.lazy_xp_function
"""
marks = make_xp_pytest_marks(*funcs, capabilities_table=capabilities_table)
return lambda func: functools.reduce(lambda f, g: g(f), marks, func)
def make_xp_pytest_param(func, *args, capabilities_table=None):
"""Variant of ``make_xp_test_case`` that returns a pytest.param for a function,
with all necessary skip_xp_backends and xfail_xp_backends marks applied::
@pytest.mark.parametrize(
"func", [make_xp_pytest_param(f1), make_xp_pytest_param(f2)]
)
def test(func, xp):
...
The above is equivalent to::
@pytest.mark.parametrize(
"func", [
pytest.param(f1, marks=[
pytest.mark.skip_xp_backends(...),
pytest.mark.xfail_xp_backends(...), ...]),
pytest.param(f2, marks=[
pytest.mark.skip_xp_backends(...),
pytest.mark.xfail_xp_backends(...), ...]),
)
def test(func, xp):
...
Parameters
----------
func : Callable
Function to be tested. It must be decorated with ``@xp_capabilities``.
*args : Any, optional
Extra pytest parameters for the use case, e.g.::
@pytest.mark.parametrize("func,verb", [
make_xp_pytest_param(f1, "hello"),
make_xp_pytest_param(f2, "world")])
def test(func, verb, xp):
# iterates on (func=f1, verb="hello")
# and (func=f2, verb="world")
See Also
--------
xp_capabilities
make_xp_test_case
make_xp_pytest_marks
array_api_extra.testing.lazy_xp_function
"""
import pytest
marks = make_xp_pytest_marks(func, capabilities_table=capabilities_table)
return pytest.param(func, *args, marks=marks, id=func.__name__)
def make_xp_pytest_marks(*funcs, capabilities_table=None):
"""Variant of ``make_xp_test_case`` that returns a list of pytest marks,
which can be used with the module-level `pytestmark = ...` variable::
pytestmark = make_xp_pytest_marks(f1, f2)
def test(xp):
...
In this example, the whole test module is dedicated to testing `f1` or `f2`,
and the two functions have the same capabilities, so it's unnecessary to
cherry-pick which test tests which function.
The above is equivalent to::
pytestmark = [
pytest.mark.skip_xp_backends(...),
pytest.mark.xfail_xp_backends(...), ...]),
]
def test(xp):
...
See Also
--------
xp_capabilities
make_xp_test_case
make_xp_pytest_param
array_api_extra.testing.lazy_xp_function
"""
capabilities_table = (xp_capabilities_table if capabilities_table is None
else capabilities_table)
import pytest
marks = []
for func in funcs:
capabilities = capabilities_table[func]
exceptions = capabilities['exceptions']
reason = capabilities['reason']
if capabilities['cpu_only']:
marks.append(pytest.mark.skip_xp_backends(
cpu_only=True, exceptions=exceptions, reason=reason))
if capabilities['np_only']:
marks.append(pytest.mark.skip_xp_backends(
np_only=True, exceptions=exceptions, reason=reason))
for mod_name, reason in capabilities['skip_backends']:
marks.append(pytest.mark.skip_xp_backends(mod_name, reason=reason))
for mod_name, reason in capabilities['xfail_backends']:
marks.append(pytest.mark.xfail_xp_backends(mod_name, reason=reason))
lazy_kwargs = {k: capabilities[k]
for k in ('allow_dask_compute', 'jax_jit')}
lazy_xp_function(func, **lazy_kwargs)
return marks
# Is it OK to have a dictionary that is mutated (once upon import) in many places?
xp_capabilities_table = {} # type: ignore[var-annotated]
def xp_device_type(a: Array) -> Literal["cpu", "cuda", None]:
if is_numpy_array(a):
return "cpu"
if is_cupy_array(a):
return "cuda"
if is_torch_array(a):
# TODO this can return other backends e.g. tpu but they're unsupported in scipy
return a.device.type
if is_jax_array(a):
# TODO this can return other backends e.g. tpu but they're unsupported in scipy
return "cuda" if (p := a.device.platform) == "gpu" else p
if is_dask_array(a):
return xp_device_type(a._meta)
# array-api-strict is a stand-in for unknown libraries; don't special-case it
return None
| _XPSphinxCapability |
python | PrefectHQ__prefect | src/prefect/server/database/orm_models.py | {
"start": 37323,
"end": 37590
} | class ____(Base):
"""SQLAlchemy model of a saved search."""
name: Mapped[str]
filters: Mapped[list[dict[str, Any]]] = mapped_column(
JSON, server_default="[]", default=list
)
__table_args__: Any = (sa.UniqueConstraint("name"),)
| SavedSearch |
python | pydata__xarray | xarray/tests/test_namedarray.py | {
"start": 5971,
"end": 24338
} | class ____(NamedArraySubclassobjects):
def cls(self, *args: Any, **kwargs: Any) -> NamedArray[Any, Any]:
return NamedArray(*args, **kwargs)
@pytest.fixture
def target(self, data: np.ndarray[Any, Any]) -> NamedArray[Any, Any]:
return NamedArray(["x", "y"], data)
@pytest.mark.parametrize(
"expected",
[
np.array([1, 2], dtype=np.dtype(np.int8)),
pytest.param(
[1, 2],
marks=pytest.mark.xfail(
reason="NamedArray only supports array-like objects"
),
),
],
)
def test_init(self, expected: Any) -> None:
super().test_init(expected)
@pytest.mark.parametrize(
"dims, data, expected, raise_error",
[
(("x",), [1, 2, 3], np.array([1, 2, 3]), False),
((1,), np.array([4, 5, 6]), np.array([4, 5, 6]), False),
((), 2, np.array(2), False),
# Fail:
(
("x",),
NamedArray("time", np.array([1, 2, 3], dtype=np.dtype(np.int64))),
np.array([1, 2, 3]),
True,
),
],
)
def test_from_array(
self,
dims: _DimsLike,
data: ArrayLike,
expected: np.ndarray[Any, Any],
raise_error: bool,
) -> None:
actual: NamedArray[Any, Any]
if raise_error:
with pytest.raises(TypeError, match="already a Named array"):
actual = from_array(dims, data)
# Named arrays are not allowed:
from_array(actual) # type: ignore[call-overload]
else:
actual = from_array(dims, data)
assert np.array_equal(np.asarray(actual.data), expected)
def test_from_array_with_masked_array(self) -> None:
masked_array: np.ndarray[Any, np.dtype[np.generic]]
masked_array = np.ma.array([1, 2, 3], mask=[False, True, False]) # type: ignore[no-untyped-call]
with pytest.raises(NotImplementedError):
from_array(("x",), masked_array)
def test_from_array_with_0d_object(self) -> None:
data = np.empty((), dtype=object)
data[()] = (10, 12, 12)
narr = from_array((), data)
np.array_equal(np.asarray(narr.data), data)
# TODO: Make xr.core.indexing.ExplicitlyIndexed pass as a subclass of_arrayfunction_or_api
# and remove this test.
def test_from_array_with_explicitly_indexed(
self, random_inputs: np.ndarray[Any, Any]
) -> None:
array: CustomArray[Any, Any]
array = CustomArray(random_inputs)
output: NamedArray[Any, Any]
output = from_array(("x", "y", "z"), array)
assert isinstance(output.data, np.ndarray)
array2: CustomArrayIndexable[Any, Any]
array2 = CustomArrayIndexable(random_inputs)
output2: NamedArray[Any, Any]
output2 = from_array(("x", "y", "z"), array2)
assert isinstance(output2.data, CustomArrayIndexable)
def test_real_and_imag(self) -> None:
expected_real: np.ndarray[Any, np.dtype[np.float64]]
expected_real = np.arange(3, dtype=np.float64)
expected_imag: np.ndarray[Any, np.dtype[np.float64]]
expected_imag = -np.arange(3, dtype=np.float64)
arr: np.ndarray[Any, np.dtype[np.complex128]]
arr = expected_real + 1j * expected_imag
named_array: NamedArray[Any, np.dtype[np.complex128]]
named_array = NamedArray(["x"], arr)
actual_real: duckarray[Any, np.dtype[np.float64]] = named_array.real.data
assert np.array_equal(np.asarray(actual_real), expected_real)
assert actual_real.dtype == expected_real.dtype
actual_imag: duckarray[Any, np.dtype[np.float64]] = named_array.imag.data
assert np.array_equal(np.asarray(actual_imag), expected_imag)
assert actual_imag.dtype == expected_imag.dtype
# Additional tests as per your original class-based code
@pytest.mark.parametrize(
"data, dtype",
[
("foo", np.dtype("U3")),
(b"foo", np.dtype("S3")),
],
)
def test_from_array_0d_string(self, data: Any, dtype: DTypeLike | None) -> None:
named_array: NamedArray[Any, Any]
named_array = from_array([], data)
assert named_array.data == data
assert named_array.dims == ()
assert named_array.sizes == {}
assert named_array.attrs == {}
assert named_array.ndim == 0
assert named_array.size == 1
assert named_array.dtype == dtype
def test_from_array_0d_object(self) -> None:
named_array: NamedArray[Any, Any]
named_array = from_array([], (10, 12, 12))
expected_data = np.empty((), dtype=object)
expected_data[()] = (10, 12, 12)
assert np.array_equal(np.asarray(named_array.data), expected_data)
assert named_array.dims == ()
assert named_array.sizes == {}
assert named_array.attrs == {}
assert named_array.ndim == 0
assert named_array.size == 1
assert named_array.dtype == np.dtype("O")
def test_from_array_0d_datetime(self) -> None:
named_array: NamedArray[Any, Any]
named_array = from_array([], np.datetime64("2000-01-01"))
assert named_array.dtype == np.dtype("datetime64[D]")
@pytest.mark.parametrize(
"timedelta, expected_dtype",
[
(np.timedelta64(1, "D"), np.dtype("timedelta64[D]")),
(np.timedelta64(1, "s"), np.dtype("timedelta64[s]")),
(np.timedelta64(1, "m"), np.dtype("timedelta64[m]")),
(np.timedelta64(1, "h"), np.dtype("timedelta64[h]")),
(np.timedelta64(1, "us"), np.dtype("timedelta64[us]")),
(np.timedelta64(1, "ns"), np.dtype("timedelta64[ns]")),
(np.timedelta64(1, "ps"), np.dtype("timedelta64[ps]")),
(np.timedelta64(1, "fs"), np.dtype("timedelta64[fs]")),
(np.timedelta64(1, "as"), np.dtype("timedelta64[as]")),
],
)
def test_from_array_0d_timedelta(
self, timedelta: np.timedelta64, expected_dtype: np.dtype[np.timedelta64]
) -> None:
named_array: NamedArray[Any, Any]
named_array = from_array([], timedelta)
assert named_array.dtype == expected_dtype
assert named_array.data == timedelta
@pytest.mark.parametrize(
"dims, data_shape, new_dims, raises",
[
(["x", "y", "z"], (2, 3, 4), ["a", "b", "c"], False),
(["x", "y", "z"], (2, 3, 4), ["a", "b"], True),
(["x", "y", "z"], (2, 4, 5), ["a", "b", "c", "d"], True),
([], [], (), False),
([], [], ("x",), True),
],
)
def test_dims_setter(
self, dims: Any, data_shape: Any, new_dims: Any, raises: bool
) -> None:
named_array: NamedArray[Any, Any]
named_array = NamedArray(dims, np.asarray(np.random.random(data_shape)))
assert named_array.dims == tuple(dims)
if raises:
with pytest.raises(ValueError):
named_array.dims = new_dims
else:
named_array.dims = new_dims
assert named_array.dims == tuple(new_dims)
def test_duck_array_class(self) -> None:
numpy_a: NDArray[np.int64]
numpy_a = np.array([2.1, 4], dtype=np.dtype(np.int64))
check_duck_array_typevar(numpy_a)
masked_a: np.ma.MaskedArray[Any, np.dtype[np.int64]]
masked_a = np.ma.asarray([2.1, 4], dtype=np.dtype(np.int64)) # type: ignore[no-untyped-call]
check_duck_array_typevar(masked_a) # type: ignore[arg-type] # MaskedArray not in duckarray union
custom_a: CustomArrayIndexable[Any, np.dtype[np.int64]]
custom_a = CustomArrayIndexable(numpy_a)
check_duck_array_typevar(custom_a)
def test_duck_array_class_array_api(self) -> None:
# Test numpy's array api:
nxp = pytest.importorskip("array_api_strict", minversion="1.0")
# TODO: nxp doesn't use dtype typevars, so can only use Any for the moment:
arrayapi_a: duckarray[Any, Any] # duckarray[Any, np.dtype[np.int64]]
arrayapi_a = nxp.asarray([2.1, 4], dtype=nxp.int64)
check_duck_array_typevar(arrayapi_a)
def test_new_namedarray(self) -> None:
dtype_float = np.dtype(np.float32)
narr_float: NamedArray[Any, np.dtype[np.float32]]
narr_float = NamedArray(("x",), np.array([1.5, 3.2], dtype=dtype_float))
assert narr_float.dtype == dtype_float
dtype_int = np.dtype(np.int8)
narr_int: NamedArray[Any, np.dtype[np.int8]]
narr_int = narr_float._new(("x",), np.array([1, 3], dtype=dtype_int))
assert narr_int.dtype == dtype_int
class Variable(
NamedArray[_ShapeType_co, _DType_co], Generic[_ShapeType_co, _DType_co]
):
@overload
def _new(
self,
dims: _DimsLike | Default = ...,
data: duckarray[Any, _DType] = ...,
attrs: _AttrsLike | Default = ...,
) -> Variable[Any, _DType]: ...
@overload
def _new(
self,
dims: _DimsLike | Default = ...,
data: Default = ...,
attrs: _AttrsLike | Default = ...,
) -> Variable[_ShapeType_co, _DType_co]: ...
def _new(
self,
dims: _DimsLike | Default = _default,
data: duckarray[Any, _DType] | Default = _default,
attrs: _AttrsLike | Default = _default,
) -> Variable[Any, _DType] | Variable[_ShapeType_co, _DType_co]:
dims_ = copy.copy(self._dims) if dims is _default else dims
attrs_: Mapping[Any, Any] | None
if attrs is _default:
attrs_ = None if self._attrs is None else self._attrs.copy()
else:
attrs_ = attrs
if data is _default:
return type(self)(dims_, copy.copy(self._data), attrs_)
cls_ = cast("type[Variable[Any, _DType]]", type(self))
return cls_(dims_, data, attrs_)
var_float: Variable[Any, np.dtype[np.float32]]
var_float = Variable(("x",), np.array([1.5, 3.2], dtype=dtype_float))
assert var_float.dtype == dtype_float
var_int: Variable[Any, np.dtype[np.int8]]
var_int = var_float._new(("x",), np.array([1, 3], dtype=dtype_int))
assert var_int.dtype == dtype_int
def test_replace_namedarray(self) -> None:
dtype_float = np.dtype(np.float32)
np_val: np.ndarray[Any, np.dtype[np.float32]]
np_val = np.array([1.5, 3.2], dtype=dtype_float)
np_val2: np.ndarray[Any, np.dtype[np.float32]]
np_val2 = 2 * np_val
narr_float: NamedArray[Any, np.dtype[np.float32]]
narr_float = NamedArray(("x",), np_val)
assert narr_float.dtype == dtype_float
narr_float2: NamedArray[Any, np.dtype[np.float32]]
narr_float2 = NamedArray(("x",), np_val2)
assert narr_float2.dtype == dtype_float
class Variable(
NamedArray[_ShapeType_co, _DType_co], Generic[_ShapeType_co, _DType_co]
):
@overload
def _new(
self,
dims: _DimsLike | Default = ...,
data: duckarray[Any, _DType] = ...,
attrs: _AttrsLike | Default = ...,
) -> Variable[Any, _DType]: ...
@overload
def _new(
self,
dims: _DimsLike | Default = ...,
data: Default = ...,
attrs: _AttrsLike | Default = ...,
) -> Variable[_ShapeType_co, _DType_co]: ...
def _new(
self,
dims: _DimsLike | Default = _default,
data: duckarray[Any, _DType] | Default = _default,
attrs: _AttrsLike | Default = _default,
) -> Variable[Any, _DType] | Variable[_ShapeType_co, _DType_co]:
dims_ = copy.copy(self._dims) if dims is _default else dims
attrs_: Mapping[Any, Any] | None
if attrs is _default:
attrs_ = None if self._attrs is None else self._attrs.copy()
else:
attrs_ = attrs
if data is _default:
return type(self)(dims_, copy.copy(self._data), attrs_)
cls_ = cast("type[Variable[Any, _DType]]", type(self))
return cls_(dims_, data, attrs_)
var_float: Variable[Any, np.dtype[np.float32]]
var_float = Variable(("x",), np_val)
assert var_float.dtype == dtype_float
var_float2: Variable[Any, np.dtype[np.float32]]
var_float2 = var_float._replace(("x",), np_val2)
assert var_float2.dtype == dtype_float
@pytest.mark.parametrize(
"dim,expected_ndim,expected_shape,expected_dims",
[
(None, 3, (1, 2, 5), (None, "x", "y")),
(_default, 3, (1, 2, 5), ("dim_2", "x", "y")),
("z", 3, (1, 2, 5), ("z", "x", "y")),
],
)
def test_expand_dims(
self,
target: NamedArray[Any, np.dtype[np.float32]],
dim: _Dim | Default,
expected_ndim: int,
expected_shape: _ShapeLike,
expected_dims: _DimsLike,
) -> None:
result = target.expand_dims(dim=dim)
assert result.ndim == expected_ndim
assert result.shape == expected_shape
assert result.dims == expected_dims
@pytest.mark.parametrize(
"dims, expected_sizes",
[
((), {"y": 5, "x": 2}),
(["y", "x"], {"y": 5, "x": 2}),
(["y", ...], {"y": 5, "x": 2}),
],
)
def test_permute_dims(
self,
target: NamedArray[Any, np.dtype[np.float32]],
dims: _DimsLike,
expected_sizes: dict[_Dim, _IntOrUnknown],
) -> None:
actual = target.permute_dims(*dims)
assert actual.sizes == expected_sizes
def test_permute_dims_errors(
self,
target: NamedArray[Any, np.dtype[np.float32]],
) -> None:
with pytest.raises(ValueError, match=r"'y'.*permuted list"):
dims = ["y"]
target.permute_dims(*dims)
@pytest.mark.parametrize(
"broadcast_dims,expected_ndim",
[
({"x": 2, "y": 5}, 2),
({"x": 2, "y": 5, "z": 2}, 3),
({"w": 1, "x": 2, "y": 5}, 3),
],
)
def test_broadcast_to(
self,
target: NamedArray[Any, np.dtype[np.float32]],
broadcast_dims: Mapping[_Dim, int],
expected_ndim: int,
) -> None:
expand_dims = set(broadcast_dims.keys()) - set(target.dims)
# loop over expand_dims and call .expand_dims(dim=dim) in a loop
for dim in expand_dims:
target = target.expand_dims(dim=dim)
result = target.broadcast_to(broadcast_dims)
assert result.ndim == expected_ndim
assert result.sizes == broadcast_dims
def test_broadcast_to_errors(
self, target: NamedArray[Any, np.dtype[np.float32]]
) -> None:
with pytest.raises(
ValueError,
match=r"operands could not be broadcast together with remapped shapes",
):
target.broadcast_to({"x": 2, "y": 2})
with pytest.raises(ValueError, match=r"Cannot add new dimensions"):
target.broadcast_to({"x": 2, "y": 2, "z": 2})
def test_warn_on_repeated_dimension_names(self) -> None:
with pytest.warns(UserWarning, match="Duplicate dimension names"):
NamedArray(("x", "x"), np.arange(4).reshape(2, 2))
def test_aggregation(self) -> None:
x: NamedArray[Any, np.dtype[np.int64]]
x = NamedArray(("x", "y"), np.arange(4).reshape(2, 2))
result = x.sum()
assert isinstance(result.data, np.ndarray)
def test_repr() -> None:
x: NamedArray[Any, np.dtype[np.uint64]]
x = NamedArray(("x",), np.array([0], dtype=np.uint64))
# Reprs should not crash:
r = x.__repr__()
x._repr_html_()
# Basic comparison:
assert r == "<xarray.NamedArray (x: 1)> Size: 8B\narray([0], dtype=uint64)"
@pytest.mark.parametrize(
"input_array, expected_chunksize_faked, expected_dtype",
[
(np.arange(100).reshape(10, 10), 1024, np.int64),
(np.arange(100).reshape(10, 10).astype(np.float32), 1024, np.float32),
],
)
def test_fake_target_chunksize(
input_array: DuckArray[Any],
expected_chunksize_faked: int,
expected_dtype: DTypeLike,
) -> None:
"""
Check that `fake_target_chunksize` returns the expected chunksize and dtype.
- It pretends to dask we are chunking an array with an 8-byte dtype, ie. a float64.
As such, it will *double* the amount of memory a 4-byte dtype (like float32) would try to use,
fooling it into actually using the correct amount of memory. For object dtypes, which are
generally larger, it will reduce the effective dask configuration chunksize, reducing the size of
the arrays per chunk such that we get the same amount of memory used.
"""
target_chunksize = 1024
faked_chunksize, dtype = fake_target_chunksize(input_array, target_chunksize)
assert faked_chunksize == expected_chunksize_faked
assert dtype == expected_dtype
@requires_cftime
def test_fake_target_chunksize_cftime() -> None:
"""
Check that `fake_target_chunksize` returns the expected chunksize and dtype.
- It pretends to dask we are chunking an array with an 8-byte dtype, ie. a float64.
- This is the same as the above test, but specifically for a CFTime array case - split for testing reasons
"""
import cftime
target_chunksize = 1024
input_array = np.array(
[
cftime.Datetime360Day(2000, month, day, 0, 0, 0, 0)
for month in range(1, 11)
for day in range(1, 11)
],
dtype=object,
).reshape(10, 10)
faked_chunksize, dtype = fake_target_chunksize(input_array, target_chunksize) # type: ignore[arg-type,unused-ignore]
assert faked_chunksize == 73
assert dtype == np.float64
| TestNamedArray |
python | django__django | django/contrib/gis/gdal/geometries.py | {
"start": 26581,
"end": 26630
} | class ____(GeometryCollection):
pass
| MultiPoint |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_styles08.py | {
"start": 380,
"end": 4796
} | class ____(unittest.TestCase):
"""
Test assembling a complete Styles file.
"""
def test_assemble_xml_file(self):
"""Test for simple fills with a default solid pattern."""
self.maxDiff = None
fh = StringIO()
style = Styles()
style._set_filehandle(fh)
workbook = Workbook()
workbook.add_format({"pattern": 1, "bg_color": "red", "bold": 1})
workbook.add_format({"bg_color": "red", "italic": 1})
workbook.add_format({"fg_color": "red", "underline": 1})
workbook._set_default_xf_indices()
workbook._prepare_format_properties()
style._set_style_properties(
[
workbook.xf_formats,
workbook.palette,
workbook.font_count,
workbook.num_formats,
workbook.border_count,
workbook.fill_count,
workbook.custom_colors,
workbook.dxf_formats,
workbook.has_comments,
]
)
style._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<fonts count="4">
<font>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
<font>
<b/>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
<font>
<i/>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
<font>
<u/>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
</fonts>
<fills count="3">
<fill>
<patternFill patternType="none"/>
</fill>
<fill>
<patternFill patternType="gray125"/>
</fill>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFF0000"/>
<bgColor indexed="64"/>
</patternFill>
</fill>
</fills>
<borders count="1">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
</borders>
<cellStyleXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0"/>
</cellStyleXfs>
<cellXfs count="4">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>
<xf numFmtId="0" fontId="1" fillId="2" borderId="0" xfId="0" applyFont="1" applyFill="1"/>
<xf numFmtId="0" fontId="2" fillId="2" borderId="0" xfId="0" applyFont="1" applyFill="1"/>
<xf numFmtId="0" fontId="3" fillId="2" borderId="0" xfId="0" applyFont="1" applyFill="1"/>
</cellXfs>
<cellStyles count="1">
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
<dxfs count="0"/>
<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/>
</styleSheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleStyles |
python | anthropics__anthropic-sdk-python | tests/api_resources/test_completions.py | {
"start": 384,
"end": 4732
} | class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create_overload_1(self, client: Anthropic) -> None:
completion = client.completions.create(
max_tokens_to_sample=256,
model="claude-opus-4-5-20251101",
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
)
assert_matches_type(Completion, completion, path=["response"])
@parametrize
def test_method_create_with_all_params_overload_1(self, client: Anthropic) -> None:
completion = client.completions.create(
max_tokens_to_sample=256,
model="claude-opus-4-5-20251101",
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
stop_sequences=["string"],
stream=False,
temperature=1,
top_k=5,
top_p=0.7,
betas=["string"],
)
assert_matches_type(Completion, completion, path=["response"])
@parametrize
def test_raw_response_create_overload_1(self, client: Anthropic) -> None:
response = client.completions.with_raw_response.create(
max_tokens_to_sample=256,
model="claude-opus-4-5-20251101",
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
completion = response.parse()
assert_matches_type(Completion, completion, path=["response"])
@parametrize
def test_streaming_response_create_overload_1(self, client: Anthropic) -> None:
with client.completions.with_streaming_response.create(
max_tokens_to_sample=256,
model="claude-opus-4-5-20251101",
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
completion = response.parse()
assert_matches_type(Completion, completion, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_create_overload_2(self, client: Anthropic) -> None:
completion_stream = client.completions.create(
max_tokens_to_sample=256,
model="claude-opus-4-5-20251101",
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stream=True,
)
completion_stream.response.close()
@parametrize
def test_method_create_with_all_params_overload_2(self, client: Anthropic) -> None:
completion_stream = client.completions.create(
max_tokens_to_sample=256,
model="claude-opus-4-5-20251101",
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stream=True,
metadata={"user_id": "13803d75-b4b5-4c3e-b2a2-6f21399b021b"},
stop_sequences=["string"],
temperature=1,
top_k=5,
top_p=0.7,
betas=["string"],
)
completion_stream.response.close()
@parametrize
def test_raw_response_create_overload_2(self, client: Anthropic) -> None:
response = client.completions.with_raw_response.create(
max_tokens_to_sample=256,
model="claude-opus-4-5-20251101",
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stream=True,
)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
stream.close()
@parametrize
def test_streaming_response_create_overload_2(self, client: Anthropic) -> None:
with client.completions.with_streaming_response.create(
max_tokens_to_sample=256,
model="claude-opus-4-5-20251101",
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
stream.close()
assert cast(Any, response.is_closed) is True
| TestCompletions |
python | PrefectHQ__prefect | src/prefect/server/events/filters.py | {
"start": 8900,
"end": 9035
} | class ____:
simple: list[str] = field(default_factory=list)
prefixes: list[str] = field(default_factory=list)
@dataclass
| LabelSet |
python | getsentry__sentry | src/sentry/workflow_engine/handlers/condition/age_comparison_handler.py | {
"start": 469,
"end": 2058
} | class ____(DataConditionHandler[WorkflowEventData]):
group = DataConditionHandler.Group.ACTION_FILTER
subgroup = DataConditionHandler.Subgroup.ISSUE_ATTRIBUTES
comparison_json_schema = {
"type": "object",
"properties": {
"comparison_type": {
"type": "string",
"enum": [AgeComparisonType.OLDER, AgeComparisonType.NEWER],
},
"value": {"type": "integer", "minimum": 0},
"time": {"type": "string", "enum": list(timeranges.keys())},
},
"required": ["comparison_type", "value", "time"],
"additionalProperties": False,
}
@staticmethod
def evaluate_value(event_data: WorkflowEventData, comparison: Any) -> bool:
group = event_data.group
first_seen = group.first_seen
current_time = timezone.now()
comparison_type = comparison["comparison_type"]
time = comparison["time"]
if (
not comparison_type
or not time
or time not in timeranges
or (
comparison_type != AgeComparisonType.OLDER
and comparison_type != AgeComparisonType.NEWER
)
):
return False
try:
value = int(comparison["value"])
except (TypeError, ValueError):
return False
_, delta_time = timeranges[time]
passes: bool = age_comparison_map[comparison_type](
first_seen + (value * delta_time), current_time
)
return passes
| AgeComparisonConditionHandler |
python | Textualize__rich | rich/markdown.py | {
"start": 4425,
"end": 5186
} | class ____(TextElement):
"""A code block with syntax highlighting."""
style_name = "markdown.code_block"
@classmethod
def create(cls, markdown: Markdown, token: Token) -> CodeBlock:
node_info = token.info or ""
lexer_name = node_info.partition(" ")[0]
return cls(lexer_name or "text", markdown.code_theme)
def __init__(self, lexer_name: str, theme: str) -> None:
self.lexer_name = lexer_name
self.theme = theme
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
code = str(self.text).rstrip()
syntax = Syntax(
code, self.lexer_name, theme=self.theme, word_wrap=True, padding=1
)
yield syntax
| CodeBlock |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/sanity/__init__.py | {
"start": 25215,
"end": 25577
} | class ____(TestFailure):
"""Sanity test failure."""
def __init__(
self,
test: str,
python_version: t.Optional[str] = None,
messages: t.Optional[c.Sequence[SanityMessage]] = None,
summary: t.Optional[str] = None,
) -> None:
super().__init__(COMMAND, test, python_version, messages, summary)
| SanityFailure |
python | doocs__leetcode | solution/3000-3099/3072.Distribute Elements Into Two Arrays II/Solution2.py | {
"start": 0,
"end": 690
} | class ____:
def resultArray(self, nums: List[int]) -> List[int]:
arr1 = [nums[0]]
arr2 = [nums[1]]
sl1 = SortedList(arr1)
sl2 = SortedList(arr2)
for x in nums[2:]:
i = sl1.bisect_right(x)
j = sl2.bisect_right(x)
if len(sl1) - i > len(sl2) - j:
arr1.append(x)
sl1.add(x)
elif len(sl1) - i < len(sl2) - j:
arr2.append(x)
sl2.add(x)
elif len(sl1) <= len(sl2):
arr1.append(x)
sl1.add(x)
else:
arr2.append(x)
sl2.add(x)
return arr1 + arr2
| Solution |
python | getsentry__sentry | src/sentry/api/endpoints/organization_sdk_updates.py | {
"start": 4112,
"end": 4687
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.TELEMETRY_EXPERIENCE
def get(self, request: Request, organization: Organization) -> Response:
try:
sdks = get_sdk_index()
except Exception as e:
sentry_sdk.capture_exception(e)
return Response({"detail": "Error occurred while fetching SDKs"}, status=500)
if len(sdks) == 0:
raise NotFound(detail="No SDKs found in index")
return Response(sdks)
| OrganizationSdksEndpoint |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 106356,
"end": 108132
} | class ____(Request):
"""
get task scalar metrics and variants
:param task: task ID
:type task: str
:param model_events: If set then the retrieving model events. Otherwise task
events
:type model_events: bool
"""
_service = "events"
_action = "get_scalar_metrics_and_variants"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"model_events": {
"default": False,
"description": "If set then the retrieving model events. Otherwise task events",
"type": "boolean",
},
"task": {"description": "task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(self, task: str, model_events: Optional[bool] = False, **kwargs: Any) -> None:
super(GetScalarMetricsAndVariantsRequest, self).__init__(**kwargs)
self.task = task
self.model_events = model_events
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("model_events")
def model_events(self) -> Optional[bool]:
return self._property_model_events
@model_events.setter
def model_events(self, value: Optional[bool]) -> None:
if value is None:
self._property_model_events = None
return
self.assert_isinstance(value, "model_events", (bool,))
self._property_model_events = value
| GetScalarMetricsAndVariantsRequest |
python | huggingface__transformers | tests/models/owlvit/test_modeling_owlvit.py | {
"start": 1567,
"end": 4434
} | class ____:
def __init__(
self,
parent,
batch_size=12,
image_size=32,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return OwlViTVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = OwlViTVisionModel(config=config).to(torch_device)
model.eval()
pixel_values = pixel_values.to(torch.float32)
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
num_patches = (self.image_size // self.patch_size) ** 2
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| OwlViTVisionModelTester |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/control_flow/control_flow_ops_py_test.py | {
"start": 182009,
"end": 183849
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testGuardedAssertDoesNotCopyWhenTrue(self):
if test_util.is_gpu_available():
self.skipTest("b/128646478 fails in opensource")
with self.session() as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
guarded_assert = control_flow_assert.Assert(
true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
n.node_name
for d in guarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
unguarded_nodestat_names = [
n.node_name
for d in unguarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
guarded_memcpy_nodestat_names = [
n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
]
unguarded_memcpy_nodestat_names = [
n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names),
str(unguarded_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
| AssertTest |
python | altair-viz__altair | tools/generate_schema_wrapper.py | {
"start": 14019,
"end": 14251
} | class ____(codegen.SchemaGenerator):
schema_class_template = textwrap.dedent(
'''
class {classname}({basename}):
"""{docstring}"""
_schema = {schema!r}
{init_code}
'''
)
| SchemaGenerator |
python | langchain-ai__langchain | libs/partners/perplexity/tests/unit_tests/test_chat_models_standard.py | {
"start": 207,
"end": 512
} | class ____(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatPerplexity
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return ({"PPLX_API_KEY": "api_key"}, {}, {"pplx_api_key": "api_key"})
| TestPerplexityStandard |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/db_io_manager.py | {
"start": 1932,
"end": 3061
} | class ____(Generic[T]):
@staticmethod
@abstractmethod
def delete_table_slice(
context: OutputContext, table_slice: TableSlice, connection: T
) -> None: ...
@staticmethod
@abstractmethod
def get_select_statement(table_slice: TableSlice) -> str: ...
@staticmethod
def get_table_name(table_slice: TableSlice) -> str:
"""Returns a string which is set as the dagster/table_name metadata value for an
emitted asset. This value should be the fully qualified name of the table, including the
schema and database, if applicable.
"""
if not table_slice.database:
return f"{table_slice.schema}.{table_slice.table}"
return f"{table_slice.database}.{table_slice.schema}.{table_slice.table}"
@staticmethod
@abstractmethod
def ensure_schema_exists(
context: OutputContext, table_slice: TableSlice, connection: T
) -> None: ...
@staticmethod
@abstractmethod
@contextmanager
def connect(
context: Union[OutputContext, InputContext], table_slice: TableSlice
) -> Iterator[T]: ...
| DbClient |
python | encode__django-rest-framework | tests/test_throttling.py | {
"start": 14381,
"end": 16288
} | class ____(TestCase):
def setUp(self):
SimpleRateThrottle.scope = 'anon'
def test_get_rate_raises_error_if_scope_is_missing(self):
throttle = SimpleRateThrottle()
with pytest.raises(ImproperlyConfigured):
throttle.scope = None
throttle.get_rate()
def test_throttle_raises_error_if_rate_is_missing(self):
SimpleRateThrottle.scope = 'invalid scope'
with pytest.raises(ImproperlyConfigured):
SimpleRateThrottle()
def test_parse_rate_returns_tuple_with_none_if_rate_not_provided(self):
rate = SimpleRateThrottle().parse_rate(None)
assert rate == (None, None)
def test_allow_request_returns_true_if_rate_is_none(self):
assert SimpleRateThrottle().allow_request(request={}, view={}) is True
def test_get_cache_key_raises_not_implemented_error(self):
with pytest.raises(NotImplementedError):
SimpleRateThrottle().get_cache_key({}, {})
def test_allow_request_returns_true_if_key_is_none(self):
throttle = SimpleRateThrottle()
throttle.rate = 'some rate'
throttle.get_cache_key = lambda *args: None
assert throttle.allow_request(request={}, view={}) is True
def test_wait_returns_correct_waiting_time_without_history(self):
throttle = SimpleRateThrottle()
throttle.num_requests = 1
throttle.duration = 60
throttle.history = []
waiting_time = throttle.wait()
assert isinstance(waiting_time, float)
assert waiting_time == 30.0
def test_wait_returns_none_if_there_are_no_available_requests(self):
throttle = SimpleRateThrottle()
throttle.num_requests = 1
throttle.duration = 60
throttle.now = throttle.timer()
throttle.history = [throttle.timer() for _ in range(3)]
assert throttle.wait() is None
| SimpleRateThrottleTests |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_batch.py | {
"start": 24313,
"end": 26775
} | class ____:
warn_message = "The `status_retries` parameter is unused and should be removed"
def test_template_fields(self):
environment_name = "environment_name"
compute_resources = {}
service_role = "test_role"
environment_type = "environment_type"
environment_state = "state"
operator = BatchCreateComputeEnvironmentOperator(
task_id="task",
compute_environment_name=environment_name,
environment_type=environment_type,
state=environment_state,
compute_resources=compute_resources,
service_role=service_role,
)
validate_template_fields(operator)
@mock.patch.object(BatchClientHook, "client")
def test_execute(self, mock_conn):
environment_name = "environment_name"
environment_type = "environment_type"
environment_state = "environment_state"
compute_resources = {}
tags = {}
operator = BatchCreateComputeEnvironmentOperator(
task_id="task",
compute_environment_name=environment_name,
environment_type=environment_type,
state=environment_state,
compute_resources=compute_resources,
tags=tags,
)
operator.execute(None)
mock_conn.create_compute_environment.assert_called_once_with(
computeEnvironmentName=environment_name,
type=environment_type,
state=environment_state,
computeResources=compute_resources,
tags=tags,
)
@mock.patch.object(BatchClientHook, "client")
def test_defer(self, client_mock):
client_mock.create_compute_environment.return_value = {"computeEnvironmentArn": "my_arn"}
operator = BatchCreateComputeEnvironmentOperator(
task_id="task",
compute_environment_name="my_env_name",
environment_type="my_env_type",
state="my_state",
compute_resources={},
max_retries=123456,
poll_interval=456789,
deferrable=True,
)
with pytest.raises(TaskDeferred) as deferred:
operator.execute(None)
assert isinstance(deferred.value.trigger, BatchCreateComputeEnvironmentTrigger)
assert deferred.value.trigger.waiter_delay == 456789
assert deferred.value.trigger.attempts == 123456
| TestBatchCreateComputeEnvironmentOperator |
python | pytorch__pytorch | torch/ao/nn/intrinsic/modules/fused.py | {
"start": 6805,
"end": 7405
} | class ____(_FusedModule):
r"""This is a sequential container which calls the BatchNorm 2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, batch_norm, relu):
assert (
type_before_parametrizations(batch_norm) == BatchNorm2d
and type_before_parametrizations(relu) == ReLU
), (
f"Incorrect types for input modules{type_before_parametrizations(batch_norm)}"
f"{type_before_parametrizations(relu)}"
)
super().__init__(batch_norm, relu)
| BNReLU2d |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_header_image16.py | {
"start": 315,
"end": 1348
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image16.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
"xl/worksheets/sheet2.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet1.set_header("&L&G", {"image_left": self.image_dir + "red.jpg"})
worksheet2.set_header("&L&G", {"image_left": self.image_dir + "red.jpg"})
worksheet1.set_footer("&R&G", {"image_right": self.image_dir + "red.jpg"})
worksheet2.set_footer("&R&G", {"image_right": self.image_dir + "red.jpg"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | scipy__scipy | scipy/fft/_pocketfft/tests/test_basic.py | {
"start": 4399,
"end": 5100
} | class ____:
def test_1_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft(x1, n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (4, ))
assert_array_almost_equal(y, direct_dft(x1.astype(np.float32)))
def test_n_argument_real(self):
x1 = np.array([1, 2, 3, 4], dtype=np.float16)
x2 = np.array([1, 2, 3, 4], dtype=np.float16)
y = fft([x1, x2], n=4)
assert_equal(y.dtype, np.complex64)
assert_equal(y.shape, (2, 4))
assert_array_almost_equal(y[0], direct_dft(x1.astype(np.float32)))
assert_array_almost_equal(y[1], direct_dft(x2.astype(np.float32)))
| TestFloat16FFT |
python | tartley__colorama | colorama/ansitowin32.py | {
"start": 343,
"end": 2236
} | class ____:
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
attribute access apart from method 'write()', which is delegated to our
Converter instance.
'''
def __init__(self, wrapped, converter):
# double-underscore everything to prevent clashes with names of
# attributes on the wrapped stream object.
self.__wrapped = wrapped
self.__convertor = converter
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def __enter__(self, *args, **kwargs):
# special method lookup bypasses __getattr__/__getattribute__, see
# https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
# thus, contextlib magic methods are not proxied via __getattr__
return self.__wrapped.__enter__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
return self.__wrapped.__exit__(*args, **kwargs)
def __setstate__(self, state):
self.__dict__ = state
def __getstate__(self):
return self.__dict__
def write(self, text):
self.__convertor.write(text)
def isatty(self):
stream = self.__wrapped
if 'PYCHARM_HOSTED' in os.environ:
if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
return True
try:
stream_isatty = stream.isatty
except AttributeError:
return False
else:
return stream_isatty()
@property
def closed(self):
stream = self.__wrapped
try:
return stream.closed
# AttributeError in the case that the stream doesn't support being closed
# ValueError for the case that the stream has already been detached when atexit runs
except (AttributeError, ValueError):
return True
| StreamWrapper |
python | urllib3__urllib3 | test/test_queue_monkeypatch.py | {
"start": 254,
"end": 761
} | class ____:
"""
Test that connection pool works even with a monkey patched Queue module,
see obspy/obspy#1599, psf/requests#3742, urllib3/urllib3#1061.
"""
def test_queue_monkeypatching(self) -> None:
with mock.patch.object(queue, "Empty", BadError):
with HTTPConnectionPool(host="localhost", block=True) as http:
http._get_conn()
with pytest.raises(EmptyPoolError):
http._get_conn(timeout=0)
| TestMonkeypatchResistance |
python | gevent__gevent | src/gevent/exceptions.py | {
"start": 162,
"end": 1607
} | class ____(Exception):
"""
Exception thrown when the hub finishes running (`gevent.hub.Hub.run`
would return).
In a normal application, this is never thrown or caught
explicitly. The internal implementation of functions like
:meth:`gevent.hub.Hub.join` and :func:`gevent.joinall` may catch it, but user code
generally should not.
.. caution::
Errors in application programming can also lead to this exception being
raised. Some examples include (but are not limited too):
- greenlets deadlocking on a lock;
- using a socket or other gevent object with native thread
affinity from a different thread
"""
@property
def hub(self):
"""
The (optional) hub that raised the error.
.. versionadded:: 20.12.0
"""
# XXX: Note that semaphore.py does this manually.
if len(self.args) == 3: # From the hub
return self.args[1]
def __repr__(self):
# pylint:disable=unsubscriptable-object
if len(self.args) == 3: # From the hub
import pprint
return (
"%s\n"
"\tHub: %s\n"
"\tHandles:\n%s"
) % (
self.args[0],
self.args[1],
pprint.pformat(self.args[2])
)
return Exception.__repr__(self)
def __str__(self):
return repr(self)
| LoopExit |
python | keon__algorithms | tests/test_strings.py | {
"start": 2609,
"end": 2941
} | class ____(unittest.TestCase):
"""[summary]
Test for the file decode_string.py
Arguments:
unittest {[type]} -- [description]
"""
def test_decode_string(self):
self.assertEqual("aaabcbc", decode_string("3[a]2[bc]"))
self.assertEqual("accaccacc", decode_string("3[a2[c]]"))
| TestDecodeString |
python | python-openxml__python-docx | tests/oxml/unitdata/section.py | {
"start": 590,
"end": 893
} | class ____(BaseBuilder):
__tag__ = "w:type"
__nspfxs__ = ("w",)
__attrs__ = ("w:val",)
def a_pgMar():
return CT_PageMarBuilder()
def a_pgSz():
return CT_PageSzBuilder()
def a_sectPr():
return CT_SectPrBuilder()
def a_type():
return CT_SectTypeBuilder()
| CT_SectTypeBuilder |
python | django__django | tests/urlpatterns_reverse/tests.py | {
"start": 68070,
"end": 70013
} | class ____(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
"/lookahead-/a-city/",
"/lookbehind-/a-city/",
"/lookahead+/a-city/",
"/lookbehind+/a-city/",
]
for test_url in test_urls:
with self.subTest(url=test_url):
self.assertEqual(resolve(test_url).kwargs, {"city": "a-city"})
def test_invalid_resolve(self):
test_urls = [
"/lookahead-/not-a-city/",
"/lookbehind-/not-a-city/",
"/lookahead+/other-city/",
"/lookbehind+/other-city/",
]
for test_url in test_urls:
with self.subTest(url=test_url):
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
test_urls = [
("lookahead-positive", {"city": "a-city"}, "/lookahead+/a-city/"),
("lookahead-negative", {"city": "a-city"}, "/lookahead-/a-city/"),
("lookbehind-positive", {"city": "a-city"}, "/lookbehind+/a-city/"),
("lookbehind-negative", {"city": "a-city"}, "/lookbehind-/a-city/"),
]
for name, kwargs, expected in test_urls:
with self.subTest(name=name, kwargs=kwargs):
self.assertEqual(reverse(name, kwargs=kwargs), expected)
def test_invalid_reverse(self):
test_urls = [
("lookahead-positive", {"city": "other-city"}),
("lookahead-negative", {"city": "not-a-city"}),
("lookbehind-positive", {"city": "other-city"}),
("lookbehind-negative", {"city": "not-a-city"}),
]
for name, kwargs in test_urls:
with self.subTest(name=name, kwargs=kwargs):
with self.assertRaises(NoReverseMatch):
reverse(name, kwargs=kwargs)
@override_settings(ROOT_URLCONF="urlpatterns_reverse.urls")
| LookaheadTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.