language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/torch_np/test_basic.py | {
"start": 14076,
"end": 14333
} | class ____(TestCase):
def test_nimpl_basic(self):
# smoke test that the "NotImplemented" annotation is picked up
with assert_raises(NotImplementedError):
w.empty(3, like="ooops")
@instantiate_parametrized_tests
| TestSmokeNotImpl |
python | kamyu104__LeetCode-Solutions | Python/smallest-palindromic-rearrangement-ii.py | {
"start": 86,
"end": 1510
} | class ____(object):
def smallestPalindrome(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
cnt = [0]*26
for i in xrange(len(s)//2):
cnt[ord(s[i])-ord('a')] += 1
total, count, remain = 0, 1, 0
for i in reversed(xrange(len(cnt))):
for c in xrange(1, cnt[i]+1):
total += 1
count = count*total//c
if count >= k:
remain = cnt[i]-c
break
if count >= k:
break
else:
return ""
result = []
for j in xrange(i+1):
x = chr(ord('a')+j)
for _ in xrange(cnt[j] if j != i else remain):
cnt[j] -= 1
result.append(x)
while total:
for j in xrange(i, len(cnt)):
if not cnt[j]:
continue
new_count = count*cnt[j]//total
if new_count < k:
k -= new_count
continue
count = new_count
cnt[j] -= 1
total -= 1
result.append(chr(ord('a')+j))
break
if len(s)%2:
result.append(s[len(s)//2])
result.extend((result[i] for i in reversed(xrange(len(result)-len(s)%2))))
return "".join(result)
| Solution |
python | mlflow__mlflow | mlflow/exceptions.py | {
"start": 5947,
"end": 6279
} | class ____(MlflowException):
"""Exception thrown when multipart upload is unsupported by an artifact repository"""
MESSAGE = "Multipart upload is not supported for the current artifact repository"
def __init__(self):
super().__init__(self.MESSAGE, error_code=NOT_IMPLEMENTED)
| _UnsupportedMultipartUploadException |
python | ijl__orjson | test/test_error.py | {
"start": 2536,
"end": 2964
} | class ____(Exception):
pass
def default_typeerror(obj):
raise TypeError
def default_notimplementederror(obj):
raise NotImplementedError
def default_systemerror(obj):
raise SystemError
def default_importerror(obj):
import doesnotexist # noqa: PLC0415
assert doesnotexist
CUSTOM_ERROR_MESSAGE = "zxc"
def default_customerror(obj):
raise CustomException(CUSTOM_ERROR_MESSAGE)
| CustomException |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/vertex_ai.py | {
"start": 3279,
"end": 4072
} | class ____(BaseGoogleLink):
"""Helper class for constructing Vertex AI Model Export Link."""
name = "Export Model"
key = "export_conf"
format_str = VERTEX_AI_MODEL_EXPORT_LINK
@staticmethod
def extract_bucket_name(config):
"""Return bucket name from output configuration."""
return config["artifact_destination"]["output_uri_prefix"].rpartition("gs://")[-1]
@classmethod
def persist(cls, context: Context, **value):
output_config = value.get("output_config")
bucket_name = cls.extract_bucket_name(output_config)
super().persist(
context=context,
project_id=value.get("project_id"),
model_id=value.get("model_id"),
bucket_name=bucket_name,
)
| VertexAIModelExportLink |
python | mlflow__mlflow | mlflow/genai/judges/tools/types.py | {
"start": 1070,
"end": 1332
} | class ____:
"""Expectation for a trace (simplified for judge tools)."""
name: str
source: str
rationale: str | None
span_id: str | None
assessment_id: str | None
value: Any
@experimental(version="3.5.0")
@dataclass
| JudgeToolExpectation |
python | PyCQA__pylint | tests/functional/n/no/no_member_assign_same_line.py | {
"start": 672,
"end": 861
} | class ____(ClassWithMember):
"""This assignment is valid due to inheritance."""
def __init__(self):
self.member = self.member
super().__init__()
| AssignMemberFromSuper1 |
python | getsentry__sentry | src/sentry/integrations/github/blame.py | {
"start": 730,
"end": 860
} | class ____(TypedDict):
commit: GitHubFileBlameCommit
startingLine: int
endingLine: int
age: int
| GitHubFileBlameRange |
python | tensorflow__tensorflow | tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py | {
"start": 133836,
"end": 136412
} | class ____(test.TestCase):
def _npRelu6(self, np_features):
sixes = np.copy(np_features)
sixes.fill(6.0)
return np.minimum(
np.maximum(np_features, np.zeros(np_features.shape)), sixes
)
def testNpRelu6(self):
self.assertAllClose(
np.array([[0.0, 0.7, 0.0, 0.3, 6.0], [0.1, 0.0, 6.0, 0.0, 0.9]]),
self._npRelu6(
np.array([[-0.9, 0.7, -0.5, 0.3, 6.0], [0.1, -0.3, 6.5, -0.7, 0.9]])
),
)
def _testRelu6(self, np_features):
np_relu6 = self._npRelu6(np_features)
tf_relu6 = nn_ops.relu6(np_features)
self.assertAllClose(np_relu6, tf_relu6)
self.assertShapeEqual(np_relu6, tf_relu6)
def testNumbersCPU(self):
for t in [np.int32, np.int64, np.float16, np.float32, np.float64]:
# Force execution on CPU even if a GPU kernel is available for the type.
with ops.device("/device:CPU:0"):
self._testRelu6(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)
)
def testNumbersGPU(self):
if not test.is_gpu_available():
self.skipTest("No GPU available")
for t in [np.float16, np.float, np.double]:
print(t)
self._testRelu6(
np.array([[-9, 7, -5, 3, -1], [1, -3, 5, -7, 9]]).astype(t)
)
# The gradient test for ReLU6 is a bit tricky as the derivative is
# not well defined at around zero and six and we want to avoid that
# in terms of input values.
def testGradientFloat32(self):
with self.cached_session():
x = numpy_compat.np_asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float32,
order="F",
)
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu6, [x])
)
self.assertLess(err, 1e-4)
def testGradientFloat16(self):
with self.cached_session():
x = numpy_compat.np_asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float16,
order="F",
)
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu6, [x])
)
self.assertLess(err, 1e-4)
def testGradientFloat64(self):
with self.cached_session():
x = numpy_compat.np_asarray(
[[-0.9, -0.7, -0.5, -0.3, -0.1], [6.1, 6.3, 6.5, 6.7, 6.9]],
dtype=np.float64,
order="F",
)
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(nn_ops.relu6, [x])
)
self.assertLess(err, 1e-10)
| Relu6Test |
python | getsentry__sentry | src/sentry/replays/lib/new_query/fields.py | {
"start": 6414,
"end": 6502
} | class ____(ColumnField[UUID]):
"""UUID-type condition column field."""
| UUIDColumnField |
python | tensorflow__tensorflow | tensorflow/python/tpu/feature_column_v2.py | {
"start": 44496,
"end": 47960
} | class ____(_TPUSharedEmbeddingColumnV2):
"""TPUSharedEmbeddingColumnV2 which allows serving on TensorCore."""
def __new__(cls, *args, **kwargs):
# For __new__, just capture the inference dense shape and call parent.
if 'tensor_core_shape' in kwargs:
cls._tensor_core_shape = kwargs['tensor_core_shape']
del kwargs['tensor_core_shape']
if 'embedding_lookup_device' in kwargs:
cls._embedding_lookup_device = kwargs['embedding_lookup_device']
del kwargs['embedding_lookup_device']
return _TPUSharedEmbeddingColumnV2.__new__(cls, *args, **kwargs) # pytype: disable=wrong-keyword-args # always-use-return-annotations
def __init__(self, *args, **kwargs):
# For __init__, just capture the inference dense shape and call parent.
if 'tensor_core_shape' in kwargs:
self._tensor_core_shape = kwargs['tensor_core_shape']
del kwargs['tensor_core_shape']
if 'embedding_lookup_device' in kwargs:
self._embedding_lookup_device = kwargs['embedding_lookup_device']
del kwargs['embedding_lookup_device']
_TPUSharedEmbeddingColumnV2.__init__(self, *args, **kwargs)
def __deepcopy__(self, memo):
return _TPUSharedDeviceSpecificEmbeddingColumnV2(
*(copy.deepcopy(a, memo) for a in self.__getnewargs__()),
tensor_core_shape=self._tensor_core_shape,
embedding_lookup_device=self._embedding_lookup_device)
def _get_dense_tensor_internal(self, transformation_cache, state_manager):
"""Private method that follows _get_dense_tensor_internal."""
_check_invalid_cases(self._embedding_lookup_device)
# CPU Case.
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
is_cpu = is_cpu or _is_running_on_cpu()
if is_cpu:
return super(_TPUSharedDeviceSpecificEmbeddingColumnV2,
self)._get_dense_tensor_internal(transformation_cache,
state_manager)
# TPU_EMBEDDING_CORE case.
if self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
return super(_TPUSharedDeviceSpecificEmbeddingColumnV2,
self)._get_dense_tensor_internal(transformation_cache,
state_manager)
# TPU_EMBEDDING_CORE cases.
if tpu.under_tpu_inference_context():
# For inference, use outside compile to densify and pad the input tensors.
sparse_tensor = transformation_cache.get(self.categorical_column.name,
state_manager)
def host_computation():
return pad_sparse_embedding_lookup_indices(sparse_tensor,
self._tensor_core_shape[1])
values, mask = tpu_replication.outside_compilation(host_computation)
else:
# For training, the inputs should already have been densified and padded.
values = transformation_cache.get(self.categorical_column.name,
state_manager)
mask = transformation_cache.get(
self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
state_manager)
# Do a dense embedding lookup on TensorCore.
embedding_weights = self.shared_embedding_column_creator.embedding_weights
return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
self.get_combiner())
| _TPUSharedDeviceSpecificEmbeddingColumnV2 |
python | nedbat__coveragepy | tests/test_plugins.py | {
"start": 23483,
"end": 37527
} | class ____(FileTracerTest):
"""Test error handling around file tracer plugins."""
def run_plugin(self, module_name: str) -> Coverage:
"""Run a plugin with the given module_name.
Uses a few fixed Python files.
Returns the Coverage object.
"""
self.make_file(
"simple.py",
"""\
import other, another
a = other.f(2)
b = other.f(3)
c = another.g(4)
d = another.g(5)
""",
)
# The names of these files are important: some plugins apply themselves
# to "*other.py".
self.make_file(
"other.py",
"""\
def f(x):
return x+1
""",
)
self.make_file(
"another.py",
"""\
def g(x):
return x-1
""",
)
cov = coverage.Coverage()
cov.set_option("run:plugins", [module_name])
self.start_import_stop(cov, "simple")
cov.save() # pytest-cov does a save after stop, so we'll do it too.
return cov
def run_bad_plugin(
self,
module_name: str,
plugin_name: str,
our_error: bool = True,
excmsg: str | None = None,
excmsgs: list[str] | None = None,
) -> None:
"""Run a file, and see that the plugin failed.
`module_name` and `plugin_name` is the module and name of the plugin to
use.
`our_error` is True if the error reported to the user will be an
explicit error in our test code, marked with an '# Oh noes!' comment.
`excmsg`, if provided, is text that must appear in the stderr.
`excmsgs`, if provided, is a list of messages, one of which must
appear in the stderr.
The plugin will be disabled, and we check that a warning is output
explaining why.
"""
with pytest.warns(Warning) as warns:
self.run_plugin(module_name)
stderr = self.stderr()
stderr += "".join(str(w.message) for w in warns)
if our_error:
# The exception we're causing should only appear once.
assert stderr.count("# Oh noes!") == 1
# There should be a warning explaining what's happening, but only one.
# The message can be in two forms:
# Disabling plug-in '...' due to previous exception
# or:
# Disabling plug-in '...' due to an exception:
print([str(w) for w in warns.list])
warnings = [w for w in warns.list if issubclass(w.category, CoverageWarning)]
assert len(warnings) == 1
warnmsg = str(warnings[0].message)
assert f"Disabling plug-in '{module_name}.{plugin_name}' due to " in warnmsg
if excmsg:
assert excmsg in stderr
if excmsgs:
found_exc = any(em in stderr for em in excmsgs) # pragma: part covered
assert found_exc, f"expected one of {excmsgs} in stderr"
def test_file_tracer_has_no_file_tracer_method(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
class Plugin(object):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin("bad_plugin", "Plugin", our_error=False)
def test_file_tracer_has_inherited_sourcefilename_method(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
# Just grab everything.
return FileTracer()
class FileTracer(coverage.FileTracer):
pass
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin(
"bad_plugin",
"Plugin",
our_error=False,
excmsg="Class 'bad_plugin.FileTracer' needs to implement source_filename()",
)
def test_plugin_has_inherited_filereporter_method(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage
class Plugin(coverage.CoveragePlugin):
def file_tracer(self, filename):
# Just grab everything.
return FileTracer()
class FileTracer(coverage.FileTracer):
def source_filename(self):
return "foo.xxx"
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
cov = self.run_plugin("bad_plugin")
expected_msg = "Plugin 'bad_plugin.Plugin' needs to implement file_reporter()"
with pytest.raises(NotImplementedError, match=expected_msg):
cov.report()
def test_file_tracer_fails(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
17/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin("bad_plugin", "Plugin")
def test_file_tracer_fails_eventually(self) -> None:
# Django coverage plugin can report on a few files and then fail.
# https://github.com/coveragepy/coveragepy/issues/1011
self.make_file(
"bad_plugin.py",
"""\
import os.path
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def __init__(self):
self.calls = 0
def file_tracer(self, filename):
print(filename)
self.calls += 1
if self.calls <= 2:
return FileTracer(filename)
else:
17/0 # Oh noes!
class FileTracer(coverage.FileTracer):
def __init__(self, filename):
self.filename = filename
def source_filename(self):
return os.path.basename(self.filename).replace(".py", ".foo")
def line_number_range(self, frame):
return -1, -1
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin("bad_plugin", "Plugin")
def test_file_tracer_returns_wrong(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return 3.14159
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin(
"bad_plugin",
"Plugin",
our_error=False,
excmsg="'float' object has no attribute",
)
def test_has_dynamic_source_filename_fails(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def has_dynamic_source_filename(self):
23/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin("bad_plugin", "Plugin")
def test_source_filename_fails(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
42/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin("bad_plugin", "Plugin")
def test_source_filename_returns_wrong(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return 17.3
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin(
"bad_plugin",
"Plugin",
our_error=False,
excmsgs=[
"expected str, bytes or os.PathLike object, not float",
"'float' object has no attribute",
"object of type 'float' has no len()",
"'float' object is unsubscriptable",
],
)
def test_dynamic_source_filename_fails(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def has_dynamic_source_filename(self):
return True
def dynamic_source_filename(self, filename, frame):
101/0 # Oh noes!
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin("bad_plugin", "Plugin")
def test_line_number_range_raises_error(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
raise Exception("borked!")
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin(
"bad_plugin",
"Plugin",
our_error=False,
excmsg="borked!",
)
def test_line_number_range_returns_non_tuple(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return 42.23
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin(
"bad_plugin",
"Plugin",
our_error=False,
excmsg="line_number_range must return 2-tuple",
)
def test_line_number_range_returns_triple(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return (1, 2, 3)
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin(
"bad_plugin",
"Plugin",
our_error=False,
excmsg="line_number_range must return 2-tuple",
)
def test_line_number_range_returns_pair_of_strings(self) -> None:
self.make_file(
"bad_plugin.py",
"""\
import coverage.plugin
class Plugin(coverage.plugin.CoveragePlugin):
def file_tracer(self, filename):
if filename.endswith("other.py"):
return BadFileTracer()
class BadFileTracer(coverage.plugin.FileTracer):
def source_filename(self):
return "something.foo"
def line_number_range(self, frame):
return ("5", "7")
def coverage_init(reg, options):
reg.add_file_tracer(Plugin())
""",
)
self.run_bad_plugin(
"bad_plugin",
"Plugin",
our_error=False,
excmsgs=[
"an integer is required",
"cannot be interpreted as an integer",
],
)
| BadFileTracerTest |
python | django__django | django/db/migrations/operations/models.py | {
"start": 26463,
"end": 28994
} | class ____(ModelOptionOperation):
"""Represent a change with the order_with_respect_to option."""
option_name = "order_with_respect_to"
def __init__(self, name, order_with_respect_to):
self.order_with_respect_to = order_with_respect_to
super().__init__(name)
def deconstruct(self):
kwargs = {
"name": self.name,
"order_with_respect_to": self.order_with_respect_to,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(
app_label,
self.name_lower,
{self.option_name: self.order_with_respect_to},
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
to_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
from_model = from_state.apps.get_model(app_label, self.name)
# Remove a field if we need to
if (
from_model._meta.order_with_respect_to
and not to_model._meta.order_with_respect_to
):
schema_editor.remove_field(
from_model, from_model._meta.get_field("_order")
)
# Add a field if we need to (altering the column is untouched as
# it's likely a rename)
elif (
to_model._meta.order_with_respect_to
and not from_model._meta.order_with_respect_to
):
field = to_model._meta.get_field("_order")
if not field.has_default():
field.default = 0
schema_editor.add_field(
from_model,
field,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def references_field(self, model_name, name, app_label):
return self.references_model(model_name, app_label) and (
self.order_with_respect_to is None or name == self.order_with_respect_to
)
def describe(self):
return "Set order_with_respect_to on %s to %s" % (
self.name,
self.order_with_respect_to,
)
@property
def migration_name_fragment(self):
return "alter_%s_order_with_respect_to" % self.name_lower
| AlterOrderWithRespectTo |
python | huggingface__transformers | tests/models/gemma/test_modeling_gemma.py | {
"start": 2226,
"end": 22174
} | class ____(unittest.TestCase):
input_text = ["Hello I am doing", "Hi today"]
# This variable is used to determine which accelerator are we using for our runners (e.g. A10 or T4)
# Depending on the hardware we get different logits / generations
device_properties: DeviceProperties = (None, None, None)
@classmethod
def setUpClass(cls):
cls.device_properties = get_device_properties()
def setUp(self):
cleanup(torch_device, gc_collect=True)
def tearDown(self):
# See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed.
cleanup(torch_device, gc_collect=True)
@require_read_token
def test_model_2b_fp16(self):
model_id = "google/gemma-2b"
EXPECTED_TEXTS = [
"Hello I am doing a project on the 1990s and I need to know what the most popular music",
"Hi today I am going to share with you a very easy and simple recipe of <strong><em>Kaju Kat",
]
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16).to(torch_device)
model.generation_config.cache_implementation = "static"
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
@require_read_token
def test_model_2b_bf16(self):
model_id = "google/gemma-2b"
EXPECTED_TEXTS = [
"Hello I am doing a project on the 1990s and I need to know what the most popular music",
"Hi today I am going to share with you a very easy and simple recipe of <strong><em>Kaju Kat",
]
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
@require_read_token
def test_model_2b_eager(self):
model_id = "google/gemma-2b"
EXPECTED_TEXTS = [
"Hello I am doing a project on the 1990s and I need to know what the most popular music",
"Hi today I am going to share with you a very easy and simple recipe of <strong><em>Kaju Kat",
]
# bfloat16 gives strange values, likely due to it has lower precision + very short prompts
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16, attn_implementation="eager")
model.to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
@require_flash_attn
@require_read_token
@pytest.mark.flash_attn_test
def test_model_2b_flash_attn(self):
model_id = "google/gemma-2b"
EXPECTED_TEXTS = [
"Hello I am doing a project on the 1990s and I need to know what the most popular music",
"Hi today I am going to share with you a very easy and simple recipe of <strong><em>Kaju Kat",
]
model = AutoModelForCausalLM.from_pretrained(
model_id, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
model.to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
@require_bitsandbytes
@require_read_token
def test_model_2b_4bit(self):
model_id = "google/gemma-2b"
EXPECTED_TEXTS = [
"Hello I am doing a project and I need to make a 3d model of a house. I have been using",
"Hi today I'd like to share with you my experience with the new wattpad wattpad wattpad wattpad wattpad wattpad wattpad",
]
model = AutoModelForCausalLM.from_pretrained(
model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
@unittest.skip(reason="The test will not fit our CI runners")
@require_read_token
def test_model_7b_fp32(self):
model_id = "google/gemma-7b"
EXPECTED_TEXTS = [
"Hello my name is ***** ***** I will be assisting you today. I am sorry to hear about your issue. I will",
"Hi,\n\nI have a problem with my 2005 1.6 16",
]
model = AutoModelForCausalLM.from_pretrained(model_id).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
@require_read_token
def test_model_7b_fp16(self):
if self.device_properties[0] == "cuda" and self.device_properties[1] == 7:
self.skipTest("This test is failing (`torch.compile` fails) on Nvidia T4 GPU (OOM).")
model_id = "google/gemma-7b"
EXPECTED_TEXTS = [
"""Hello I am doing a project on a 1999 4.0L 4x4. I""",
"Hi today I am going to show you how to make a simple and easy to make a DIY 3D",
]
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
@require_read_token
def test_model_7b_bf16(self):
if self.device_properties[0] == "cuda" and self.device_properties[1] == 7:
self.skipTest("This test is failing (`torch.compile` fails) on Nvidia T4 GPU (OOM).")
model_id = "google/gemma-7b"
# Key 9 for MI300, Key 8 for A100/A10, and Key 7 for T4.
#
# Note: Key 9 is currently set for MI300, but may need potential future adjustments for H100s,
# considering differences in hardware processing and potential deviations in generated text.
# fmt: off
EXPECTED_TEXTS = Expectations(
{
("cuda", 7): ["""Hello I am doing a project on a 1991 240sx and I am trying to find""", "Hi today I am going to show you how to make a very simple and easy to make a very simple and",],
("cuda", 8): ['Hello I am doing a project for my school and I am trying to make a game in which you have to get a', 'Hi today I am going to show you how to make a very simple and easy to make a very simple and'],
("rocm", 9): ["Hello I am doing a project for my school and I am trying to get a servo to move a certain amount of degrees", "Hi today I am going to show you how to make a very simple and easy to make DIY light up sign",],
}
)
# fmt: on
expected_text = EXPECTED_TEXTS.get_expectation()
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, expected_text)
@require_read_token
def test_model_7b_fp16_static_cache(self):
if self.device_properties[0] == "cuda" and self.device_properties[1] == 7:
self.skipTest("This test is failing (`torch.compile` fails) on Nvidia T4 GPU (OOM).")
model_id = "google/gemma-7b"
expectations = Expectations(
{
(None, None): [
"Hello I am doing a project on a 1999 4.0L 4x4. I",
"Hi today I am going to show you how to make a simple and easy to make a DIY 3D",
],
("cuda", 8): [
"Hello I am doing a project on a 1995 3000gt SL. I have a",
"Hi today I am going to show you how to make a simple and easy to make a DIY 3D",
],
}
)
EXPECTED_TEXTS = expectations.get_expectation()
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16).to(torch_device)
model.generation_config.cache_implementation = "static"
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
@require_bitsandbytes
@require_read_token
def test_model_7b_4bit(self):
model_id = "google/gemma-7b"
expectations = Expectations(
{
(None, None): [
"Hello I am doing a project for my school and I am trying to make a program that will take a number and then",
"Hi today I am going to talk about the best way to get rid of acne. miniaturing is a very",
],
("cuda", 8): [
"Hello I am doing a project for my school and I am trying to make a program that will take a number and then",
'Hi today I am going to talk about the new update for the game called "The new update!:)!:)!:)',
],
}
)
EXPECTED_TEXTS = expectations.get_expectation()
model = AutoModelForCausalLM.from_pretrained(
model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
@slow
@require_torch_accelerator
@pytest.mark.torch_compile_test
@require_read_token
def test_compile_static_cache(self):
# `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2
# work as intended. See https://github.com/pytorch/pytorch/issues/121943
if version.parse(torch.__version__) < version.parse("2.3.0"):
self.skipTest(reason="This test requires torch >= 2.3 to run.")
NUM_TOKENS_TO_GENERATE = 40
EXPECTED_TEXT_COMPLETION = [
"Hello I am doing a project on the 1990s and I need to know what the most popular music was in the 1990s. I have looked on the internet and I have found",
"Hi today\nI have a problem with my 2007 1.9 tdi 105bhp.\nI have a problem with the engine management light on.\nI have checked the",
]
prompts = ["Hello I am doing", "Hi today"]
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b", pad_token="</s>", padding_side="right")
model = GemmaForCausalLM.from_pretrained("google/gemma-2b", device_map=torch_device, dtype=torch.float16)
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
# Dynamic Cache
generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False)
dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text) # Both GPU architectures have the same output
# Static Cache
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
)
static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text)
# Static Cache + compile
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
)
static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text)
@pytest.mark.torch_export_test
@slow
@require_read_token
def test_export_static_cache(self):
if version.parse(torch.__version__) < version.parse("2.3.0"):
self.skipTest(reason="This test requires torch >= 2.3 to run.")
from transformers.integrations.executorch import (
TorchExportableModuleWithStaticCache,
)
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b", pad_token="</s>", padding_side="right")
expectations = Expectations(
{
(None, None): [
"Hello I am doing a project on the 1990s and I need to know what the most popular music was in the 1990s. I have looked on the internet and I have found"
],
("cuda", 8): [
"Hello I am doing a project on the 1990s and I need to know what the most popular music was in the 1990s. I have been looking on the internet and I have"
],
("rocm", (9, 5)): [
"Hello I am doing a project on the 1990s and I need to know what the most popular music was in the 1990s. I have been looking on the internet and I have"
],
}
)
EXPECTED_TEXT_COMPLETION = expectations.get_expectation()
max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[
"input_ids"
].shape[-1]
# Load model
device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM
dtype = torch.bfloat16
cache_implementation = "static"
attn_implementation = "sdpa"
batch_size = 1
model = GemmaForCausalLM.from_pretrained(
"google/gemma-2b",
device_map=device,
dtype=dtype,
attn_implementation=attn_implementation,
generation_config=GenerationConfig(
use_cache=True,
cache_implementation=cache_implementation,
max_length=max_generation_length,
cache_config={
"batch_size": batch_size,
"max_cache_len": max_generation_length,
},
),
)
prompts = ["Hello I am doing"]
prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
prompt_token_ids = prompt_tokens["input_ids"]
max_new_tokens = max_generation_length - prompt_token_ids.shape[-1]
# Static Cache + eager
eager_generated_ids = model.generate(
**prompt_tokens, max_new_tokens=max_new_tokens, do_sample=False, cache_implementation=cache_implementation
)
eager_generated_text = tokenizer.batch_decode(eager_generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, eager_generated_text)
# Static Cache + export
from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM
exportable_module = TorchExportableModuleForDecoderOnlyLM(model)
exported_program = exportable_module.export(
input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device),
cache_position=torch.tensor([0], dtype=torch.long, device=model.device),
)
ep_generated_ids = TorchExportableModuleWithStaticCache.generate(
exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens
)
ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True)
# After switching to A10 on 2025/06/29, we get slightly different outputs when using export
expectations = Expectations(
{
(None, None): [
"Hello I am doing a project on the 1990s and I need to know what the most popular music was in the 1990s. I have looked on the internet and I have found"
],
("cuda", 8): [
"Hello I am doing a project on the 1990s and I need to know what the most popular music was in the 1990s. I have looked on the internet and I have found"
],
}
)
EXPECTED_TEXT_COMPLETION = expectations.get_expectation()
self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text)
# TODO joao, manuel: remove this in v4.62.0
def test_model_2b_bf16_dola(self):
model_id = "google/gemma-2b"
# ground truth text generated with dola_layers="low", repetition_penalty=1.2
expectations = Expectations(
{
(None, None): [
"Hello I am doing an experiment and need to get the mass of a block. The problem is, it has no scale",
"Hi today we have the review for a <strong>2016/2017</strong> season of",
],
("cuda", 8): [
"Hello I am doing an experiment and need to get the mass of a block. The only tool I have is a scale",
"Hi today we have the review for a <strong>2016/2017</strong> season of",
],
}
)
EXPECTED_TEXTS = expectations.get_expectation()
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(
**inputs,
max_new_tokens=20,
do_sample=False,
dola_layers="low",
repetition_penalty=1.2,
trust_remote_code=True,
custom_generate="transformers-community/dola",
)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
| GemmaIntegrationTest |
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 93097,
"end": 99398
} | class ____(Response):
"""
Response of models.get_by_id endpoint.
:param model: Model info
:type model: Model
"""
_service = "models"
_action = "get_by_id"
_version = "2.20"
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": ["string", "null"],
},
"type": {
"description": "The type of the metadata item",
"type": ["string", "null"],
},
"value": {
"description": "The value stored in the metadata item",
"type": ["string", "null"],
},
},
"type": "object",
},
"model": {
"properties": {
"comment": {
"description": "Model comment",
"type": ["string", "null"],
},
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Model creation time",
"format": "date-time",
"type": ["string", "null"],
},
"design": {
"additionalProperties": True,
"description": "Json object representing the model design. Should be identical to the network design of the task which created the model",
"type": ["object", "null"],
},
"framework": {
"description": "Framework on which the model is based. Should be identical to the framework of the task which created the model",
"type": ["string", "null"],
},
"id": {"description": "Model id", "type": ["string", "null"]},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.",
"type": ["object", "null"],
},
"last_update": {
"description": "Model last update time",
"format": "date-time",
"type": ["string", "null"],
},
"metadata": {
"additionalProperties": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
"type": ["object", "null"],
},
"name": {"description": "Model name", "type": ["string", "null"]},
"parent": {
"description": "Parent model ID",
"type": ["string", "null"],
},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
"ready": {
"description": "Indication if the model is final and can be used by other tasks",
"type": ["boolean", "null"],
},
"stats": {
"description": "Model statistics",
"properties": {
"labels_count": {
"description": "Number of the model labels",
"type": "integer",
}
},
"type": ["object", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task ID of task in which the model was created",
"type": ["string", "null"],
},
"ui_cache": {
"additionalProperties": True,
"description": "UI cache for this model",
"type": ["object", "null"],
},
"uri": {
"description": "URI for the model, pointing to the destination storage.",
"type": ["string", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"model": {
"description": "Model info",
"oneOf": [{"$ref": "#/definitions/model"}, {"type": "null"}],
}
},
"type": "object",
}
def __init__(self, model: Any = None, **kwargs: Any) -> None:
super(GetByIdResponse, self).__init__(**kwargs)
self.model = model
@schema_property("model")
def model(self) -> Any:
return self._property_model
@model.setter
def model(self, value: Any) -> None:
if value is None:
self._property_model = None
return
if isinstance(value, dict):
value = Model.from_dict(value)
else:
self.assert_isinstance(value, "model", Model)
self._property_model = value
| GetByIdResponse |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/sequential.py | {
"start": 1978,
"end": 23345
} | class ____(functional.Functional):
"""`Sequential` groups a linear stack of layers into a `tf.keras.Model`.
`Sequential` provides training and inference features on this model.
Examples:
>>> # Optionally, the first layer can receive an `input_shape` argument:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
>>> # Afterwards, we do automatic shape inference:
>>> model.add(tf.keras.layers.Dense(4))
>>> # This is identical to the following:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.Input(shape=(16,)))
>>> model.add(tf.keras.layers.Dense(8))
>>> # Note that you can also omit the `input_shape` argument.
>>> # In that case the model doesn't have any weights until the first call
>>> # to a training/evaluation method (since it isn't yet built):
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8))
>>> model.add(tf.keras.layers.Dense(4))
>>> # model.weights not created yet
>>> # Whereas if you specify the input shape, the model gets built
>>> # continuously as you are adding layers:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
>>> model.add(tf.keras.layers.Dense(4))
>>> len(model.weights)
4
>>> # When using the delayed-build pattern (no input shape specified), you can
>>> # choose to manually build your model by calling
>>> # `build(batch_input_shape)`:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Dense(8))
>>> model.add(tf.keras.layers.Dense(4))
>>> model.build((None, 16))
>>> len(model.weights)
4
```python
# Note that when using the delayed-build pattern (no input shape specified),
# the model gets built the first time you call `fit`, `eval`, or `predict`,
# or the first time you call the model on some input data.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(8))
model.add(tf.keras.layers.Dense(1))
model.compile(optimizer='sgd', loss='mse')
# This builds the model for the first time:
model.fit(x, y, batch_size=32, epochs=10)
```
"""
@trackable.no_automatic_dependency_tracking
def __init__(self, layers=None, name=None):
"""Creates a `Sequential` model instance.
Args:
layers: Optional list of layers to add to the model.
name: Optional name for the model.
"""
# Skip the init in FunctionalModel since model doesn't have input/output yet
super(functional.Functional, self).__init__( # pylint: disable=bad-super-call
name=name, autocast=False)
self.supports_masking = True
self._compute_output_and_mask_jointly = True
self._auto_track_sub_layers = False
self._inferred_input_shape = None
self._has_explicit_input_shape = False
self._input_dtype = None
self._layer_call_argspecs = {}
self._created_nodes = set()
# Flag that indicate whether the sequential network topology has been
# created. It is false when there isn't any layer, or the layers doesn't
# have input shape.
self._graph_initialized = False
# Unfortunately some Sequential models using custom layers or FeatureColumn
# layers have multiple inputs. This is fundamentally incompatible with
# most of the Sequential API, and we have to disable a number of features
# for such models.
self._use_legacy_deferred_behavior = False
# Add to the model any layers passed to the constructor.
if layers:
if not isinstance(layers, (list, tuple)):
layers = [layers]
for layer in layers:
self.add(layer)
@property
def layers(self):
# Historically, `sequential.layers` only returns layers that were added
# via `add`, and omits the auto-generated `InputLayer` that comes at the
# bottom of the stack.
# `Trackable` manages the `_layers` attributes and does filtering
# over it.
layers = super(Sequential, self).layers
if layers and isinstance(layers[0], input_layer.InputLayer):
return layers[1:]
return layers[:]
@trackable.no_automatic_dependency_tracking
def add(self, layer):
"""Adds a layer instance on top of the layer stack.
Args:
layer: layer instance.
Raises:
TypeError: If `layer` is not a layer instance.
ValueError: In case the `layer` argument does not
know its input shape.
ValueError: In case the `layer` argument has
multiple output tensors, or is already connected
somewhere else (forbidden in `Sequential` models).
"""
# If we are passed a Keras tensor created by keras.Input(), we can extract
# the input layer from its keras history and use that without any loss of
# generality.
if hasattr(layer, '_keras_history'):
origin_layer = layer._keras_history[0]
if isinstance(origin_layer, input_layer.InputLayer):
layer = origin_layer
logging.warning(
'Please add `keras.layers.InputLayer` instead of `keras.Input` to '
'Sequential model. `keras.Input` is intended to be used by '
'Functional model.')
if isinstance(layer, module.Module):
if not isinstance(layer, base_layer.Layer):
layer = functional.ModuleWrapper(layer)
else:
raise TypeError('The added layer must be '
'an instance of class Layer. '
'Found: ' + str(layer))
tf_utils.assert_no_legacy_layers([layer])
if not self._is_layer_name_unique(layer):
raise ValueError('All layers added to a Sequential model '
'should have unique names. Name "%s" is already the name'
' of a layer in this model. Update the `name` argument '
'to pass a unique name.' % (layer.name,))
self.built = False
set_inputs = False
self._maybe_create_attribute('_self_tracked_trackables', [])
if not self._self_tracked_trackables:
if isinstance(layer, input_layer.InputLayer):
# Case where the user passes an Input or InputLayer layer via `add`.
set_inputs = True
else:
batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)
if batch_shape:
# Instantiate an input layer.
x = input_layer.Input(
batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')
# This will build the current layer
# and create the node connecting the current layer
# to the input layer we just created.
layer(x)
set_inputs = True
if set_inputs:
outputs = nest.flatten(layer._inbound_nodes[-1].outputs)
if len(outputs) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
self.outputs = outputs
self.inputs = layer_utils.get_source_inputs(self.outputs[0])
self.built = True
self._has_explicit_input_shape = True
elif self.outputs:
# If the model is being built continuously on top of an input layer:
# refresh its output.
output_tensor = layer(self.outputs[0])
if len(nest.flatten(output_tensor)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
self.outputs = [output_tensor]
self.built = True
if set_inputs or self._graph_initialized:
self._init_graph_network(self.inputs, self.outputs)
self._graph_initialized = True
else:
self._self_tracked_trackables.append(layer)
self._handle_deferred_layer_dependencies([layer])
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
@trackable.no_automatic_dependency_tracking
def pop(self):
"""Removes the last layer in the model.
Raises:
TypeError: if there are no layers in the model.
"""
if not self.layers:
raise TypeError('There are no layers in the model.')
layer = self._self_tracked_trackables.pop()
self._layer_call_argspecs.pop(layer)
if not self.layers:
self.outputs = None
self.inputs = None
self.built = False
self._inferred_input_shape = None
self._has_explicit_input_shape = False
self._graph_initialized = False
elif self._graph_initialized:
self.layers[-1]._outbound_nodes = []
self.outputs = [self.layers[-1].output]
self._init_graph_network(self.inputs, self.outputs)
self.built = True
@trackable.no_automatic_dependency_tracking
def _build_graph_network_for_inferred_shape(self,
input_shape,
input_dtype=None):
if input_shape is None or not self.layers:
return
if not tf2.enabled() or not ops.executing_eagerly_outside_functions():
# This behavior is disabled in V1 or when eager execution is disabled.
return
if (not self._has_explicit_input_shape and
not self._use_legacy_deferred_behavior):
# Determine whether the input shape is novel, i.e. whether the model
# should be rebuilt.
input_shape = tuple(input_shape)
if self._inferred_input_shape is None:
new_shape = input_shape
else:
new_shape = relax_input_shape(self._inferred_input_shape, input_shape)
if (new_shape is not None and new_shape != self._inferred_input_shape):
# A novel shape has been received: we need to rebuild the model.
# In case we are inside a graph function, we step out of it.
with ops.init_scope():
inputs = input_layer.Input(
batch_shape=new_shape,
dtype=input_dtype,
name=self.layers[0].name + '_input')
layer_input = inputs
created_nodes = set()
for layer in self.layers:
# Clear nodes previously created via this method. This prevents
# node accumulation and ensures that e.g. `layer.output` is
# always connected to `model.inputs`
# (this is important e.g. for the feature extraction use case).
# We don't just do `layer._inbound_nodes = []` in order
# not to break shared layers added to Sequential models (which is
# technically illegal as per the `add()` docstring,
# but wasn't previously disabled).
clear_previously_created_nodes(layer, self._created_nodes)
try:
# Create Functional API connection by calling the current layer
layer_output = layer(layer_input)
except: # pylint:disable=bare-except
# Functional API calls may fail for a number of reasons:
# 1) The layer may be buggy. In this case it will be easier for
# the user to debug if we fail on the first call on concrete data,
# instead of our own call on a symbolic input.
# 2) The layer is dynamic (graph-incompatible) and hasn't
# overridden `compute_output_shape`. In this case, it is
# impossible to build a graph network.
# 3) The layer is otherwise incompatible with the Functional API
# (e.g. this is the case for some probabilistic layers that rely
# on hacks and that do not return tensors).
# In all these cases, we should avoid creating a graph network
# (or we simply can't).
self._use_legacy_deferred_behavior = True
return
if len(nest.flatten(layer_output)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
# Keep track of nodes just created above
track_nodes_created_by_last_call(layer, created_nodes)
layer_input = layer_output
outputs = layer_output
self._created_nodes = created_nodes
try:
# Initialize a graph Network. This call will never fail for
# a stack of valid Keras layers.
# However some users have layers that are fundamentally incompatible
# with the Functional API, which do not return tensors. In this
# case, we fall back to the legacy deferred behavior.
# TODO(fchollet): consider raising here, as we should not be
# supporting such layers.
self._init_graph_network(inputs, outputs)
self._graph_initialized = True
except: # pylint:disable=bare-except
self._use_legacy_deferred_behavior = True
self._inferred_input_shape = new_shape
@generic_utils.default
def build(self, input_shape=None):
if self._graph_initialized:
self._init_graph_network(self.inputs, self.outputs)
else:
if input_shape is None:
raise ValueError('You must provide an `input_shape` argument.')
self._build_graph_network_for_inferred_shape(input_shape)
if not self.built:
input_shape = tuple(input_shape)
self._build_input_shape = input_shape
super(Sequential, self).build(input_shape)
self.built = True
def call(self, inputs, training=None, mask=None): # pylint: disable=redefined-outer-name
# If applicable, update the static input shape of the model.
if not self._has_explicit_input_shape:
if not tensor_util.is_tf_type(inputs) and not isinstance(
inputs, np_arrays.ndarray):
# This is a Sequential with multiple inputs. This is technically an
# invalid use case of Sequential, but we tolerate it for backwards
# compatibility.
self._use_legacy_deferred_behavior = True
self._build_input_shape = nest.map_structure(_get_shape_tuple, inputs)
if tf2.enabled():
logging.warning('Layers in a Sequential model should only have a '
'single input tensor, but we receive a %s input: %s'
'\nConsider rewriting this model with the Functional '
'API.' % (type(inputs), inputs))
else:
self._build_graph_network_for_inferred_shape(inputs.shape, inputs.dtype)
if self._graph_initialized:
if not self.built:
self._init_graph_network(self.inputs, self.outputs)
return super(Sequential, self).call(inputs, training=training, mask=mask)
outputs = inputs # handle the corner case where self.layers is empty
for layer in self.layers:
# During each iteration, `inputs` are the inputs to `layer`, and `outputs`
# are the outputs of `layer` applied to `inputs`. At the end of each
# iteration `inputs` is set to `outputs` to prepare for the next layer.
kwargs = {}
argspec = self._layer_call_argspecs[layer].args
if 'mask' in argspec:
kwargs['mask'] = mask
if 'training' in argspec:
kwargs['training'] = training
outputs = layer(inputs, **kwargs)
if len(nest.flatten(outputs)) != 1:
raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
# `outputs` will be the inputs to the next layer.
inputs = outputs
mask = getattr(outputs, '_keras_mask', None)
return outputs
def compute_output_shape(self, input_shape):
shape = input_shape
for layer in self.layers:
shape = layer.compute_output_shape(shape)
return shape
def compute_mask(self, inputs, mask):
# TODO(omalleyt): b/123540974 This function is not really safe to call
# by itself because it will duplicate any updates and losses in graph
# mode by `call`ing the Layers again.
outputs = self.call(inputs, mask=mask) # pylint: disable=unexpected-keyword-arg
return getattr(outputs, '_keras_mask', None)
def predict_proba(self, x, batch_size=32, verbose=0):
"""Generates class probability predictions for the input samples.
The input samples are processed batch by batch.
Args:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A Numpy array of probability predictions.
"""
warnings.warn('`model.predict_proba()` is deprecated and '
'will be removed after 2021-01-01. '
'Please use `model.predict()` instead.')
preds = self.predict(x, batch_size, verbose)
if preds.min() < 0. or preds.max() > 1.:
logging.warning('Network returning invalid probability values. '
'The last layer might not normalize predictions '
'into probabilities '
'(like softmax or sigmoid would).')
return preds
def predict_classes(self, x, batch_size=32, verbose=0):
"""Generate class predictions for the input samples.
The input samples are processed batch by batch.
Args:
x: input data, as a Numpy array or list of Numpy arrays
(if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
Returns:
A numpy array of class predictions.
"""
warnings.warn('`model.predict_classes()` is deprecated and '
'will be removed after 2021-01-01. '
'Please use instead:'
'* `np.argmax(model.predict(x), axis=-1)`, '
' if your model does multi-class classification '
' (e.g. if it uses a `softmax` last-layer activation).'
'* `(model.predict(x) > 0.5).astype("int32")`, '
' if your model does binary classification '
' (e.g. if it uses a `sigmoid` last-layer activation).')
proba = self.predict(x, batch_size=batch_size, verbose=verbose)
if proba.shape[-1] > 1:
return proba.argmax(axis=-1)
else:
return (proba > 0.5).astype('int32')
def get_config(self):
layer_configs = []
for layer in super(Sequential, self).layers:
# `super().layers` include the InputLayer if available (it is filtered out
# of `self.layers`). Note that `self._self_tracked_trackables` is managed
# by the tracking infrastructure and should not be used.
layer_configs.append(generic_utils.serialize_keras_object(layer))
config = {
'name': self.name,
'layers': copy.deepcopy(layer_configs)
}
if not self._is_graph_network and self._build_input_shape is not None:
config['build_input_shape'] = self._build_input_shape
return config
@classmethod
def from_config(cls, config, custom_objects=None):
if 'name' in config:
name = config['name']
build_input_shape = config.get('build_input_shape')
layer_configs = config['layers']
else:
name = None
build_input_shape = None
layer_configs = config
model = cls(name=name)
for layer_config in layer_configs:
layer = layer_module.deserialize(layer_config,
custom_objects=custom_objects)
model.add(layer)
if (not model.inputs and build_input_shape and
isinstance(build_input_shape, (tuple, list))):
model.build(build_input_shape)
return model
@property
def input_spec(self):
if hasattr(self, '_manual_input_spec'):
return self._manual_input_spec
if self.layers and hasattr(self.layers[0], 'input_spec'):
return self.layers[0].input_spec
return None
@input_spec.setter
def input_spec(self, value):
self._manual_input_spec = value
@property
def _trackable_saved_model_saver(self):
return model_serialization.SequentialSavedModelSaver(self)
def _is_layer_name_unique(self, layer):
for ref_layer in self.layers:
if layer.name == ref_layer.name and ref_layer is not layer:
return False
return True
def _assert_weights_created(self):
if self._graph_initialized:
return
# When the graph has not been initialized, use the Model's implementation to
# to check if the weights has been created.
super(functional.Functional, self)._assert_weights_created() # pylint: disable=bad-super-call
def _get_shape_tuple(t):
if hasattr(t, 'shape'):
shape = t.shape
if isinstance(shape, tuple):
return shape
if shape.rank is not None:
return tuple(shape.as_list())
return None
return None
def relax_input_shape(shape_1, shape_2):
if shape_1 is None or shape_2 is None:
return None
if len(shape_1) != len(shape_2):
return None
return tuple(None if d1 != d2 else d1 for d1, d2 in zip(shape_1, shape_2))
def clear_previously_created_nodes(layer, created_nodes):
"""Remove nodes from `created_nodes` from the layer's inbound_nodes."""
for node in layer._inbound_nodes:
prev_layers = node.inbound_layers
for prev_layer in nest.flatten(prev_layers):
prev_layer._outbound_nodes = [
n for n in prev_layer._outbound_nodes
if n not in created_nodes]
layer._inbound_nodes = [
n for n in layer._inbound_nodes if n not in created_nodes]
def track_nodes_created_by_last_call(layer, created_nodes):
"""Adds to `created_nodes` the nodes created by the last call to `layer`."""
if not layer._inbound_nodes:
return
created_nodes.add(layer._inbound_nodes[-1])
prev_layers = layer._inbound_nodes[-1].inbound_layers
for prev_layer in nest.flatten(prev_layers):
if prev_layer._outbound_nodes:
created_nodes.add(prev_layer._outbound_nodes[-1])
| Sequential |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_responses/records/groups_records_builder.py | {
"start": 196,
"end": 492
} | class ____(ZendeskSupportRecordBuilder):
@classmethod
def groups_record(cls) -> "GroupsRecordBuilder":
record_template = cls.extract_record("groups", __file__, NestedPath(["groups", 0]))
return cls(record_template, FieldPath("id"), FieldPath("updated_at"))
| GroupsRecordBuilder |
python | scikit-image__scikit-image | benchmarks/benchmark_peak_local_max.py | {
"start": 437,
"end": 1103
} | class ____:
def setup(self):
mask = np.zeros([500, 500], dtype=bool)
x, y = np.indices((500, 500))
x_c = x // 20 * 20 + 10
y_c = y // 20 * 20 + 10
mask[(x - x_c) ** 2 + (y - y_c) ** 2 < 8**2] = True
# create a mask, label each disk,
self.labels, num_objs = ndi.label(mask)
# create distance image for peak searching
self.dist = ndi.distance_transform_edt(mask)
def time_peak_local_max(self):
peak_local_max(
self.dist,
labels=self.labels,
min_distance=20,
exclude_border=False,
**peak_kwargs,
)
| PeakLocalMaxSuite |
python | sqlalchemy__sqlalchemy | test/orm/test_mapper.py | {
"start": 82658,
"end": 86296
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"cartographers",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("alias", String(50)),
Column("quip", String(100)),
)
Table(
"maps",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("cart_id", Integer, ForeignKey("cartographers.id")),
Column("state", String(2)),
Column("data", sa.Text),
)
@classmethod
def setup_classes(cls):
class Cartographer(cls.Basic):
pass
class Map(cls.Basic):
pass
def test_mappish(self):
maps, Cartographer, cartographers, Map = (
self.tables.maps,
self.classes.Cartographer,
self.tables.cartographers,
self.classes.Map,
)
self.mapper(
Cartographer,
cartographers,
properties=dict(query=cartographers.c.quip),
)
self.mapper(
Map,
maps,
properties=dict(mapper=relationship(Cartographer, backref="maps")),
)
c = Cartographer(
name="Lenny", alias="The Dude", query="Where be dragons?"
)
Map(state="AK", mapper=c)
sess = fixture_session()
sess.add(c)
sess.flush()
sess.expunge_all()
for C, M in (
(Cartographer, Map),
(sa.orm.aliased(Cartographer), sa.orm.aliased(Map)),
):
c1 = (
sess.query(C)
.filter(C.alias == "The Dude")
.filter(C.query == "Where be dragons?")
).one()
sess.query(M).filter(M.mapper == c1).one()
def test_direct_stateish(self):
for reserved in (
sa.orm.instrumentation.ClassManager.STATE_ATTR,
sa.orm.instrumentation.ClassManager.MANAGER_ATTR,
):
t = Table(
"t",
sa.MetaData(),
Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column(reserved, Integer),
)
class T:
pass
clear_mappers()
assert_raises_message(
KeyError,
(
"%r: requested attribute name conflicts with "
"instrumentation attribute of the same name." % reserved
),
self.mapper_registry.map_imperatively,
T,
t,
)
def test_indirect_stateish(self):
maps = self.tables.maps
for reserved in (
sa.orm.instrumentation.ClassManager.STATE_ATTR,
sa.orm.instrumentation.ClassManager.MANAGER_ATTR,
):
class M:
pass
clear_mappers()
assert_raises_message(
KeyError,
(
"requested attribute name conflicts with "
"instrumentation attribute of the same name"
),
self.mapper_registry.map_imperatively,
M,
maps,
properties={reserved: maps.c.state},
)
| MagicNamesTest |
python | huggingface__transformers | src/transformers/models/dac/modeling_dac.py | {
"start": 10054,
"end": 11153
} | class ____(nn.Module):
"""Decoder block used in DAC decoder."""
def __init__(self, config: DacConfig, stride: int = 1, stride_index: int = 1):
super().__init__()
input_dim = config.decoder_hidden_size // 2**stride_index
output_dim = config.decoder_hidden_size // 2 ** (stride_index + 1)
self.snake1 = Snake1d(input_dim)
self.conv_t1 = nn.ConvTranspose1d(
input_dim,
output_dim,
kernel_size=2 * stride,
stride=stride,
padding=math.ceil(stride / 2),
)
self.res_unit1 = DacResidualUnit(output_dim, dilation=1)
self.res_unit2 = DacResidualUnit(output_dim, dilation=3)
self.res_unit3 = DacResidualUnit(output_dim, dilation=9)
def forward(self, hidden_state):
hidden_state = self.snake1(hidden_state)
hidden_state = self.conv_t1(hidden_state)
hidden_state = self.res_unit1(hidden_state)
hidden_state = self.res_unit2(hidden_state)
hidden_state = self.res_unit3(hidden_state)
return hidden_state
| DacDecoderBlock |
python | langchain-ai__langchain | libs/core/langchain_core/tracers/event_stream.py | {
"start": 1466,
"end": 2342
} | class ____(TypedDict):
"""Information about a run.
This is used to keep track of the metadata associated with a run.
"""
name: str
"""The name of the run."""
tags: list[str]
"""The tags associated with the run."""
metadata: dict[str, Any]
"""The metadata associated with the run."""
run_type: str
"""The type of the run."""
inputs: NotRequired[Any]
"""The inputs to the run."""
parent_run_id: UUID | None
"""The ID of the parent run."""
def _assign_name(name: str | None, serialized: dict[str, Any] | None) -> str:
"""Assign a name to a run."""
if name is not None:
return name
if serialized is not None:
if "name" in serialized:
return serialized["name"]
if "id" in serialized:
return serialized["id"][-1]
return "Unnamed"
T = TypeVar("T")
| RunInfo |
python | ZoranPandovski__al-go-rithms | data_structures/Linked_list/Python/linked_list.py | {
"start": 40,
"end": 3521
} | class ____:
class Node:
next_node = None
data = None
def compare_to(self, node):
return node.data == self.data
def __init__(self, data, next_node):
self.data = data
self.next_node = next_node
class NoSuchNodeException(Exception):
pass
head = None
list_size = 0
def __init__(self):
pass
def size(self):
"""
This methods returns the size of the linked list.
:return: The size of the linked list. That is to say; the amount of elements in the linked list.
"""
return self.list_size
def is_empty(self):
"""
This methods determines whether the list is empty, which means it holds no values.
:return: True if the list is empty; False otherwise.
"""
return self.list_size == 0
def get_first(self):
"""
This method returns the first element of the list.
:return: The first element of the list.
"""
if self.is_empty():
raise self.NoSuchNodeException()
return self.head.data
def getNode(self, i):
"""
This method takes an index and finds the node residing at the given index.
Raises an NoSuchNodeException() if there is no such element.
:param i: The index of the node.
:return: The ith node of the list.
"""
if i < 0 or i > self.list_size - 1:
raise self.NoSuchNodeException()
current = 0
p = self.head
while current < i:
p = p.next_node
current += 1
return p
def add_first(self, data):
"""
This method adds a node to the front of the list.
:param data: The data of the node to add.
"""
node = self.Node(data, self.head)
self.head = node
self.list_size += 1
def add_last(self, data):
"""
This method adds a node to the back of the list.
:param data: The data of the node to add.
"""
node = self.Node(data, None)
if self.is_empty():
self.head = node
else:
tail = self.getNode(self.list_size - 1)
tail.next_node = node
self.list_size += 1
def remove_first(self):
"""
This method removes the first element of the list.
:return: The value of the first element.
"""
if self.is_empty():
raise self.NoSuchNodeException()
tmp_val = self.head.data
self.head = self.head.next_node
self.list_size -= 1
return tmp_val
def remove_last(self):
"""
This method removes the last element of the list.
:return: The value of the removed element.
"""
if self.is_empty():
raise self.NoSuchNodeException()
tail = self.getNode(self.list_size - 1)
tail_data = tail.data
if self.list_size == 1:
self.head = None
else:
before_tail = self.getNode(self.list_size - 2)
before_tail.next_node = None
self.list_size -= 1
return tail_data
def print_list(self):
"""
This method prints a list which a newline after each element.
"""
p = self.head
i = 0
while i < self.size():
print(p.data)
i += 1
p = p.next_node
| LinkedList |
python | scipy__scipy | scipy/stats/tests/test_sampling.py | {
"start": 12246,
"end": 16126
} | class ____:
def test_input_validation(self, method):
match = "`qmc_engine` must be an instance of..."
with pytest.raises(ValueError, match=match):
Method = getattr(stats.sampling, method)
gen = Method(StandardNormal())
gen.qrvs(qmc_engine=0)
# issues with QMCEngines and old NumPy
Method = getattr(stats.sampling, method)
gen = Method(StandardNormal())
match = "`d` must be consistent with dimension of `qmc_engine`."
with pytest.raises(ValueError, match=match):
gen.qrvs(d=3, qmc_engine=stats.qmc.Halton(2))
qrngs = [None, stats.qmc.Sobol(1, seed=0), stats.qmc.Halton(3, seed=0)]
# `size=None` should not add anything to the shape, `size=1` should
sizes = [(None, tuple()), (1, (1,)), (4, (4,)),
((4,), (4,)), ((2, 4), (2, 4))] # type: ignore
# Neither `d=None` nor `d=1` should add anything to the shape
ds = [(None, tuple()), (1, tuple()), (3, (3,))]
@pytest.mark.parametrize('qrng', qrngs)
@pytest.mark.parametrize('size_in, size_out', sizes)
@pytest.mark.parametrize('d_in, d_out', ds)
@pytest.mark.thread_unsafe(reason="fails in parallel")
def test_QRVS_shape_consistency(self, qrng, size_in, size_out,
d_in, d_out, method):
w32 = sys.platform == "win32" and platform.architecture()[0] == "32bit"
if w32 and method == "NumericalInversePolynomial":
pytest.xfail("NumericalInversePolynomial.qrvs fails for Win "
"32-bit")
dist = StandardNormal()
Method = getattr(stats.sampling, method)
gen = Method(dist)
# If d and qrng.d are inconsistent, an error is raised
if d_in is not None and qrng is not None and qrng.d != d_in:
match = "`d` must be consistent with dimension of `qmc_engine`."
with pytest.raises(ValueError, match=match):
gen.qrvs(size_in, d=d_in, qmc_engine=qrng)
return
# Sometimes d is really determined by qrng
if d_in is None and qrng is not None and qrng.d != 1:
d_out = (qrng.d,)
shape_expected = size_out + d_out
qrng2 = deepcopy(qrng)
qrvs = gen.qrvs(size=size_in, d=d_in, qmc_engine=qrng)
if size_in is not None:
assert qrvs.shape == shape_expected
if qrng2 is not None:
uniform = qrng2.random(np.prod(size_in) or 1)
qrvs2 = stats.norm.ppf(uniform).reshape(shape_expected)
assert_allclose(qrvs, qrvs2, atol=1e-12)
def test_QRVS_size_tuple(self, method):
# QMCEngine samples are always of shape (n, d). When `size` is a tuple,
# we set `n = prod(size)` in the call to qmc_engine.random, transform
# the sample, and reshape it to the final dimensions. When we reshape,
# we need to be careful, because the _columns_ of the sample returned
# by a QMCEngine are "independent"-ish, but the elements within the
# columns are not. We need to make sure that this doesn't get mixed up
# by reshaping: qrvs[..., i] should remain "independent"-ish of
# qrvs[..., i+1], but the elements within qrvs[..., i] should be
# transformed from the same low-discrepancy sequence.
dist = StandardNormal()
Method = getattr(stats.sampling, method)
gen = Method(dist)
size = (3, 4)
d = 5
qrng = stats.qmc.Halton(d, seed=0)
qrng2 = stats.qmc.Halton(d, seed=0)
uniform = qrng2.random(np.prod(size))
qrvs = gen.qrvs(size=size, d=d, qmc_engine=qrng)
qrvs2 = stats.norm.ppf(uniform)
for i in range(d):
sample = qrvs[..., i]
sample2 = qrvs2[:, i].reshape(size)
assert_allclose(sample, sample2, atol=1e-12)
| TestQRVS |
python | weaviate__weaviate-python-client | weaviate/users/base.py | {
"start": 7165,
"end": 9534
} | class ____(Generic[ConnectionType], _BaseExecutor[ConnectionType]):
@overload
def get_assigned_roles(
self, *, user_id: str, include_permissions: Literal[False] = False
) -> executor.Result[Dict[str, RoleBase]]: ...
@overload
def get_assigned_roles(
self, *, user_id: str, include_permissions: Literal[True]
) -> executor.Result[Dict[str, Role]]: ...
@overload
def get_assigned_roles(
self,
*,
user_id: str,
include_permissions: bool = False,
) -> executor.Result[Union[Dict[str, Role], Dict[str, RoleBase]]]: ...
def get_assigned_roles(
self,
*,
user_id: str,
include_permissions: bool = False,
) -> executor.Result[Union[Dict[str, Role], Dict[str, RoleBase]]]:
"""Get the roles assigned to a user specific to the configured OIDC's dynamic auth functionality.
Args:
user_id: The user ID to get the roles for.
Returns:
A dictionary with role names as keys and the `Role` objects as values.
"""
return self._get_roles_of_user(
user_id,
USER_TYPE_OIDC,
include_permissions,
)
def assign_roles(
self,
*,
user_id: str,
role_names: Union[str, List[str]],
) -> executor.Result[None]:
"""Assign roles to a user specific to the configured OIDC's dynamic auth functionality.
Args:
role_names: The names of the roles to assign to the user.
user_id: The user to assign the roles to.
"""
return self._assign_roles_to_user(
[role_names] if isinstance(role_names, str) else role_names,
user_id,
USER_TYPE_OIDC,
)
def revoke_roles(
self,
*,
user_id: str,
role_names: Union[str, List[str]],
) -> executor.Result[None]:
"""Revoke roles from a user specific to the configured OIDC's dynamic auth functionality.
Args:
role_names: The names of the roles to revoke from the user.
user_id: The user to revoke the roles from.
"""
return self._revoke_roles_from_user(
[role_names] if isinstance(role_names, str) else role_names,
user_id,
USER_TYPE_OIDC,
)
| _UsersOIDCExecutor |
python | protocolbuffers__protobuf | python/google/protobuf/internal/well_known_types_test.py | {
"start": 1392,
"end": 1942
} | class ____(parameterized.TestCase):
def CheckTimestampConversion(self, message, text):
self.assertEqual(text, message.ToJsonString())
parsed_message = timestamp_pb2.Timestamp()
parsed_message.FromJsonString(text)
self.assertEqual(message, parsed_message)
def CheckDurationConversion(self, message, text):
self.assertEqual(text, message.ToJsonString())
parsed_message = duration_pb2.Duration()
parsed_message.FromJsonString(text)
self.assertEqual(message, parsed_message)
@testing_refleaks.TestCase
| TimeUtilTestBase |
python | PrefectHQ__prefect | src/integrations/prefect-docker/tests/test_containers.py | {
"start": 273,
"end": 1012
} | class ____:
async def test_create_kwargs(self, mock_docker_host: MagicMock):
create_kwargs = dict(
image="test_image",
command="test_command",
name="test_name",
detach=False,
ports={"2222/tcp": 3333},
entrypoint=None,
environment=None,
)
with disable_run_logger():
container = await create_docker_container.fn(
docker_host=mock_docker_host, **create_kwargs
)
assert container.id == "id_1"
client = mock_docker_host.get_client()
client.__enter__.return_value.containers.create.assert_called_once_with(
**create_kwargs
)
| TestCreateDockerContainer |
python | openai__openai-python | src/openai/types/beta/realtime/input_audio_buffer_append_event.py | {
"start": 233,
"end": 662
} | class ____(BaseModel):
audio: str
"""Base64-encoded audio bytes.
This must be in the format specified by the `input_audio_format` field in the
session configuration.
"""
type: Literal["input_audio_buffer.append"]
"""The event type, must be `input_audio_buffer.append`."""
event_id: Optional[str] = None
"""Optional client-generated ID used to identify this event."""
| InputAudioBufferAppendEvent |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/relationships/tutorial001_py310.py | {
"start": 468,
"end": 588
} | class ____(SQLModel):
id: int | None = None
name: str | None = None
headquarters: str | None = None
| TeamUpdate |
python | pypa__warehouse | warehouse/banners/models.py | {
"start": 235,
"end": 1039
} | class ____(db.Model):
__tablename__ = "banners"
__repr__ = make_repr("text")
DEFAULT_FA_ICON = "fa-comment-alt"
DEFAULT_BTN_LABEL = "See more"
# internal name
name: Mapped[str]
# banner display configuration
text: Mapped[str]
link_url: Mapped[str]
link_label: Mapped[str] = mapped_column(default=DEFAULT_BTN_LABEL)
fa_icon: Mapped[str] = mapped_column(default=DEFAULT_FA_ICON)
dismissable: Mapped[bool_false]
# visibility control
# TODO: Migrate to `warehouse.utils.db.types.bool_false` - triggers migration
active: Mapped[bool] = mapped_column(default=False)
end: Mapped[date]
@property
def is_live(self):
# date.today is using the server timezone which is UTC
return self.active and date.today() <= self.end
| Banner |
python | pytorch__pytorch | test/distributed/test_control_collectives.py | {
"start": 621,
"end": 7398
} | class ____(TestCase):
def test_barrier(self) -> None:
store = dist.HashStore()
world_size = 2
def f(rank: int) -> None:
collectives = dist._StoreCollectives(store, rank, world_size)
collectives.barrier("foo", timedelta(seconds=10), True)
with ThreadPool(world_size) as pool:
pool.map(f, range(world_size))
def test_broadcast(self) -> None:
store = dist.HashStore()
world_size = 4
timeout = timedelta(seconds=10)
def f(rank: int) -> None:
collectives = dist._StoreCollectives(store, rank, world_size)
if rank == 2:
collectives.broadcast_send("foo", b"data", timeout)
else:
out = collectives.broadcast_recv("foo", timeout)
self.assertEqual(out, b"data")
with ThreadPool(world_size) as pool:
pool.map(f, range(world_size))
def test_gather(self) -> None:
store = dist.HashStore()
world_size = 4
timeout = timedelta(seconds=10)
def f(rank: int) -> None:
collectives = dist._StoreCollectives(store, rank, world_size)
if rank == 2:
out = collectives.gather_recv("foo", str(rank), timeout)
self.assertEqual(out, [b"0", b"1", b"2", b"3"])
else:
collectives.gather_send("foo", str(rank), timeout)
with ThreadPool(world_size) as pool:
pool.map(f, range(world_size))
def test_scatter(self) -> None:
store = dist.HashStore()
world_size = 4
timeout = timedelta(seconds=10)
def f(rank: int) -> None:
collectives = dist._StoreCollectives(store, rank, world_size)
if rank == 2:
out = collectives.scatter_send(
"foo", [str(i) for i in range(world_size)], timeout
)
else:
out = collectives.scatter_recv("foo", timeout)
self.assertEqual(out, str(rank).encode())
with ThreadPool(world_size) as pool:
pool.map(f, range(world_size))
def test_all_sum(self) -> None:
store = dist.HashStore()
world_size = 4
timeout = timedelta(seconds=10)
def f(rank: int) -> None:
collectives = dist._StoreCollectives(store, rank, world_size)
out = collectives.all_sum("foo", rank, timeout)
self.assertEqual(out, sum(range(world_size)))
with ThreadPool(world_size) as pool:
pool.map(f, range(world_size))
def test_broadcast_timeout(self) -> None:
store = dist.HashStore()
world_size = 4
timeout = timedelta(milliseconds=1)
collectives = dist._StoreCollectives(store, 1, world_size)
with self.assertRaisesRegex(Exception, "Wait timeout"):
collectives.broadcast_recv("foo", timeout)
def test_gather_timeout(self) -> None:
store = dist.HashStore()
world_size = 4
timeout = timedelta(milliseconds=1)
collectives = dist._StoreCollectives(store, 1, world_size)
with self.assertRaisesRegex(
Exception, "gather failed -- missing ranks: 0, 2, 3"
):
collectives.gather_recv("foo", "data", timeout)
def test_scatter_timeout(self) -> None:
store = dist.HashStore()
world_size = 4
timeout = timedelta(milliseconds=1)
collectives = dist._StoreCollectives(store, 1, world_size)
with self.assertRaisesRegex(Exception, "Wait timeout"):
collectives.scatter_recv("foo", timeout)
def test_all_gather_timeout(self) -> None:
store = dist.HashStore()
world_size = 4
timeout = timedelta(milliseconds=1)
collectives = dist._StoreCollectives(store, 1, world_size)
with self.assertRaisesRegex(
Exception, "all_gather failed -- missing ranks: 0, 2, 3"
):
collectives.all_gather("foo", "data", timeout)
def test_barrier_timeout(self) -> None:
store = dist.HashStore()
world_size = 4
timeout = timedelta(milliseconds=1)
collectives = dist._StoreCollectives(store, 1, world_size)
with self.assertRaisesRegex(
Exception, "barrier failed -- missing ranks: 0, 2, 3"
):
collectives.barrier("foo", timeout, True)
def test_all_sum_timeout(self) -> None:
store = dist.HashStore()
world_size = 4
timeout = timedelta(milliseconds=1)
collectives = dist._StoreCollectives(store, 1, world_size)
with self.assertRaisesRegex(
Exception, "barrier failed -- missing ranks: 0, 2, 3"
):
collectives.all_sum("foo", 1, timeout)
def test_unique(self) -> None:
store = dist.HashStore()
collectives = dist._StoreCollectives(store, 1, 1)
collectives.broadcast_send("foo", "bar")
with self.assertRaisesRegex(Exception, "Key foo has already been used"):
collectives.broadcast_send("foo", "bar")
with self.assertRaisesRegex(Exception, "Key foo has already been used"):
collectives.broadcast_recv("foo")
with self.assertRaisesRegex(Exception, "Key foo has already been used"):
collectives.gather_send("foo", "bar")
with self.assertRaisesRegex(Exception, "Key foo has already been used"):
collectives.gather_recv("foo", "asdf")
with self.assertRaisesRegex(Exception, "Key foo has already been used"):
collectives.scatter_send("foo", ["asdf"])
with self.assertRaisesRegex(Exception, "Key foo has already been used"):
collectives.scatter_recv("foo")
with self.assertRaisesRegex(Exception, "Key foo has already been used"):
collectives.all_gather("foo", "bar")
with self.assertRaisesRegex(Exception, "Key foo has already been used"):
collectives.all_sum("foo", 2)
def test_simple_user_func(self) -> None:
store = dist.HashStore()
world_size = 4
def f(rank: int) -> None:
# user need to create child collectives
# but simple_user_func do not need to be changed for different child collectives
store_collectives = dist._StoreCollectives(store, rank, world_size)
out = simple_user_func(store_collectives, rank)
self.assertEqual(out, sum(range(world_size)))
with ThreadPool(world_size) as pool:
pool.map(f, range(world_size))
if __name__ == "__main__":
assert not torch.cuda._initialized, (
"test_distributed must not have initialized CUDA context on main process"
)
run_tests()
| TestCollectives |
python | ansible__ansible | lib/ansible/plugins/connection/psrp.py | {
"start": 10819,
"end": 30847
} | class ____(ConnectionBase):
transport = 'psrp'
module_implementation_preferences = ('.ps1', '.exe', '')
allow_executable = False
has_pipelining = True
# Satisfies mypy as this connection only ever runs with this plugin
_shell: PowerShellPlugin
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
self.always_pipeline_modules = True
self.has_native_async = True
self.runspace: RunspacePool | None = None
self.host: PSHost | None = None
self._last_pipeline: PowerShell | None = None
self._shell_type = 'powershell'
super(Connection, self).__init__(*args, **kwargs)
if not C.DEFAULT_DEBUG:
logging.getLogger('pypsrp').setLevel(logging.WARNING)
logging.getLogger('requests_credssp').setLevel(logging.INFO)
logging.getLogger('urllib3').setLevel(logging.INFO)
def _connect(self) -> Connection:
if not HAS_PYPSRP:
raise AnsibleError("pypsrp or dependencies are not installed: %s"
% to_native(PYPSRP_IMP_ERR))
super(Connection, self)._connect()
self._build_kwargs()
display.vvv("ESTABLISH PSRP CONNECTION FOR USER: %s ON PORT %s TO %s" %
(self._psrp_user, self._psrp_port, self._psrp_host),
host=self._psrp_host)
if not self.runspace:
connection = WSMan(**self._psrp_conn_kwargs)
# create our pseudo host to capture the exit code and host output
host_ui = PSHostUserInterface()
self.host = PSHost(None, None, False, "Ansible PSRP Host", None,
host_ui, None)
self.runspace = RunspacePool(
connection, host=self.host,
configuration_name=self._psrp_configuration_name
)
display.vvvvv(
"PSRP OPEN RUNSPACE: auth=%s configuration=%s endpoint=%s" %
(self._psrp_auth, self._psrp_configuration_name,
connection.transport.endpoint), host=self._psrp_host
)
try:
self.runspace.open()
except AuthenticationError as e:
raise AnsibleConnectionFailure("failed to authenticate with "
"the server: %s" % to_native(e))
except WinRMError as e:
raise AnsibleConnectionFailure(
"psrp connection failure during runspace open: %s"
% to_native(e)
)
except (ConnectionError, ConnectTimeout) as e:
raise AnsibleConnectionFailure(
"Failed to connect to the host via PSRP: %s"
% to_native(e)
)
self._connected = True
self._last_pipeline = None
return self
def reset(self) -> None:
if not self._connected:
self.runspace = None
return
# Try out best to ensure the runspace is closed to free up server side resources
try:
self.close()
except Exception as e:
# There's a good chance the connection was already closed so just log the error and move on
display.debug("PSRP reset - failed to closed runspace: %s" % to_text(e))
display.vvvvv("PSRP: Reset Connection", host=self._psrp_host)
self.runspace = None
self._connect()
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
super(Connection, self).exec_command(cmd, in_data=in_data,
sudoable=sudoable)
pwsh_in_data: bytes | str | None = None
script_args: list[str] | None = None
common_args_prefix = " ".join(_common_args)
if cmd.startswith(f"{common_args_prefix} -EncodedCommand"):
# This is a PowerShell script encoded by the shell plugin, we will
# decode the script and execute it in the runspace instead of
# starting a new interpreter to save on time
b_command = base64.b64decode(cmd.split(" ")[-1])
script = to_text(b_command, 'utf-16-le')
pwsh_in_data = to_text(in_data, errors="surrogate_or_strict", nonstring="passthru")
if pwsh_in_data and isinstance(pwsh_in_data, str) and pwsh_in_data.startswith("#!"):
# ANSIBALLZ wrapper, we need to get the interpreter and execute
# that as the script - note this won't work as basic.py relies
# on packages not available on Windows, once fixed we can enable
# this path
interpreter = to_native(pwsh_in_data.splitlines()[0][2:])
# script = "$input | &'%s' -" % interpreter
raise AnsibleError("cannot run the interpreter '%s' on the psrp "
"connection plugin" % interpreter)
# call build_module_command to get the bootstrap wrapper text
bootstrap_wrapper = self._shell.build_module_command('', '', '')
if bootstrap_wrapper == cmd:
# Do not display to the user each invocation of the bootstrap wrapper
display.vvv("PSRP: EXEC (via pipeline wrapper)")
else:
display.vvv("PSRP: EXEC %s" % script, host=self._psrp_host)
elif cmd.startswith(f"{common_args_prefix} -File "): # trailing space is on purpose
# Used when executing a script file, we will execute it in the runspace process
# instead on a new subprocess
script = 'param([string]$Path, [Parameter(ValueFromRemainingArguments)][string[]]$ScriptArgs) & $Path @ScriptArgs'
# Using shlex isn't perfect but it's good enough.
cmd = cmd[len(common_args_prefix) + 7:]
script_args = shlex.split(cmd)
display.vvv(f"PSRP: EXEC {cmd}")
else:
# In other cases we want to execute the cmd as the script. We add on the 'exit $LASTEXITCODE' to ensure the
# rc is propagated back to the connection plugin.
script = to_text(u"%s\nexit $LASTEXITCODE" % cmd)
pwsh_in_data = in_data
display.vvv(u"PSRP: EXEC %s" % script, host=self._psrp_host)
try:
rc, stdout, stderr = self._exec_psrp_script(
script=script,
input_data=pwsh_in_data.splitlines() if pwsh_in_data else None,
arguments=script_args,
)
except ReadTimeout as e:
raise AnsibleConnectionFailure(
"HTTP read timeout during PSRP script execution"
) from e
return rc, stdout, stderr
def put_file(self, in_path: str, out_path: str) -> None:
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._psrp_host)
script, in_data = _bootstrap_powershell_script('psrp_put_file.ps1', {
'Path': out_path,
}, has_input=True)
# Get the buffer size of each fragment to send, subtract 82 for the fragment, message, and other header info
# fields that PSRP adds. Adjust to size of the base64 encoded bytes length.
buffer_size = int((self.runspace.connection.max_payload_size - 82) / 4 * 3)
sha1_hash = sha1()
b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
if not os.path.exists(b_in_path):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
def read_gen():
yield from in_data.decode().splitlines()
offset = 0
with open(b_in_path, 'rb') as src_fd:
for b_data in iter((lambda: src_fd.read(buffer_size)), b""):
data_len = len(b_data)
offset += data_len
sha1_hash.update(b_data)
# PSRP technically supports sending raw bytes but that method requires a larger CLIXML message.
# Sending base64 is still more efficient here.
display.vvvvv("PSRP PUT %s to %s (offset=%d, size=%d" % (in_path, out_path, offset, data_len),
host=self._psrp_host)
b64_data = base64.b64encode(b_data)
yield [to_text(b64_data)]
if offset == 0: # empty file
yield [""]
rc, stdout, stderr = self._exec_psrp_script(script, read_gen())
if rc != 0:
raise AnsibleError(to_native(stderr))
put_output = json.loads(to_text(stdout))
local_sha1 = sha1_hash.hexdigest()
remote_sha1 = put_output.get("sha1")
if not remote_sha1:
raise AnsibleError("Remote sha1 was not returned, stdout: '%s', stderr: '%s'"
% (to_native(stdout), to_native(stderr)))
if not remote_sha1 == local_sha1:
raise AnsibleError("Remote sha1 hash %s does not match local hash %s"
% (to_native(remote_sha1), to_native(local_sha1)))
def fetch_file(self, in_path: str, out_path: str) -> None:
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path),
host=self._psrp_host)
out_path = out_path.replace('\\', '/')
b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
# because we are dealing with base64 data we need to get the max size
# of the bytes that the base64 size would equal
max_b64_size = int(self.runspace.connection.max_payload_size -
(self.runspace.connection.max_payload_size / 4 * 3))
buffer_size = max_b64_size - (max_b64_size % 1024)
script, in_data = _bootstrap_powershell_script('psrp_fetch_file.ps1', {
'Path': in_path,
'BufferSize': buffer_size,
})
ps = PowerShell(self.runspace)
ps.add_script(script)
ps.begin_invoke(in_data.decode().splitlines())
# Call poll once to get the first output telling us if it's a file/dir/failure
ps.poll_invoke()
if ps.output:
if ps.output.pop(0) == '[DIR]':
# to be consistent with other connection plugins, we assume the caller has created the target dir
return
with open(b_out_path, 'wb') as out_file:
while True:
while ps.output:
data = base64.b64decode(ps.output.pop(0))
out_file.write(data)
if ps.state == PSInvocationState.RUNNING:
ps.poll_invoke()
else:
break
ps.end_invoke()
rc, stdout, stderr = self._parse_pipeline_result(ps)
if rc != 0:
raise AnsibleError(f"failed to transfer file to '{out_path}': {to_text(stderr)}")
def close(self) -> None:
if self.runspace and self.runspace.state == RunspacePoolState.OPENED:
display.vvvvv("PSRP CLOSE RUNSPACE: %s" % (self.runspace.id),
host=self._psrp_host)
self.runspace.close()
self.runspace = None
self._connected = False
self._last_pipeline = None
def _build_kwargs(self) -> None:
self._psrp_host = self.get_option('remote_addr')
self._psrp_user = self.get_option('remote_user')
protocol = self.get_option('protocol')
port = self.get_option('port')
if protocol is None and port is None:
protocol = 'https'
port = 5986
elif protocol is None:
protocol = 'https' if int(port) != 5985 else 'http'
elif port is None:
port = 5986 if protocol == 'https' else 5985
self._psrp_port = int(port)
self._psrp_auth = self.get_option('auth')
self._psrp_configuration_name = self.get_option('configuration_name')
# cert validation can either be a bool or a path to the cert
cert_validation = self.get_option('cert_validation')
cert_trust_path = self.get_option('ca_cert')
if cert_validation == 'ignore':
psrp_cert_validation = False
elif cert_trust_path is not None:
psrp_cert_validation = cert_trust_path
else:
psrp_cert_validation = True
self._psrp_conn_kwargs = dict(
server=self._psrp_host,
port=self._psrp_port,
username=self._psrp_user,
password=self.get_option('remote_password'),
ssl=protocol == 'https',
path=self.get_option('path'),
auth=self._psrp_auth,
cert_validation=psrp_cert_validation,
connection_timeout=self.get_option('connection_timeout'),
encryption=self.get_option('message_encryption'),
proxy=self.get_option('proxy'),
no_proxy=boolean(self.get_option('ignore_proxy')),
max_envelope_size=self.get_option('max_envelope_size'),
operation_timeout=self.get_option('operation_timeout'),
read_timeout=self.get_option('read_timeout'),
reconnection_retries=self.get_option('reconnection_retries'),
reconnection_backoff=float(self.get_option('reconnection_backoff')),
certificate_key_pem=self.get_option('certificate_key_pem'),
certificate_pem=self.get_option('certificate_pem'),
credssp_auth_mechanism=self.get_option('credssp_auth_mechanism'),
credssp_disable_tlsv1_2=self.get_option('credssp_disable_tlsv1_2'),
credssp_minimum_version=self.get_option('credssp_minimum_version'),
negotiate_send_cbt=self.get_option('negotiate_send_cbt'),
negotiate_delegate=self.get_option('negotiate_delegate'),
negotiate_hostname_override=self.get_option('negotiate_hostname_override'),
negotiate_service=self.get_option('negotiate_service'),
)
def _exec_psrp_script(
self,
script: str,
input_data: bytes | str | t.Iterable | None = None,
use_local_scope: bool = True,
arguments: t.Iterable[t.Any] | None = None,
) -> tuple[int, bytes, bytes]:
# Check if there's a command on the current pipeline that still needs to be closed.
if self._last_pipeline:
# Current pypsrp versions raise an exception if the current state was not RUNNING. We manually set it so we
# can call stop without any issues.
self._last_pipeline.state = PSInvocationState.RUNNING
self._last_pipeline.stop()
self._last_pipeline = None
ps = PowerShell(self.runspace)
ps.add_script(script, use_local_scope=use_local_scope)
if arguments:
for arg in arguments:
ps.add_argument(arg)
ps.invoke(input=input_data)
rc, stdout, stderr = self._parse_pipeline_result(ps)
# We should really call .stop() on all pipelines that are run to decrement the concurrent command counter on
# PSSession but that involves another round trip and is done when the runspace is closed. We instead store the
# last pipeline which is closed if another command is run on the runspace.
self._last_pipeline = ps
return rc, stdout, stderr
    def _parse_pipeline_result(self, pipeline: PowerShell) -> tuple[int, bytes, bytes]:
        """
        PSRP doesn't have the same concept as other protocols with its output.
        We need some extra logic to convert the pipeline streams and host
        output into the format that Ansible understands.

        :param pipeline: The finished PowerShell pipeline that invoked our
            commands
        :return: rc, stdout, stderr based on the pipeline output
        """
        # we try and get the rc from our host implementation, this is set if
        # exit or $host.SetShouldExit() is called in our pipeline, if not we
        # set to 0 if the pipeline had not errors and 1 if it did
        rc = self.host.rc or (1 if pipeline.had_errors else 0)

        # TODO: figure out a better way of merging this with the host output
        stdout_list = []
        for output in pipeline.output:
            # Not all pipeline outputs are a string or contain a __str__ value,
            # we will create our own output based on the properties of the
            # complex object if that is the case.
            if isinstance(output, GenericComplexObject) and output.to_string is None:
                obj_lines = output.property_sets
                for key, value in output.adapted_properties.items():
                    obj_lines.append(u"%s: %s" % (key, value))
                for key, value in output.extended_properties.items():
                    obj_lines.append(u"%s: %s" % (key, value))
                output_msg = u"\n".join(obj_lines)
            else:
                output_msg = to_text(output, nonstring='simplerepr')

            stdout_list.append(output_msg)

        # Host UI writes (e.g. Write-Host) land on self.host.ui, separate from
        # the pipeline's output stream; append them after the pipeline output.
        if len(self.host.ui.stdout) > 0:
            stdout_list += self.host.ui.stdout
        stdout = u"\r\n".join(stdout_list)

        stderr_list = []
        for error in pipeline.streams.error:
            # the error record is not as fully fleshed out like we usually get
            # in PS, we will manually create it here

            # NativeCommandError and NativeCommandErrorMessage are special
            # cases used for stderr from a subprocess, we will just print the
            # error message
            if error.fq_error == 'NativeCommandErrorMessage' and not error.target_name:
                # This can be removed once Server 2016 is EOL and no longer
                # supported. PS 5.1 on 2016 will emit 1 error record under
                # NativeCommandError being the first line, subsequent records
                # are the raw stderr up to 4096 chars. Each entry is the raw
                # stderr value without any newlines appended so we just use the
                # value as is. We know it's 2016 as the target_name is empty in
                # this scenario.
                stderr_list.append(str(error))
                continue

            elif error.fq_error in ['NativeCommandError', 'NativeCommandErrorMessage']:
                stderr_list.append(f"{error}\r\n")
                continue

            # Reconstruct a PowerShell-style error record (message, position,
            # CategoryInfo, FullyQualifiedErrorId) from the record's fields.
            command_name = "%s : " % error.command_name if error.command_name else ''
            position = "%s\r\n" % error.invocation_position_message if error.invocation_position_message else ''
            error_msg = "%s%s\r\n%s" \
                        "    + CategoryInfo          : %s\r\n" \
                        "    + FullyQualifiedErrorId : %s" \
                        % (command_name, str(error), position,
                           error.message, error.fq_error)
            stacktrace = error.script_stacktrace
            if display.verbosity >= 3 and stacktrace is not None:
                error_msg += "\r\nStackTrace:\r\n%s" % stacktrace
            stderr_list.append(f"{error_msg}\r\n")

        if len(self.host.ui.stderr) > 0:
            stderr_list += self.host.ui.stderr
        stderr = "".join([to_text(o) for o in stderr_list])

        display.vvvvv("PSRP RC: %d" % rc, host=self._psrp_host)
        display.vvvvv("PSRP STDOUT: %s" % stdout, host=self._psrp_host)
        display.vvvvv("PSRP STDERR: %s" % stderr, host=self._psrp_host)

        # reset the host back output back to defaults, needed if running
        # multiple pipelines on the same RunspacePool
        self.host.rc = 0
        self.host.ui.stdout = []
        self.host.ui.stderr = []

        return rc, to_bytes(stdout, encoding='utf-8'), to_bytes(stderr, encoding='utf-8')
| Connection |
python | pytorch__pytorch | test/inductor/test_torchinductor.py | {
"start": 29808,
"end": 484566
} | class ____:
def is_dtype_supported(self, dtype: torch.dtype) -> bool:
device_interface = get_interface_for_device(self.device)
return device_interface.is_dtype_supported(dtype)
    def test_bool(self):
        # Exercise boolean arithmetic/bitwise/logical lowerings via self.common
        # (compile-vs-eager harness defined outside this chunk).
        def fn(a, b):
            return (
                a + b,
                a * b,
                a & b,
                a | b,
                a ^ b,
                torch.logical_and(a, b),
                torch.logical_or(a, b),
                torch.logical_not(a),
                torch.sign(b),
            )

        self.common(
            fn,
            (
                torch.tensor([True, False, True, False]),
                torch.tensor([False, False, True, True]),
            ),
        )
    @skipCUDAIf(not SM80OrLater, "Requires sm80")
    @skip_if_halide  # aoti
    @skip_if_triton_cpu  # aoti
    @skipIfWindows(msg="aoti not support on Windows")
    def test_aoti_eager_dtype_device_layout(self):
        # AOTI-eager: registering a pre-compiled kernel for aten.tril_indices
        # must produce the same result as the eager op for explicit
        # dtype/layout/device arguments.
        ns = "aten"
        op_name = "tril_indices"
        dispatch_key = "CPU"
        device = "cpu"
        if self.device.lower() == "cuda":
            dispatch_key = "CUDA"
            device = "cuda"

        with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
            row = 128
            col = 256
            offset = 1
            dtype = torch.int32
            layout = torch.strided
            pin_memory = False
            # Reference comes from the eager op before the AOTI kernel is registered.
            ref = torch.tril_indices(
                row=row,
                col=col,
                offset=offset,
                dtype=dtype,
                layout=layout,
                pin_memory=pin_memory,
                device=device,
            )
            register_ops_with_aoti_compile(
                ns, [op_name], dispatch_key, torch_compile_op_lib_impl
            )
            # Same call now dispatches to the registered AOTI kernel.
            res = torch.tril_indices(
                row=row,
                col=col,
                offset=offset,
                dtype=dtype,
                layout=layout,
                pin_memory=pin_memory,
                device=device,
            )
            self.assertEqual(ref, res)
    @skipCUDAIf(not SM80OrLater, "Requires sm80")
    @skip_if_halide  # aoti
    @skip_if_triton_cpu  # aoti
    @skipIfWindows(msg="aoti not support on Windows")
    def test_aoti_eager_support_out(self):
        # AOTI-eager: the out= variant of aten.clamp must work after kernel
        # registration, including keyword arguments given in different orders.
        ns = "aten"
        op_name = "clamp"
        dispatch_key = "CPU"
        device = "cpu"
        if self.device.lower() == "cuda":
            dispatch_key = "CUDA"
            device = "cuda"

        inp_tensor = torch.randn(128, dtype=torch.float, device=device).fill_(1.0)
        min_tensor = inp_tensor - 0.05
        max_tensor = inp_tensor + 0.05
        with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
            # Eager references (out buffers pre-filled with -1 so writes are observable).
            ref_out_tensor = torch.randn(128, dtype=torch.float, device=device).fill_(
                -1
            )
            ref_tensor = torch.clamp(
                max=max_tensor, min=min_tensor, input=inp_tensor, out=ref_out_tensor
            )

            ref_out_tensor1 = torch.randn(128, dtype=torch.float, device=device).fill_(
                -1
            )
            ref_tensor1 = torch.clamp(
                max=max_tensor, out=ref_out_tensor1, min=min_tensor, input=inp_tensor
            )

            register_ops_with_aoti_compile(
                ns, [op_name], dispatch_key, torch_compile_op_lib_impl
            )

            # Same calls through the registered AOTI kernel; both the returned
            # tensor and the out buffer must match eager.
            res_out_tensor = torch.randn(128, dtype=torch.float, device=device).fill_(
                -1
            )
            res_tensor = torch.clamp(
                max=max_tensor, min=min_tensor, input=inp_tensor, out=res_out_tensor
            )
            self.assertEqual(ref_tensor, res_tensor)
            self.assertEqual(ref_out_tensor, res_out_tensor)

            res_out_tensor1 = torch.randn(128, dtype=torch.float, device=device).fill_(
                -1
            )
            res_tensor1 = torch.clamp(
                max=max_tensor, out=res_out_tensor1, min=min_tensor, input=inp_tensor
            )
            self.assertEqual(ref_tensor1, res_tensor1)
            self.assertEqual(ref_out_tensor1, res_out_tensor1)
    @skipCUDAIf(not SM80OrLater, "Requires sm80")
    @skip_if_halide  # aoti
    @skip_if_triton_cpu  # aoti
    @skipIfWindows(msg="aoti not support on Windows")
    def test_aoti_eager_support_str(self):
        # AOTI-eager: string-valued kwargs (div's rounding_mode) must be part of
        # the kernel cache key so each mode gets the right kernel.
        ns = "aten"
        op_name = "div"
        dispatch_key = "CPU"
        device = "cpu"
        if self.device.lower() == "cuda":
            dispatch_key = "CUDA"
            device = "cuda"

        a = torch.randn(128, dtype=torch.float, device=device)
        b = torch.randn(128, dtype=torch.float, device=device)
        rounding_mode_list = ["trunc", "floor"]
        with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
            # Get ref result from eager
            ref_value_list = []
            for rounding_mode in rounding_mode_list:
                ref_value = getattr(torch.ops.aten, op_name)(
                    a, b, rounding_mode=rounding_mode
                )
                ref_value_list.append(ref_value)

            register_ops_with_aoti_compile(
                ns, [op_name], dispatch_key, torch_compile_op_lib_impl
            )

            # Invoke the pre-compiled kernel and get result.
            res_value_list = []
            for rounding_mode in rounding_mode_list:
                res_value = getattr(torch.ops.aten, op_name)(
                    a, b, rounding_mode=rounding_mode
                )
                res_value_list.append(res_value)

            for ref_value, res_value in zip(ref_value_list, res_value_list):
                self.assertEqual(ref_value, res_value)
    @skipCUDAIf(not SM80OrLater, "Requires sm80")
    @skip_if_halide  # aoti
    @skip_if_triton_cpu  # aoti
    @skipIfWindows(msg="aoti not support on Windows")
    def test_aoti_eager_cache_hit(self):
        # AOTI-eager: once a kernel is in the persistent cache, dispatch must
        # hit the cache and never recompile (verified by patching the compile
        # entry point to None, which would raise if it were called).
        ns = "aten"
        op_name = "abs"
        dispatch_key = "CPU"
        device = "cpu"
        if self.device.lower() == "cuda":
            dispatch_key = "CUDA"
            device = "cuda"

        input_tensor = torch.randn(128, dtype=torch.float, device=device)
        kernel_lib_path = aoti_compile_with_persistent_cache(
            ns,
            op_name,
            device,
            False,
            getattr(torch.ops.aten, op_name),
            (input_tensor,),
            {},
        )
        self.assertTrue(Path(kernel_lib_path).exists())

        from unittest import mock

        # Patch the aoti_compile_with_persistent_cache as None to ensure no new kernel is generated
        with mock.patch(
            "torch._inductor.aoti_eager.aoti_compile_with_persistent_cache", None
        ):
            with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
                # Get ref result from eager
                ref_value = getattr(torch.ops.aten, op_name)(input_tensor)

                register_ops_with_aoti_compile(
                    ns, [op_name], dispatch_key, torch_compile_op_lib_impl
                )

                # Invoke the pre-compiled kernel and get result.
                res_value = getattr(torch.ops.aten, op_name)(input_tensor)

                self.assertEqual(ref_value, res_value)
    @skipCUDAIf(not SM80OrLater, "Requires sm80")
    @skip_if_halide  # aoti
    @skip_if_triton_cpu  # aoti
    @skipIfWindows(msg="aoti not support on Windows")
    def test_aoti_eager_with_persistent_cache(self):
        # AOTI-eager: compiling a kernel populates the on-disk cache with a
        # per-op JSON manifest whose entries point at the compiled library.
        def fn(a):
            return torch.abs(a)

        ns = "aten"
        op_name = "abs"

        device = "cpu"
        if self.device.lower() == "cuda":
            device = "cuda"

        input_tensor = torch.randn(128, dtype=torch.float, device=device)
        kernel_lib_path = aoti_compile_with_persistent_cache(
            ns,
            op_name,
            input_tensor.device.type,
            False,
            fn,
            args=(input_tensor,),
            kwargs={},
        )
        self.assertTrue(len(kernel_lib_path) > 0)

        device_kernel_cache = aoti_eager_cache_dir(ns, device)
        kernel_conf = device_kernel_cache / f"{op_name}.json"
        self.assertTrue(kernel_conf.exists())

        json_data = load_aoti_eager_cache("aten", "abs", input_tensor.device.type)
        self.assertTrue(json_data is not None)
        self.assertTrue(isinstance(json_data, list))
        self.assertTrue(len(json_data) > 0)

        # Each manifest entry records input metadata and a relative kernel path.
        op_info = json_data[0]
        self.assertTrue(isinstance(op_info, dict))
        self.assertTrue("meta_info" in op_info)
        self.assertTrue("kernel_path" in op_info)

        kernel_libs_abs_path = []
        for item in json_data:
            kernel_path = device_kernel_cache / item["kernel_path"]
            kernel_libs_abs_path.append(kernel_path.as_posix())

        self.assertTrue(kernel_lib_path in kernel_libs_abs_path)
    @skipCUDAIf(not SM80OrLater, "Requires sm80")
    @skip_if_halide  # aoti
    @skip_if_triton_cpu  # aoti
    @skipIfWindows(msg="aoti not support on Windows")
    def test_aoti_eager_with_scalar(self):
        # AOTI-eager: the cache manifest must distinguish scalar tensors
        # (0-dim, recorded with sizes/strides) from true python scalars
        # (recorded as scalar_value), and the registered kernel must work for
        # multiple scalar alpha values.
        namespace_name = "aten"
        op_name = "add"
        op_overload_name = "Tensor"
        op_name_with_overload = f"{op_name}.{op_overload_name}"

        dispatch_key = "CPU"
        device = torch.device("cpu")
        if self.device.lower() == "cuda":
            dispatch_key = "CUDA"
            device = torch.device("cuda")

        # Test the difference between scalar tensor and scalar
        a = torch.scalar_tensor(1.0, device=device)
        b = torch.scalar_tensor(2.0, device=device)

        kernel_lib_path = aoti_compile_with_persistent_cache(
            namespace_name,
            op_name_with_overload,
            a.device.type,
            False,
            torch.ops.aten.add,
            args=(a, b),
            kwargs={"alpha": 3.0},
        )
        self.assertTrue(Path(kernel_lib_path).exists())

        device_kernel_cache = aoti_eager_cache_dir(namespace_name, device.type)
        kernel_conf = device_kernel_cache / f"{op_name_with_overload}.json"
        self.assertTrue(kernel_conf.exists())

        json_data = load_aoti_eager_cache(
            namespace_name, op_name_with_overload, a.device.type
        )
        op_info = json_data[0]
        self.assertTrue(isinstance(op_info, dict))
        self.assertTrue("meta_info" in op_info)
        self.assertTrue(len(op_info["meta_info"]) == 3)
        # Scalar Tensor
        self.assertTrue("scalar_value" not in op_info["meta_info"][0])
        self.assertTrue(op_info["meta_info"][0]["sizes"] == [])
        self.assertTrue(op_info["meta_info"][0]["strides"] == [])
        # Scalar Tensor
        self.assertTrue("scalar_value" not in op_info["meta_info"][1])
        self.assertTrue(op_info["meta_info"][1]["sizes"] == [])
        self.assertTrue(op_info["meta_info"][1]["strides"] == [])
        # Scalar
        self.assertTrue("scalar_value" in op_info["meta_info"][2])
        self.assertTrue("sizes" not in op_info["meta_info"][2])
        self.assertTrue("strides" not in op_info["meta_info"][2])

        with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
            a = torch.randn(128, device=device)
            b = torch.randn(128, device=device)

            scalar_values = [1.0, 2.0, 3.0]
            ref_values = []
            for scalar_value in scalar_values:
                ref_values.append(torch.add(a, b, alpha=scalar_value))

            register_ops_with_aoti_compile(
                namespace_name, [op_name], dispatch_key, torch_compile_op_lib_impl
            )

            res_values = []
            for scalar_value in scalar_values:
                res_values.append(torch.add(a, b, alpha=scalar_value))

            self.assertEqual(len(ref_values), len(res_values))
            self.assertEqual(ref_values, res_values)
    @skipCUDAIf(not SM80OrLater, "Requires sm80")
    @skip_if_halide  # aoti
    @skip_if_triton_cpu  # aoti
    @skipIfWindows(msg="aoti not support on Windows")
    def test_aoti_eager_override_registration(self):
        # AOTI-eager: registering several unary ops at once, plus an op with
        # optional arguments (clamp with/without max), must match eager results.
        namespace_name = "aten"
        dispatch_key = "CPU"
        device = torch.device("cpu")
        if self.device.lower() == "cuda":
            dispatch_key = "CUDA"
            device = torch.device("cuda")

        unary_op_set = ["abs", "acos"]

        def fn(x, op_name=""):
            return getattr(torch, op_name)(x)

        # Invoke torch.compile directly to get referent results
        x = torch.randn(3, 4, device=device)

        ref_array = []
        for unary_op_name in unary_op_set:
            opt_fn = torch.compile(functools.partial(fn, op_name=unary_op_name))
            ref = opt_fn(x)
            ref_array.append(ref)

        with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
            register_ops_with_aoti_compile(
                namespace_name, unary_op_set, dispatch_key, torch_compile_op_lib_impl
            )

            res_array = []
            for unary_op_name in unary_op_set:
                res_array.append(getattr(torch, unary_op_name)(x))

        for ref, res in zip(ref_array, res_array):
            self.assertEqual(ref, res)

        a = torch.randn(128, device=device)
        min_tensor = torch.randn(128, device=device)
        max_tensor = min_tensor + 0.5

        # Eager references for clamp with one and two bound tensors.
        ref_with_min = torch.ops.aten.clamp(a, min_tensor)
        ref_with_min_max = torch.ops.aten.clamp(a, min_tensor, max_tensor)

        with _scoped_library("aten", "IMPL") as torch_compile_op_lib_impl:
            register_ops_with_aoti_compile(
                namespace_name, ["clamp"], dispatch_key, torch_compile_op_lib_impl
            )
            res_with_min = torch.ops.aten.clamp(a, min_tensor)
            res_with_min_max = torch.ops.aten.clamp(a, min_tensor, max_tensor)
            self.assertEqual(ref_with_min, res_with_min)
            self.assertEqual(ref_with_min_max, res_with_min_max)
    def test_add_const_int(self):
        # Integer-constant add (plain and with alpha) across float/int dtypes.
        def fn(a):
            return (a + 1, torch.add(a, 1, alpha=2))

        for dtype in [torch.float32, torch.int32, torch.int64]:
            self.common(fn, (torch.arange(32, dtype=dtype),))
def test_add_const_float(self):
def fn(a):
return (a + 1.5,)
self.common(fn, (torch.randn(32),))
    def test_add_inplace_permuted(self):
        # In-place add into a non-contiguous (transposed) tensor with a
        # broadcast rhs.
        if config.cpu_backend == "halide":
            raise unittest.SkipTest(
                "Halide cpu backend does not work for this test case: https://github.com/pytorch/pytorch/issues/140344"
            )

        def fn(x, y):
            return x.add_(y)

        x = torch.ones([2, 12, 13, 17]).transpose(1, 2)
        y = torch.randn([2, 13, 1, 17])

        self.common(fn, (x, y))
    def test_add_complex(self):
        # torch.add with alpha on 1-D complex tensors.
        def fn(a, b, alpha):
            return torch.add(a, b, alpha=alpha)

        x = torch.tensor([1 + 1j, -1 + 1j, -2 + 2j, 3 - 3j, 0, 1j, 1, -1])
        y = torch.tensor([1 + 1j, -1 + 1j, -2 + 2j, 3 - 3j, 0, 1j, 1, -1])

        self.common(fn, (x, y, 2))
    def test_add_complex3(self):
        # fix https://github.com/pytorch/pytorch/issues/115071
        # Complex ops must not write back into the input buffer in-place.
        @torch.compile
        def fn(*args):
            a = torch.neg(args[0])
            b = torch.add(args[0], args[0])
            return (a, b)

        x = torch.randn(41, dtype=torch.complex64, device=self.device)
        y = x.clone()
        # should not inplace write to the input
        fn(x)
        self.assertEqual(x, y)
    def test_add_complex4(self):
        # Complex adds are lowered via dtype views; after stripping size/stride
        # and alignment asserts, the generated code should contain at least 3
        # view-as-real/view-dtype calls.
        @torch.compile
        def fn(a, b):
            c = a + b
            d = a + b
            return c + d

        for dtype in [torch.complex32, torch.complex64, torch.complex128]:
            if not self.is_dtype_supported(dtype):
                continue
            x = torch.tensor(
                [1 + 1j, -1 + 1j, -2 + 2j, 3 - 3j, 0, 1j, 1, -1],
                dtype=dtype,
                device=self.device,
            )
            y = torch.tensor(
                [1 + 1j, -1 + 1j, -2 + 2j, 3 - 3j, 0, 1j, 1, -1],
                dtype=dtype,
                device=self.device,
            )

            _, code = run_and_get_code(fn, x, y)
            code = " ".join(code)
            assert_keywords = ["assert_size_stride", "assert_alignment"]
            filtered_lines = [
                line
                for line in code.splitlines()
                if not any(assert_key in line for assert_key in assert_keywords)
            ]
            code = "\n".join(filtered_lines)
            self.assertGreaterEqual(
                code.count("view_dtype" if config.cpp_wrapper else "aten.view"), 3
            )
    def test_add_complex_strided_fallback(self):
        # Complex add with non-contiguous (transposed) inputs should not
        # generate an inductor kernel; it must fall back to an extern/aten add.
        @torch.compile
        def fn(a, b):
            return a + b

        if not self.is_dtype_supported(torch.complex64):
            raise unittest.SkipTest("complex64 not supported on device")

        base = torch.randn(3, 4, dtype=torch.complex64, device=self.device)
        x = base.transpose(0, 1)
        y = base.transpose(0, 1)

        torch._inductor.metrics.reset()
        _, code = run_and_get_code(fn, x, y)
        self.assertEqual(torch._inductor.metrics.generated_kernel_count, 0)
        code = " ".join(code)
        # Acceptable fallback spellings differ per wrapper mode (python vs cpp).
        fallback_markers = [
            "extern_kernels.add",
            "torch.ops.aten.add.Tensor",
        ]
        if config.cpp_wrapper:
            fallback_markers.extend(
                [
                    "aoti_torch_cuda_add_Tensor",
                    "aoti_torch_cpu_add_Tensor",
                ]
            )
        self.assertTrue(
            any(code.count(marker) >= 1 for marker in fallback_markers),
            msg=f"Expected complex add with strided inputs to fall back to extern kernels, got:\n{code}",
        )
def test_add_complex5(self):
def fn(a, b, alpha):
return torch.add(a, b, alpha=alpha)
x = torch.tensor([[1 + 1j, -1 + 1j], [-2 + 2j, 3 - 3j]])
y = torch.tensor([[1 + 1j, -1 + 1j], [-2 + 2j, 3 - 3j]])
self.common(fn, (x, y, 2))
    def test_add_complex6(self):
        # Fix https://github.com/pytorch/pytorch/issues/125745.
        # Add complex tensors with broadcasting.
        def fn(a, b, alpha):
            return torch.add(a, b, alpha=alpha)

        x = torch.tensor([[1 + 1j, -1 + 1j, -2 + 2j, 3 - 3j]])
        y = torch.tensor([[1 + 1j]])

        self.common(fn, (x, y, 2))
    def test_add_complex7(self):
        # Fix https://github.com/pytorch/pytorch/issues/160495
        # Test scalar (0-dimensional) complex tensor addition: 0D + 0D
        def fn(a, b, alpha):
            return torch.add(a, b, alpha=alpha)

        x = torch.rand((), dtype=torch.complex64, device=self.device)
        y = torch.rand((), dtype=torch.complex64, device=self.device)

        self.common(fn, (x, y, 2))
    def test_add_complex8(self):
        # Fix https://github.com/pytorch/pytorch/issues/160495
        # Test scalar complex addition: 1D + 0D
        def fn(a, b, alpha):
            return torch.add(a, b, alpha=alpha)

        x = torch.rand(1, dtype=torch.complex64, device=self.device)
        y = torch.rand((), dtype=torch.complex64, device=self.device)

        self.common(fn, (x, y, 2))
    def test_add_complex9(self):
        # Fix https://github.com/pytorch/pytorch/issues/160495
        # Test scalar complex addition: 0D + 1D
        def fn(a, b, alpha):
            return torch.add(a, b, alpha=alpha)

        x = torch.rand((), dtype=torch.complex64, device=self.device)
        y = torch.rand(1, dtype=torch.complex64, device=self.device)

        self.common(fn, (x, y, 2))
    def test_add_complex10(self):
        # Fix https://github.com/pytorch/pytorch/issues/160495
        # Test scalar complex broadcasting
        def fn(a, b, alpha):
            return torch.add(a, b, alpha=alpha)

        x = torch.randn(2, 3, dtype=torch.complex64, device=self.device)
        y = torch.rand((), dtype=torch.complex64, device=self.device)

        self.common(fn, (x, y, 2))
    def test_concat_add_inplace(self):
        # In-place add applied to the result of a concatenation.
        def fn(x, y, z):
            return torch.cat([x, y], dim=1).add_(z)

        x = torch.randn([2, 12, 14, 14])
        y = torch.randn([2, 12, 14, 14])
        z = torch.randn([2, 24, 14, 14])

        self.common(fn, (x, y, z))
def test_abs(self):
def fn(a):
return (a / (torch.abs(a) + 1),)
self.common(fn, (torch.randn(17),))
    @xfail_if_triton_cpu
    def test_angle(self):
        # torch.angle over complex, float (incl. NaN), and integer inputs.
        def fn(a, b, c):
            return torch.angle(a), torch.angle(b), torch.angle(c)

        complex_input = torch.tensor(
            [1 + 1j, -1 + 1j, -2 + 2j, 3 - 3j, 0, 1j, 1, -1, float("nan")]
        )
        real_input = torch.tensor([-1.0, 0.0, 1.0, float("nan")])
        interger_real_input = torch.tensor([-1, 0, 1])
        self.common(fn, (complex_input, real_input, interger_real_input))
    def test_sgn(self):
        # torch.sgn on a range spanning negative, zero, and positive values.
        def fn(a):
            return torch.sgn(a), torch.sgn(a + 1) - 1

        self.common(fn, [torch.linspace(-10, 10, 41)])
    @skipCUDAIf(not SM80OrLater, "uses bfloat16 which requires SM >= 80")
    def test_scatter_bf16(self):
        # scatter_add for int64/bool/bfloat16, skipping dtypes the backend
        # does not support.
        def fn(inp, src, index):
            return inp.scatter_add(0, index, src)

        for dtype in [torch.int64, torch.bool, torch.bfloat16]:
            if not self.is_dtype_supported(dtype):
                continue
            self.common(
                fn,
                [
                    torch.zeros(3, 5, dtype=dtype),
                    torch.ones((2, 5), dtype=dtype),
                    torch.tensor([[0, 1, 2, 0, 0]]),
                ],
            )
    def test_randn_generator(self):
        # randn with generator=None compiles (values not compared); an explicit
        # Generator argument is currently rejected by dynamo.
        def fn(a, generator):
            return torch.randn([20, 20], generator=generator, device=a.device)

        self.common(fn, (torch.linspace(-10, 10, 41), None), assert_equal=False)

        # generator not yet supported in dynamo
        with self.assertRaisesRegex(torch._dynamo.exc.Unsupported, "Generator"):
            self.common(fn, (torch.linspace(-10, 10, 41), torch.Generator(self.device)))
def test_sgn_extremal(self):
def fn(a):
return (torch.sgn(a),)
self.common(fn, [torch.tensor([np.nan, np.inf, -np.inf, 0])])
def test_max_min(self):
def fn(a, b):
return (torch.maximum(a, b), torch.minimum(a, b))
self.common(fn, (torch.randn(8), torch.randn(8)))
t1 = torch.randn(8)
t1[0] = float("nan")
t2 = torch.randn(8)
t2[1] = float("nan")
self.common(fn, (t1, t2))
    def test_neg_max_uint8(self):
        # https://github.com/pytorch/pytorch/issues/93380
        # neg followed by maximum on uint8 (wrap-around semantics).
        def fn(a, b):
            c = torch.neg(a)
            return torch.maximum(b, c)

        a = torch.randint(256, (1,), dtype=torch.uint8)
        b = torch.randint(256, (8390,), dtype=torch.uint8)
        self.common(fn, (a, b))
def test_compar(self):
def fn(x):
return x.gt(3.5), x.ge(3.5), x.eq(3.5), x.le(2.5), x.lt(3.5), x.ne(3.5)
a = torch.tensor([3])
self.common(fn, (a,))
    def test_horizonal_fusion1(self):
        # Three independent binary ops over broadcast-compatible inputs.
        def fn(a, b, c):
            return (a + b, a - c, b * c)

        self.common(
            fn, (torch.randn(8, 16, 16), torch.randn(8, 16, 16), torch.randn(1, 16, 1))
        )
def test_horizonal_fusion2(self):
def fn(a, b, c):
return a + 1, b + 2, c + 3
self.common(fn, (torch.randn(8, 16, 8), torch.randn(8, 16), torch.randn(16, 8)))
    def test_vertical_fusion1(self):
        # A chain of dependent pointwise ops should fuse into one kernel.
        def fn(sa, ct, p):
            # From torchbench.pyhpc_equation_of_state
            v17 = -3.087032500374211e-7
            v18 = -1.988366587925593e-8
            v19 = -1.061519070296458e-11
            v20 = 1.550932729220080e-10
            t15 = v19 * ct
            t19 = v17 + ct * (v18 + t15) + v20 * sa
            t20 = 1.0 / t19
            t128 = t19 * p
            return t20 + t128

        self.common(
            fn,
            (
                torch.randn(204, 204, 26),
                torch.randn(204, 204, 26),
                torch.randn(26),
            ),
        )
        # The whole chain must compile to a single generated kernel.
        assertGeneratedKernelCountEqual(self, 1)
    @config.patch({"fx_graph_cache": False})
    @skipIfWindows(msg="torch._dynamo.exc.Unsupported")
    def test_forced_buffer_realize(self):
        # Test torch._test_inductor_realize forces a buffer to be realized
        def fn(a):
            b = test_operators.realize(a * 2)
            return (b * 2,)

        self.common(fn, (torch.randn(10),))
        # realize() splits the computation into two pre-fusion IR nodes.
        self.assertEqual(torch._inductor.metrics.ir_nodes_pre_fusion, 2)
    @config.patch({"fx_graph_cache": False})
    @skipIfWindows(msg="torch._dynamo.exc.Unsupported")
    def test_scheduler_vertical_fusion1(self):
        # Same computation as test_vertical_fusion1, but with realize() forcing
        # intermediate buffers; the scheduler should still fuse them back.
        realize = test_operators.realize

        def fn(sa, ct, p):
            # From torchbench.pyhpc_equation_of_state
            v17 = -3.087032500374211e-7
            v18 = -1.988366587925593e-8
            v19 = -1.061519070296458e-11
            v20 = 1.550932729220080e-10
            t15 = realize(v19 * ct)
            t19 = realize(v17 + ct * (v18 + t15) + v20 * sa)
            t20 = realize(1.0 / t19)
            t128 = realize(t19 * p)
            return t20 + t128

        self.common(
            fn,
            (
                torch.randn(204, 204, 26),
                torch.randn(204, 204, 26),
                torch.randn(26),
            ),
        )
        self.assertEqual(torch._inductor.metrics.ir_nodes_pre_fusion, 5)
        assertGeneratedKernelCountEqual(
            self, 1 if not is_cpp_backend(self.device) else 2
        )
    def test_index_propagation(self):
        # Indexing with arange(n) is an identity copy; index propagation should
        # remove the indirect load entirely.
        def copy(x):
            i = torch.arange(x.size(0), device=x.device)
            return x[i]

        x = torch.randn(8, device=self.device)
        copy_opt = torch.compile(copy, backend="inductor")

        expect = copy(x)
        actual = _run_and_assert_no_indirect_indexing(self, copy_opt, x)
        self.assertEqual(expect, actual)
    @dynamo_config.patch("capture_dynamic_output_shape_ops", True)
    # https://github.com/halide/Halide/issues/8308
    @config.patch("halide.scheduler_cpu", "Mullapudi2016")
    @config.patch("halide.scheduler_cuda", "Li2018")
    @config.patch(implicit_fallbacks=True)
    def test_index_propagation_nested_indirect_indexing(self):
        # index_select driven by a repeat_interleave of a dynamic-shaped
        # repeats tensor (data-dependent output shape).
        def nested(x, repeats):
            rank = torch.arange(repeats.numel(), device=x.device)
            index = rank.repeat_interleave(repeats, dim=0)
            return torch.index_select(x, index=index, dim=0)

        example_inputs = (
            torch.randn((32, 64), device=self.device),
            repeats := torch.tensor([5, 10, 15], device=self.device),
        )
        torch._dynamo.mark_dynamic(repeats, 0)  # create backed symint

        nested_opt = torch.compile(nested, backend="inductor")

        expect = nested(*example_inputs)
        actual = nested_opt(*example_inputs)
        self.assertEqual(expect, actual)
    def test_index_propagation_flip(self):
        # Reversing via a descending arange index should be collapsed to
        # direct (non-indirect) indexing.
        def flip(x):
            i = torch.arange(x.size(0) - 1, -1, -1, device=x.device)
            return x[i]

        x = torch.randn(8, device=self.device)
        flip_opt = torch.compile(flip, backend="inductor")

        expect = flip(x)
        actual = _run_and_assert_no_indirect_indexing(self, flip_opt, x)
        self.assertEqual(expect, actual)
    def test_index_propagation_floordiv(self):
        # Indexing with i // n (a repeat_interleave pattern) should become
        # direct indexing.
        def repeat_interleave(x, n):
            # e.g. x=[1, 2, 3], n=2 => returns [1, 1, 2, 2, 3, 3]
            i = torch.arange(x.shape[0] * n, device=x.device)
            return x[i // n]

        x = torch.randn(8, 16, device=self.device)
        repeat_interleave_opt = torch.compile(repeat_interleave, backend="inductor")
        # With static shapes we can prove the bound, our dynamic shapes reasoning is not good enough
        has_assert = ifdynstaticdefault(False, True)
        # this should be collapsed to direct indexing
        actual = _run_and_assert_no_indirect_indexing(
            self, repeat_interleave_opt, x, 3, has_assert=has_assert
        )
        expect = torch.repeat_interleave(x, 3, dim=0)
        self.assertEqual(expect, actual)
        self.assertEqual(actual, repeat_interleave(x, 3))
    def test_index_propagation_remainder(self):
        # Indexing with i % len(x) (a tiling/repeat pattern) should become
        # direct indexing without wrapping.
        def repeat(x, n):
            # e.g. x=[1, 2, 3], n=2 => returns [1, 2, 3, 1, 2, 3]
            i = torch.arange(x.shape[0] * n, device=x.device)
            return x[i % x.shape[0]]

        x = torch.randn(8, 16, device=self.device)
        repeat_opt = torch.compile(repeat, backend="inductor")

        # With static shapes we can prove the bound, our dynamic shapes reasoning is not good enough
        has_assert = ifdynstaticdefault(False, True)
        # this should be collapsed to direct indexing
        actual = _run_and_assert_no_indirect_indexing(
            self, repeat_opt, x, 3, has_wrapping=False, has_assert=has_assert
        )
        expect = x.repeat(3, 1)
        self.assertEqual(expect, actual)
        self.assertEqual(actual, repeat(x, 3))
    def test_index_propagation_abs(self):
        # Indexing with |i - n| (reflection padding pattern) should become
        # direct indexing without wrapping.
        def reflection_pad_left(x, n):
            # e.g. x=[1, 2, 3], n=2 => returns [3, 2, 1, 2, 3]
            i = torch.arange(x.shape[0] + n, device=x.device)
            return x[(i - n).abs()]

        x = torch.randn(8, device=self.device)
        opt_fn = torch.compile(reflection_pad_left, backend="inductor")

        # With static shapes we can prove the bound, our dynamic shapes reasoning is not good enough
        has_assert = ifdynstaticdefault(False, True)
        # this should be collapsed to direct indexing
        actual = _run_and_assert_no_indirect_indexing(
            self, opt_fn, x, 3, has_wrapping=False, has_assert=has_assert
        )
        expect = reflection_pad_left(x, 3)
        self.assertEqual(expect, actual)
    def test_index_propagation_device_assert_masked(self):
        # Index values produced under a where() mask must not trigger spurious
        # device-side bounds asserts.
        def fn(a):
            idx = torch.arange(a.size(0), device=a.device)
            padded_idx = torch.constant_pad_nd(idx, (1050, 0))
            padded_idx = torch.where(padded_idx >= 0, padded_idx, padded_idx)
            return a[padded_idx]

        self.common(fn, (torch.randn(1024),))
    def test_index_remainder(self):
        # Indirect indexing through a runtime modulo of an integer tensor.
        def fn(x, y):
            return x[y % 12]

        self.common(fn, (torch.rand(1024), torch.randint(50, (50,))))
    @xfailIfS390X
    @config.patch(debug_index_asserts=False)
    @config.patch("cpp.enable_tiling_heuristics", False)
    def test_neg_index(self):
        # Inspect generated code for various negative-index patterns and assert
        # whether bounds asserts (TORCH_CHECK/device_assert) and index wrapping
        # (where/blendv) are emitted, per backend.
        def test(
            fn, inps, has_assert: bool, has_wrapping: bool, vectorize: bool = True
        ):
            fn_opt = torch.compile(fn)
            if is_halide_backend(self.device):
                pass  # no device asserts in halide
            # TODO: remove once https://github.com/pytorch/pytorch/issues/144634
            # is fixed.
            elif is_mps_backend(self.device):
                pass  # no device asserts in MPS
            elif self.device == "cpu" and not is_triton_cpu_backend(self.device):
                _, code = run_and_get_cpp_code(fn_opt, *inps)
                self.assertTrue(("TORCH_CHECK" in code) is has_assert)
                if (
                    cpu_vec_isa.valid_vec_isa_list()
                    and os.getenv("ATEN_CPU_CAPABILITY") != "default"
                ):
                    self.assertTrue(
                        (") ? (" in code or "blendv" in code) is has_wrapping
                    )

                    # Assert that we always vectorize the kernel regardless of wrapping / checks
                    self.assertTrue(("loadu" in code) is vectorize)
            else:
                code = run_and_get_triton_code(fn_opt, *inps)
                self.assertTrue(("tl.where" in code) is has_wrapping)
                self.assertTrue(("device_assert" in code) is has_assert)

        def indirect(a, b):
            return a[b - 1]

        a = torch.rand(1024, device=self.device)
        b = torch.zeros(256, dtype=torch.long, device=self.device)
        test(indirect, (a, b), has_assert=True, has_wrapping=True)

        def direct(x):
            return x[:, -1]

        a = torch.rand(1, 64, 32, device=self.device)
        # Does not even generate a kernel as it's a view
        test(direct, (a,), has_assert=False, has_wrapping=False, vectorize=False)

        def flip(a, b):
            return a[b]

        a = torch.rand(1024, device=self.device)
        b = torch.arange(start=-1, end=-a.numel() - 1, step=-1, device=self.device)
        test(flip, (a, b), has_assert=True, has_wrapping=True)

        # Constant propagate a constant that's negative
        def flip_with_index_constant(a):
            b = torch.arange(start=-1, end=-a.numel() - 1, step=-1, device=a.device)
            return a[b]

        # Wrapping is constant-folded
        test(flip_with_index_constant, (a,), has_assert=False, has_wrapping=False)

        # Operation where we can't prove that the index is always positive or negative
        def pos_and_neg(a):
            b = torch.arange(start=1, end=-a.numel() - 1, step=-1, device=a.device)
            return a[b]

        # It has wrapping but no assert
        test(pos_and_neg, (a,), has_assert=False, has_wrapping=True)

        # We currently don't do constant propagation with float constants
        # We cannot prove this kind of asserts just with bounds. We would need
        # to lift IndexPropagation.shape_env to be accessible in all of Inductor
        def flip_with_index(a):
            b = 1.0 * torch.arange(
                start=-1, end=-a.numel() - 1, step=-1, device=a.device
            )
            b = b.int()
            return a[b]

        test(
            flip_with_index,
            (a,),
            has_assert=ifdynstaticdefault(False, True),
            has_wrapping=False,
            vectorize=True,
        )

        def unsafe_index(a, b):
            return aten._unsafe_index(a, (b,))

        test(unsafe_index, (a, b), has_assert=False, has_wrapping=True)

        def constant_propagation(a):
            b = torch.tensor([2], device=a.device)
            return a[b]

        test(
            constant_propagation,
            (a,),
            has_assert=ifdynstaticdefault(False, True),
            has_wrapping=False,
            vectorize=False,  # There's no loop to vectorize!
        )

        def constant_propagation_neg(a):
            b = torch.tensor([-2], device=a.device)
            return a[b]

        # In symbolic shapes, we know that we can access -2, so no assert is necessary!
        test(
            constant_propagation_neg,
            (a,),
            has_assert=False,
            has_wrapping=False,
            vectorize=False,  # There's no loop to vectorize!
        )
    def test_computed_buffer_inlining(self):
        # The arange buffer is also returned, yet its use as an index should
        # still be inlined (no indirect indexing).
        def flip(x):
            idx = torch.arange(x.size(0) - 1, -1, -1, device=x.device)
            return x[idx], idx

        flip_opt = torch.compile(flip, backend="inductor")
        x = torch.randn(8, device=self.device)

        expect = flip(x)
        actual = _run_and_assert_no_indirect_indexing(self, flip_opt, x)
        self.assertEqual(expect, actual)
    def test__unsafe_masked_index(self):
        # aten._unsafe_masked_index with out-of-bounds indices masked off
        # (fill value 1 for masked positions).
        def fn(a, mask, idx):
            return aten._unsafe_masked_index(a, mask, idx, 1)

        self.common(
            fn,
            (
                torch.randn(8, device=self.device),
                torch.tensor([True, False, True], device=self.device),
                [torch.tensor([3, 9, 2], device=self.device)],
            ),
        )
    def test__unsafe_masked_index_put_accumulate(self):
        # aten._unsafe_masked_index_put_accumulate: accumulate values only at
        # unmasked index positions.
        def fn(a, mask, idx, values):
            return aten._unsafe_masked_index_put_accumulate(a, mask, idx, values)

        self.common(
            fn,
            (
                torch.randn(8, device=self.device),
                torch.tensor([True, False, True], device=self.device),
                [torch.tensor([3, 9, 2], device=self.device)],
                torch.randn(3, device=self.device),
            ),
        )
def test_sum1(self):
def fn(a, b):
return ((a + b).sum(-1),)
self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
def test_sum2(self):
def fn(a, b):
return ((a + b).sum([1, 2]), (a + b).sum(-1))
self.common(fn, (torch.randn(8, 9, 3, 21), torch.randn(8, 9, 3, 21)))
    def test_sum3(self):
        # Mixed pointwise + reduction outputs with a broadcast input; uses
        # relaxed tolerances (see measured diffs below).
        def fn(a, b):
            r1 = a + b
            r2 = r1.sum(-1)
            r3 = torch.squeeze(b) + 10
            return (r1, r2, r3)

        # Mismatched elements: 2 / 10 (20.0%)
        # Greatest absolute difference: 0.0029296875 at index (8,) (up to 1e-05 allowed)
        # Greatest relative difference: 0.0017482517482517483 at index (6,) (up to 0.001 allowed)
        self.common(fn, (torch.randn(10, 10), torch.randn(1, 10)), atol=1e-5, rtol=2e-3)
    def test_sum4(self):
        # Chained pointwise/reduction pipeline returning every intermediate.
        def fn(a):
            b = a + 1
            c = b.sum(-1)
            d = c + 3
            e = d.sum(-1)
            f = e + 5
            return (f, e, d, c, b)

        self.common(fn, (torch.randn(1, 16, 8, 8),))
    def test_sum5(self):
        # Same add/sum chain as test_sum4, but only the final value is
        # returned (intermediates can be fused away) and the input uses
        # odd, non-power-of-two dimensions.
        def fn(a):
            b = a + 1
            c = b.sum(-1)
            d = c + 3
            e = d.sum(-1)
            f = e + 5
            return (f,)

        self.common(fn, (torch.randn(1, 17, 8, 9),))
def test_reduction1(self):
def fn(a):
return (a.sum(), a.max(), a.min(), a.argmax(), a.argmin())
self.common(fn, (torch.tensor([float("-inf"), 0.0, float("inf")]),))
@skip_if_x86_mac()
def test_reduction2(self):
def fn(a):
# FIXME: a.argmax
return (a.sum(), a.max(), a.min(), a.argmin())
self.common(fn, (torch.full((4,), float("inf")),))
@skip_if_x86_mac()
def test_reduction3(self):
def fn(a):
# FIXME: a.argmin
return (a.sum(), a.max(), a.min(), a.argmax())
self.common(fn, (torch.full((4,), float("-inf")),))
def test_reduction4(self):
if self.device == "cpu":
raise unittest.SkipTest("Non-deterministic CPU results")
def fn(a):
return (a.argmax(-1), a.argmin(-1))
inputs = (torch.ones(128), torch.ones(4, 4, 1))
for i in inputs:
self.common(fn, (i,), check_lowp=not is_halide_backend(self.device))
@config.patch(unroll_reductions_threshold=1)
def test_reduction5(self):
if self.device == "cpu":
raise unittest.SkipTest("Non-deterministic CPU results")
def fn(a):
return (a.sum(), a.max(), a.min(), a.argmax())
self.common(fn, (torch.full((4,), float("-inf")),))
@skip_if_not_triton
def test_reduction_config_limit(self):
"""
This unit-test tests whether we exceed cudaDeviceProperties.maxGridSize in
triton reduction configs for large size hints. #128826 introduced a scaling XBLOCK
feature to resolve the issue in reduction configs which may exceed the maxGridSize
"""
from torch._inductor.runtime.runtime_utils import next_power_of_2
from torch._inductor.runtime.triton_heuristics import triton_config_reduction
size_hints = {"x": 67108864, "r0_": 8192}
for _ in range(4):
size_hints["x"] = next_power_of_2(size_hints["x"])
triton_config_reduction(size_hints, 1, 2048, 1, 8)
def test_prod(self):
def fn(a):
return a.prod(0), a.prod(1), a.prod()
self.common(fn, (torch.rand((10, 10)),))
self.common(fn, (torch.rand((1, 2050)),))
def test_unroll_small_reduction(self):
def fn(x):
val1, index1 = x.min(-1)
val2, index2 = x.max(-1)
return (
val1,
index1,
val2,
index2,
x.sum(-1),
(x > 1).any(-1),
(x > 0).all(-1),
x.argmin(-1),
x.argmax(-1),
x.amin(-1),
x.amax(-1),
x.aminmax(),
)
with config.patch(unroll_reductions_threshold=8):
# small sized reductions will get unrolled
self.common(fn, (torch.randn(8, 3),))
torch._dynamo.reset()
with config.patch(unroll_reductions_threshold=1):
# make sure things also work if they aren't unrolled
self.common(fn, (torch.randn(8, 3),))
def test_multilayer_sum_low_prec(self):
# fp16 nyi for cpu
if self.device == "cpu":
raise unittest.SkipTest(f"requires {GPU_TYPE}")
def fn(a):
return torch.mean(a)
self.common(fn, ((torch.rand((10, 3, 352, 352), dtype=torch.float16),)))
def test_multilayer_prime_size(self):
def fn(a):
return torch.max(a), torch.sum(a)
# Requires masked loading for the intermediate reduction
sample = torch.full((3999971,), 0, dtype=torch.int64)
sample[-1] = 1
self.common(fn, (sample,))
@skip_if_gpu_halide
@skipCPUIf(IS_MACOS, "fails on macos")
def test_multilayer_var(self):
def fn(a):
return torch.var(a)
self.common(
fn,
((torch.rand((10, 3, 352, 352), dtype=torch.float32),)),
atol=1e-3,
rtol=1e-3,
)
self.common(
fn,
((torch.rand((14923), dtype=torch.float32),)),
atol=1e-3,
rtol=1e-3,
)
@skipCPUIf(IS_MACOS, "fails on macos")
@skip_if_halide # accuracy 4.7% off
def test_multilayer_var_lowp(self):
def fn(a):
return torch.var(a)
atol = None
rtol = None
if self.device == "cpu" and os.getenv("ATEN_CPU_CAPABILITY") == "default":
atol = 1e-3
rtol = 1e-3
self.common(
fn,
(torch.rand((16, 16, 352, 352), dtype=torch.float16),),
atol=atol,
rtol=rtol,
)
self.common(
fn, (torch.rand((14923), dtype=torch.float16),), atol=atol, rtol=rtol
)
def test_split_cumsum(self):
def fn(a):
return torch.cumsum(a, -1)
for dtype in get_all_dtypes(
include_bfloat16=False,
include_bool=True,
include_complex=False,
include_half=False,
):
if not self.is_dtype_supported(dtype):
continue
# Use low=0 since when the mean value is 0, cumsum at all points
# tends towards zero which makes the relative error term blow up
inp = make_tensor(10, 3, 352, 352, low=0, dtype=dtype, device=self.device)
self.common(fn, (inp.view(-1),), rtol=1e-4, atol=1e-5, check_lowp=False)
self.common(fn, (inp.view(10, -1),), rtol=1e-4, atol=1e-5, check_lowp=False)
@skipCUDAIf(not SM80OrLater, "Requires sm80")
@skip_if_gpu_halide # accuracy issue
def test_split_cumsum_low_prec(self):
if is_cpp_backend(self.device):
raise unittest.SkipTest("ir.Scan nyi on CPU")
def fn(a):
return torch.cumsum(a.view(-1), 0)
self.common(
fn,
(torch.rand((10, 3, 352, 352), dtype=torch.float16),),
reference_in_float=True,
check_lowp=False,
)
def test_consecutive_split_cumsum(self):
def fn(a, b):
a = a.view(-1)
b = b.view(-1)
return torch.cumsum(a, 0) + torch.cumsum(b, 0)
dtype_a = torch.float32
dtype_b = torch.float64
ctx = (
contextlib.nullcontext()
if self.is_dtype_supported(dtype_a) and self.is_dtype_supported(dtype_b)
else self.assertRaises(TypeError)
)
with ctx:
a = make_tensor(10, 3, 352, 352, low=0, dtype=dtype_a, device=self.device)
b = make_tensor(10, 3, 352, 352, low=0, dtype=dtype_b, device=self.device)
self.common(fn, (a, b), rtol=1e-4, atol=1e-5, check_lowp=False)
@config.patch(max_autotune_pointwise=True)
def test_split_cumsum_index(self):
# Split scan uses a workspace that needs to be zeroed before use.
# data[index] does indirect indexing that should catch issues if the
# workspace is not zeroed.
def fn(lengths, data):
offsets = torch.cumsum(lengths, 0)
return data[offsets]
lengths = torch.full((2**14,), 2**2, dtype=torch.int64, device=self.device)
lengths[-2] = 3
lengths[-1] = 3
data = make_tensor((2**16,), dtype=torch.float32, device=self.device)
self.common(fn, (lengths, data))
def test_split_cumprod(self):
def fn(a):
return torch.cumprod(a, -1)
for dtype in [torch.float32, torch.float64, torch.int32, torch.int64]:
if not self.is_dtype_supported(dtype):
continue
inp = _large_cumprod_input(
(10, 10000), dim=1, dtype=dtype, device=self.device
)
self.common(fn, (inp,), atol=1e-5, rtol=1e-4, check_lowp=False)
@skipCUDAIf(not SM80OrLater, "Requires sm80")
@skip_if_gpu_halide # accuracy issue
def test_split_cumprod_low_prec(self):
if is_cpp_backend(self.device):
raise unittest.SkipTest("ir.Scan nyi on CPU")
def fn(a):
return torch.cumprod(a.view(-1), 0)
for dtype in [torch.float16, torch.bfloat16]:
if not self.is_dtype_supported(dtype):
continue
inp = _large_cumprod_input(
(10, 10000), dim=1, dtype=dtype, device=self.device
)
self.common(
fn,
(inp,),
reference_in_float=True,
check_lowp=False,
)
def test_consecutive_split_cumprod(self):
def fn(a, b):
return torch.cumprod(a, 0) + torch.cumprod(b, 0)
dtype_a = torch.float32
dtype_b = torch.float64
ctx = (
contextlib.nullcontext()
if self.is_dtype_supported(dtype_a) and self.is_dtype_supported(dtype_b)
else self.assertRaises(TypeError)
)
with ctx:
a = _large_cumprod_input((10000,), dim=0, dtype=dtype_a, device=self.device)
b = _large_cumprod_input((10000,), dim=0, dtype=dtype_b, device=self.device)
self.common(fn, (a, b), atol=1e-5, rtol=1e-5, check_lowp=False)
@skip_if_halide # scan ops
# TODO: support lifted symints when dynamic
@torch._dynamo.config.patch(
{"dynamic_shapes": False, "assume_static_by_default": True}
)
def test_custom_scan_op(self):
if self.device != "cuda" and self.device != "xpu":
raise unittest.SkipTest("associative_scan only supported on GPU")
def sum_combine(a, b):
return a + b
from torch._higher_order_ops.associative_scan import associative_scan
a = torch.randn(100, 100, device=self.device)
expect = torch.cumsum(a, 0)
actual = associative_scan(sum_combine, a, 0)
self.assertEqual(expect, actual)
def logcumsum_combine(a, b):
min_v = torch.minimum(a, b)
max_v = torch.maximum(a, b)
mask = (min_v != max_v) | ~min_v.isinf()
return torch.where(mask, max_v + (min_v - max_v).exp().log1p(), a)
expect = torch.logcumsumexp(a, 0)
actual = associative_scan(logcumsum_combine, a, 0)
self.assertEqual(expect, actual)
@skip_if_halide # scan ops
# TODO: support lifted symints when dynamic
@torch._dynamo.config.patch(
{"dynamic_shapes": False, "assume_static_by_default": True}
)
def test_custom_scan_op_compiled(self):
if self.device != "cuda" and self.device != "xpu":
raise unittest.SkipTest("associative_scan only supported on GPU")
from torch._higher_order_ops.associative_scan import associative_scan
def sum_combine(a, b):
return a + b
def fn(a, b, dim):
diff = (a - b).abs()
sad = associative_scan(sum_combine, diff, dim)
return sad.sum(dim)
a = torch.randn(100, 100, device=self.device)
b = torch.randn(100, 100, device=self.device)
self.common(fn, (a, b, 0))
cfn = torch.compile(fn)
_, code = run_and_get_code(cfn, a, b, 0)
# Check everything is fused into a single kernel
FileCheck().check_not("run(").check_regex(
r"triton_.*\.run\(arg[01]_1, arg[12]_1, buf1,"
).check_not("run(").run(code[0])
@skip_if_halide # scan ops
# TODO: support lifted symints when dynamic
@torch._dynamo.config.patch(
{"dynamic_shapes": False, "assume_static_by_default": True}
)
def test_custom_scan_op_multi_input(self):
if self.device != "cuda" and self.device != "xpu":
raise unittest.SkipTest("associative_scan only supported on GPU")
def argmax_combine(a, b):
a_value, a_index = a
b_value, b_index = b
mask = (a_value > b_value) | ((a_value == b_value) & (a_index > b_index))
return (
torch.where(mask, a_value, b_value),
torch.where(mask, a_index, b_index),
)
from torch._higher_order_ops.associative_scan import associative_scan
a = torch.randn(100, 100, device=self.device)
expect = torch.cummax(a, 0)
idx = torch.arange(100, device=self.device).view(100, 1).expand(100, 100)
actual = associative_scan(argmax_combine, (a, idx), 0)
self.assertEqual(expect, actual)
@skip_if_halide # scan ops
# TODO: support lifted symints when dynamic
@torch._dynamo.config.patch(
{"dynamic_shapes": False, "assume_static_by_default": True}
)
def test_custom_scan_would_split(self):
if self.device != "cuda" and self.device != "xpu":
raise unittest.SkipTest("associative_scan only supported on GPU")
def combine_linear_recurrence(left, right):
xl, fl = left
xr, fr = right
x = xl * fr + xr
f = fl * fr
return x, f
def eager_scan(x, g):
x, g = x.to(torch.float64), g.to(torch.float64)
x_out = torch.empty_like(x)
g_out = torch.empty_like(g)
x_out[:, 0] = x[:, 0]
g_out[:, 0] = g[:, 0]
for i in range(1, x.shape[1]):
x_out[:, i], g_out[:, i] = combine_linear_recurrence(
(x_out[:, i - 1], g_out[:, i - 1]),
(x[:, i], g[:, i]),
)
return x_out.float(), g_out.float()
@torch.compile
def compiled_scan(x, f):
from torch._higher_order_ops.associative_scan import associative_scan
x, f = associative_scan(combine_linear_recurrence, (x, f), dim=1)
return x, f
x = torch.randn(1, 129, 2, device=self.device)
f = torch.randn(1, 129, 2, device=self.device)
expect = eager_scan(x, f)
actual = compiled_scan(x, f)
self.assertEqual(expect, actual)
def test_embedding_bag_byte_unpack(self):
if self.device != "cpu":
raise unittest.SkipTest(f"No {GPU_TYPE} implementation (it returns empty)")
def fn(a):
return torch.ops.quantized.embedding_bag_byte_unpack(a)
M, N = 32, 64
scales = torch.randn(M, 1).view(torch.uint8)
offsets = torch.randn(M, 1).view(torch.uint8)
data = torch.randint(0, 255, (M, N), dtype=torch.uint8)
packed = torch.cat([data, scales, offsets], dim=-1)
self.common(fn, [packed])
@xfail_if_mps_unimplemented
def test_int8_weight_only_quant(self):
def convert_weight_to_int8pack(b):
b_int8pack, b_scales, _ = _dynamically_quantize_per_channel(
b, -128, 127, torch.int8
)
return b_int8pack, b_scales
def fn(a, b_int8pack, b_scales, c):
res = torch._weight_int8pack_mm(a, b_int8pack, b_scales)
res = res + c
return res
m = 32
k = 32
n = 48
a = torch.rand((m, k), dtype=torch.bfloat16)
b = torch.rand((n, k), dtype=torch.bfloat16)
c = torch.rand((m, n), dtype=torch.bfloat16)
b_int8pack, b_scales = convert_weight_to_int8pack(b)
self.common(fn, (a, b_int8pack, b_scales, c))
@xfail_if_mps_unimplemented
@xfail_if_triton_cpu
@skipCUDAIf(True, "No _dyn_quant_pack_4bit_weight implementation on CUDA")
@skipIfRocm
@skipIfXpu(msg="No _dyn_quant_pack_4bit_weight implementation on XPU")
def test__dyn_quant_pack_4bit_weight_fp32(self):
q_group = 32
k = 128
n = 128
torch.manual_seed(1)
b = torch.rand((k, n), dtype=torch.float32)
in_features = b.size(0)
out_features = b.size(1)
def dyn_quant_pack_4bit_weight(b, in_features, out_features):
b_uint8, b_scales_and_zeros = _group_quantize_tensor_symmetric(
b, n_bit=4, groupsize=q_group
)
if q_group == in_features:
b_scales_and_zeros = b_scales_and_zeros.to(torch.float)
else:
b_scales_and_zeros = b_scales_and_zeros.to(torch.bfloat16)
b_int4pack = torch._dyn_quant_pack_4bit_weight(
b_uint8, b_scales_and_zeros, None, q_group, in_features, out_features
)
return b_int4pack, b_scales_and_zeros
def fn(b, in_features, out_features):
b_int4pack, _ = dyn_quant_pack_4bit_weight(b, in_features, out_features)
return b_int4pack
self.common(fn, (b, in_features, out_features))
@xfail_if_mps_unimplemented
@xfail_if_triton_cpu
@skipCUDAIf(True, "No _dyn_quant_pack_4bit_weight implementation on CUDA")
@skipIfRocm
@skipIfXpu(msg="No _dyn_quant_pack_4bit_weight implementation on XPU")
@skip_if_halide # bf16
def test__dyn_quant_pack_4bit_weight_bf16(self):
k = 128
n = 128
q_group = 32
if not self.is_dtype_supported(torch.bfloat16):
raise unittest.SkipTest(
f"torch.bfloat16 not supported for device {self.device}"
)
torch.manual_seed(1)
b = torch.rand((k, n), dtype=torch.bfloat16)
in_features = b.size(0)
out_features = b.size(1)
def dyn_quant_pack_4bit_weight(b, in_features, out_features):
b_uint8, b_scales_and_zeros = _group_quantize_tensor_symmetric(
b, n_bit=4, groupsize=q_group
)
if q_group == in_features:
b_scales_and_zeros = b_scales_and_zeros.to(torch.float)
else:
b_scales_and_zeros = b_scales_and_zeros.to(torch.bfloat16)
b_int4pack = torch._dyn_quant_pack_4bit_weight(
b_uint8, b_scales_and_zeros, None, q_group, in_features, out_features
)
return b_int4pack, b_scales_and_zeros
def fn(b, in_features, out_features):
b_int4pack, _ = dyn_quant_pack_4bit_weight(b, in_features, out_features)
return b_int4pack
self.common(fn, (b, in_features, out_features))
@xfail_if_mps_unimplemented
@xfail_if_triton_cpu
@skipCUDAIf(True, "No _dyn_quant_matmul_4bit implementation on CUDA")
@skipIfRocm
@skipIfXpu(msg="No _dyn_quant_matmul_4bit implementation on XPU")
def test__dyn_quant_matmul_4bit_fp32_input(self):
q_group = 32
m = 32
k = 128
n = 128
torch.manual_seed(1)
a = torch.rand((m, k), dtype=torch.float32)
b = torch.rand((k, n), dtype=torch.float32)
in_features = b.size(0)
out_features = b.size(1)
def dyn_quant_pack_4bit_weight(b, in_features, out_features):
b_uint8, b_scales_and_zeros = _group_quantize_tensor_symmetric(
b, n_bit=4, groupsize=q_group
)
if q_group == in_features:
b_scales_and_zeros = b_scales_and_zeros.to(torch.float)
else:
b_scales_and_zeros = b_scales_and_zeros.to(torch.bfloat16)
b_int4pack = torch._dyn_quant_pack_4bit_weight(
b_uint8, b_scales_and_zeros, None, q_group, in_features, out_features
)
return b_int4pack, b_scales_and_zeros
def fn(a, q_group, in_features, out_features):
b_int4pack, _ = dyn_quant_pack_4bit_weight(b, in_features, out_features)
res = torch._dyn_quant_matmul_4bit(
a,
b_int4pack,
q_group,
in_features,
out_features,
)
return res
self.common(fn, (a, q_group, in_features, out_features))
@skipCPUIf(IS_MACOS, "fails on M1, mismatch in bf16 support reporting")
@xfail_if_mps_unimplemented
@xfail_if_triton_cpu
@skipCUDAIf(True, "No _dyn_quant_matmul_4bit implementation on CUDA")
@skipIfRocm
@skipIfXpu(msg="No _dyn_quant_matmul_4bit implementation on XPU")
@skip_if_halide # bf16
def test__dyn_quant_matmul_4bit_bf16_input(self):
m = 32
k = 128
n = 128
q_group = k
if not self.is_dtype_supported(torch.bfloat16):
raise unittest.SkipTest(
f"torch.bfloat16 not supported for device {self.device}"
)
torch.manual_seed(1)
a = torch.rand((m, k), dtype=torch.bfloat16)
b = torch.rand((k, n), dtype=torch.bfloat16)
# codegen_dynamic_shape test fails without explicitly marking these dynamic
torch._dynamo.mark_dynamic(a, 0)
torch._dynamo.mark_dynamic(b, 1)
in_features = b.size(0)
out_features = b.size(1)
if not self.is_dtype_supported(torch.bfloat16):
raise unittest.SkipTest(
f"torch.bfloat16 not supported for device {self.device}"
)
def dyn_quant_pack_4bit_weight(b, in_features, out_features):
b_uint8, b_scales_and_zeros = _group_quantize_tensor_symmetric(
b, n_bit=4, groupsize=q_group
)
if q_group == in_features:
b_scales_and_zeros = b_scales_and_zeros.to(torch.float)
else:
b_scales_and_zeros = b_scales_and_zeros.to(torch.bfloat16)
b_int4pack = torch._dyn_quant_pack_4bit_weight(
b_uint8, b_scales_and_zeros, None, q_group, in_features, out_features
)
return b_int4pack, b_scales_and_zeros
def fn(a, q_group, in_features, out_features):
b_int4pack, _ = dyn_quant_pack_4bit_weight(b, in_features, out_features)
res = torch.ops.aten._dyn_quant_matmul_4bit(
a,
b_int4pack,
q_group,
in_features,
out_features,
)
return res
self.common(fn, (a, q_group, in_features, out_features), atol=1, rtol=0.5)
def test_expanded_reduction(self):
def fn(x, y):
z = x * y
return z.sum((0, 1))
atol = 1e-3
rtol = 1e-3
self.common(
fn, (torch.randn(2, 197, 256), torch.randn(2, 1, 256)), atol=atol, rtol=rtol
)
@skip_if_gpu_halide
def test_min_max_reduction(self):
def fn(a, b):
return (
(a + b).max(),
(a + b).min(),
torch.amax(a + 1, keepdim=True),
torch.amin(b + 1, keepdim=True),
)
dtypes = [torch.float, torch.float16]
if self.is_dtype_supported(torch.bfloat16):
dtypes += [torch.bfloat16]
for dtype in dtypes:
self.common(fn, (torch.randn(8, 8).to(dtype), torch.randn(8, 8).to(dtype)))
@skip_if_halide # bug in nan handling
def test_min_max_reduction_nan(self):
def fn(a):
return (torch.max(a), torch.min(a))
t1 = torch.randn(32)
t1[16] = float("nan")
self.common(fn, (t1,))
@skip_if_halide # bug in nan handling
def test_fmin_fmax(self):
def fn(a, b):
return (
torch.fmin(a, b),
torch.fmax(a, b),
torch.fmax(a + 1, torch.tensor(0.0)),
)
self.common(
fn,
(
torch.tensor(
[-10.0, 10.0, float("nan"), float("nan"), float("nan"), 3, 4]
),
torch.tensor(
[float("nan"), float("nan"), -10.0, 10.0, float("nan"), 4, 3]
),
),
)
def test_sum_int(self):
def fn(x):
return 2 * x.sum(-1) + x.sum()
dtypes = torch.bool, torch.uint8, torch.int
inps = [torch.randint(2, (64,), dtype=dtype) for dtype in dtypes]
for i in inps:
self.common(fn, (i,), check_lowp=False)
def test_sum_dtype(self):
sum_dtype = torch.double if self.device != "mps" else torch.bfloat16
def fn(x):
return x * x.sum(-1, dtype=sum_dtype) + x.sum(dtype=sum_dtype)
self.common(fn, (torch.ones(32, 32) * 70,))
@skip_if_halide
def test_cummin(self):
def fn(x):
return x.cummin(0)
self.common(
fn, (torch.rand(16, 32),), check_lowp=not is_halide_backend(self.device)
)
self.common(fn, (torch.rand(1),), check_lowp=not is_halide_backend(self.device))
self.common(fn, (torch.rand(0),), check_lowp=not is_halide_backend(self.device))
def test_cumsum(self):
def fn(x):
return x.cumsum(0), x.cumsum(1)
# Persistent reductions
self.common(
fn, (torch.rand(16, 32),), check_lowp=not is_halide_backend(self.device)
)
self.common(
fn, (torch.rand(20, 30),), check_lowp=not is_halide_backend(self.device)
)
# Non-persistent reduction
self.common(
fn,
(torch.rand(100, 4000),),
check_lowp=not is_halide_backend(self.device),
atol=1e-5,
rtol=1e-5,
)
def test_cumsum_zero_dim(self):
def fn(x):
return x.cumsum(0), x.cumsum(-1)
a = torch.rand(())
self.common(fn, (a,))
def test_cumsum_no_mask(self):
def fn(x):
return x.cumsum(-1)
# Persistent reduction
a = torch.rand((1, 1024))
self.common(
fn, (a,), check_lowp=not (TEST_WITH_ROCM or is_halide_backend(self.device))
)
# Non-persistent reduction
b = torch.rand((1, 8192))
self.common(
fn,
(b,),
check_lowp=not (TEST_WITH_ROCM or is_halide_backend(self.device)),
atol=1e-5,
rtol=1e-5,
)
def test_cumprod_zero_dim(self):
def fn(x):
return x.cumprod(0), x.cumprod(-1)
a = torch.rand(())
self.common(fn, (a,))
def test_cumsum_inf(self):
def fn(x):
return x.cumsum(-1)
_dtype = torch.float64
def make_tensor(shape):
return torch.full(shape, float("inf"), device=self.device, dtype=_dtype)
ctx = (
contextlib.nullcontext()
if self.is_dtype_supported(_dtype)
else self.assertRaises(TypeError)
)
with ctx:
cfn = torch.compile(fn)
for n in [100, 10, 100]:
inp = torch.full((2, n), float("inf"), device=self.device, dtype=_dtype)
self.assertEqual(cfn(inp), fn(inp))
@xfail_if_triton_cpu
def test_logcumsumexp(self):
def fn(x):
return x.logcumsumexp(0), x.logcumsumexp(1)
# Persistent reductions
self.common(
fn,
(torch.rand(16, 32),),
check_lowp=not (TEST_WITH_ROCM or is_halide_backend(self.device)),
)
self.common(
fn,
(torch.rand(20, 30),),
check_lowp=not (TEST_WITH_ROCM or is_halide_backend(self.device)),
)
# Non-persistent reduction
self.common(
fn,
(torch.rand(100, 4000),),
check_lowp=not (TEST_WITH_ROCM or is_halide_backend(self.device)),
atol=1e-5,
rtol=1e-5,
)
def test_logcumsumexp_zero_dim(self):
def fn(x):
return x.logcumsumexp(0), x.logcumsumexp(-1)
a = torch.rand(())
self.common(fn, (a,))
def test_clamp(self):
def fn(a, b):
return (a.clamp(-0.1, 0.1), b.clamp(0), torch.clamp(a + b, max=0))
self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
def test_clamp_type_promotion(self):
tgt_dtype = torch.double if self.device != "mps" else torch.half
def fn(a):
b = torch.tensor(1.0, dtype=tgt_dtype, device=self.device)
c = torch.full((4,), 2, device=self.device)
return a.clamp(min=b, max=c)
self.common(fn, (torch.randint(4, (4,)),))
def test_clamp_type_promotion_non_tensor(self):
def fn(a):
return a.clamp(min=1.5), a.clamp(min=2)
self.common(fn, (torch.randint(4, (4,)),))
@skip_if_gpu_halide
@xfail_if_triton_cpu
def test_dist(self):
def fn(a, b):
return (
torch.dist(a, b),
torch.dist(a, b, p=1.2),
)
self.common(fn, (torch.randn(4, 4), torch.randn(4, 4)))
@xfail_if_mps
@skip_if_halide # different pow accuracies
@xfail_if_triton_cpu
def test_norm_constant_overflow(self):
def fn(a):
return (
torch.norm(a, p=-41.0, dim=1),
torch.norm(a, p=-41.0, dim=0),
)
self.common(fn, (torch.randn(4, 1, 4),))
@skipCUDAIf(not SM80OrLater, "Requires sm80")
@skip_if_gpu_halide # https://github.com/halide/Halide/issues/8311
def test_dist_bf16(self):
def fn(a, b):
return torch.dist(a.to(torch.bfloat16), b.to(torch.bfloat16))
if not self.is_dtype_supported(torch.bfloat16):
raise unittest.SkipTest(
f"torch.bfloat16 not supported for device {self.device}"
)
self.common(fn, (torch.randn(4, 4), torch.randn(4, 4)))
def test_arange1(self):
def fn(x):
rng1 = torch.arange(8 * 8, dtype=torch.float32, device=x.device).view(8, 8)
rng2 = torch.arange(10, 18, device=x.device)
tmp = x * rng1
return tmp, tmp + rng2
self.common(fn, (torch.randn(8, 8),))
def test_arange2(self):
def fn(x):
rng1 = torch.arange(8, device=x.device)
return (x + rng1,)
self.common(fn, (torch.randint(4, (8, 8)),), check_lowp=False)
def test_arange3(self):
def fn(x):
return x + torch.ops.aten.arange.start_step(
0, 53, 4, dtype=torch.int64, device=x.device
)
self.common(fn, (torch.randn(14),))
def test_arange4(self):
def fn(x):
return x - torch.arange(512, -512, -1.0, device=x.device)
self.common(fn, (torch.randn(1024),))
def test_arange5(self):
def fn(step, device):
return torch.arange(512, -512, step, device=device)
compiled_fn = torch.compile(fn)
# NOTE: use assertEqual to check dtypes which self.common doesn't do
for step in (-1, -1.0):
expect = fn(step, self.device)
actual = compiled_fn(step, self.device)
self.assertEqual(expect, actual)
self.assertEqual(expect, actual)
def test_arange6(self):
def fn(x):
return torch.arange(0.1, 8.0001, 1, dtype=x.dtype, device=x.device)
# Test that float arguments are truncated to int when dtype is set explicitly
make_arg = functools.partial(
make_tensor, device=self.device, requires_grad=False
)
self.common(fn, (make_arg(1, dtype=torch.float32),))
self.common(fn, (make_arg(1, dtype=torch.int64),))
def test_linspace1(self):
def fn(x):
return torch.linspace(0.125, 0.875, 7, device=x.device) + x
self.common(fn, (torch.randn(1, 7),))
def test_linspace2(self):
def fn(x):
return torch.linspace(0, 2, 1, device=x.device) + x
self.common(fn, (torch.randn(1, 1),))
def test_linspace3(self):
def fn(x):
return torch.linspace(0, 2, 0, device=x.device)
self.common(fn, (torch.Tensor([]),))
@requires_multigpu()
def test_linspace4(self):
def fn(x):
return torch.linspace(0, 2, 0, device=f"{GPU_TYPE}:1")
self.common(fn, (torch.Tensor([]),))
def test_tensor1(self):
def fn(x):
return torch.tensor([1], device=x.device) + x, torch.tensor(
5, device=x.device
)
self.common(fn, (torch.randn(10),))
def test_tensor2(self):
def fn(x):
return torch.tensor(list(range(2, 40, 2)), device=x.device) + x
self.common(fn, (torch.randn(1),))
def test_tensor3(self):
def fn(x):
return (
torch.tensor([], device=x.device),
torch.tensor([1, 2], device=x.device) + 1,
torch.tensor([1, 2, 3], device=x.device) + 2,
torch.tensor([1, 2, 3, 4], device=x.device) + x,
)
self.common(fn, [torch.randn(4)])
def test_views1(self):
def fn1(x, y):
return (x.view(size2) + y,)
def fn2(x, y):
return ((x + 1).view(size2) + y,)
views = [
([5 * 7], [5, 7]),
([2 * 3 * 4 * 5 * 6 * 7], [2, 3, 4, 5, 6, 7]),
([2 * 3, 4, 5, 6 * 7], [2, 3, 4, 5, 6, 7]),
([10 * 5, 20], [10, 5, 20]),
([1, 10, 1], [10]),
([10, 1, 10, 1, 10], [10, 100]),
([2, 2, 2, 2], [4, 4]),
]
for size1, size2 in views:
self.common(fn1, (torch.randn(size1), torch.randn(size2)))
self.common(fn2, (torch.randn(size1), torch.randn(size2)))
for size2, size1 in views:
self.common(fn1, (torch.randn(size1), torch.randn(size2)))
self.common(fn2, (torch.randn(size1), torch.randn(size2)))
def test_views2(self):
for size1, size2 in [
([2, 2, 2, 2], [4, -1]),
([10, 1, 10, 1, 10], [-1, 100]),
([10 * 5, 20], [10, -1, 20]),
]:
def fn1(x):
return (x.view(size2) + 1,)
def fn2(x):
return ((x * 2).view(size2) + 1,)
self.common(fn1, (torch.randn(size1),))
self.common(fn2, (torch.randn(size1),))
def test_views3(self):
# example taken from hf_BigBird
def forward(arg1, arg2):
index = torch.ops.aten.index(arg1, [arg2])
view_1 = torch.ops.aten.view(index, [1, 2232, 64])
view_2 = torch.ops.aten.view(view_1, [1, 12, 62, 192])
return view_2
self.common(
forward,
(
rand_strided((64, 64), (64, 1), torch.float32),
rand_strided((2232,), (1,), torch.int64),
),
)
def test_views4(self):
# example taken from hf_BigBird
def forward(arg1, arg2):
arg1 = arg1.index_select(0, arg2)
arg1 = torch.ops.aten.view(arg1, [2, 3, 4, 5, 5])
arg1 = torch.ops.aten.view(arg1, [2, 3, 2, 10, -1])
return arg1
self.common(
forward,
(
torch.randn(12, 5, 5),
torch.randint(0, 11, (24,)),
),
)
def test_views5(self):
# tensor with shape 0 in any dimension
def forward(x):
y = x[:, 4:]
return y.view(len(y), -1, 4)
self.common(
forward,
(torch.randn(4, 4, 4, 4),),
)
def test_views6(self):
def forward(x):
x = torch.ops.aten.relu(x)
s = torch.ops.aten.slice(x, 0, 0, 9223372036854775807)
s = torch.ops.aten.slice(s, 1, 0, 9223372036854775807)
s = torch.ops.aten.slice(s, 3, 0, 0)
y = torch.ops.aten.view(s, [4, 2, -1])
return y
self.common(
forward,
(torch.randn(4, 2, 4, 4),),
)
def test_views7(self):
# x.view(dtype)
def forward(x, y):
x = (x + 1).to(torch.float32)
y = (y + 1).to(torch.int32)
return x.view(torch.int32), y.view(torch.float32)
self.common(
forward,
(
torch.rand(2, 3, dtype=torch.float32),
torch.randint(10, (2, 3), dtype=torch.int32),
),
)
def test_torch_device_split(self):
def fn(x):
return x.split(2)
x = torch.rand(10)
with x.device:
out = torch.compile(fn, backend=lambda gm, _: gm)(x)
ref = fn(x)
for a, b in zip(out, ref):
self.assertTrue(torch.allclose(a, b))
def test_relu(self):
def fn(a, b):
return (torch.relu(a), torch.relu(a + b) / 10)
self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
    def test_exp(self):
        # exp on a single input and on an elementwise sum.
        def fn(a, b):
            return (torch.exp(a), torch.exp(a + b))

        self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
def test_exp2(self):
def fn(a, b):
return (torch.exp2(a), torch.exp2(a + b), torch.pow(2, -torch.abs(a - b)))
self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
@skipIfXpu(msg="logaddexp_xpu not implemented for ComplexFloat")
@skipCUDAIf(True, "Not implemented for CUDA")
def test_logaddexp(self):
self.common(
torch.logaddexp,
(
torch.randn(8, 8).to(dtype=torch.complex64),
torch.randn(8, 8).to(dtype=torch.complex64),
),
)
def test_sigmoid(self):
def fn(a, b):
return (torch.sigmoid(a), torch.sigmoid(a + b))
self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
@xfail_if_triton_cpu
def test_round(self):
def fn(a, b):
return torch.round(a), torch.round(b + 1), torch.round(a, decimals=2)
# without manual_seed, there is some chance this test fails due to:
# https://github.com/triton-lang/triton/issues/530
torch.manual_seed(0)
# with *100 we are always getting a number exactly at .5 which we don't do right in half
self.common(fn, (torch.randn(8, 8) * 100, torch.randn(8, 8) * 10))
@xfail_if_triton_cpu
def test_round_correctness(self):
if self.device == "cuda":
raise unittest.SkipTest("need to debug tl.libdevice on A100/V100")
def fn(a):
return torch.round(a)
dtype = torch.float64 if self.device != "mps" else torch.float32
self.common(
fn,
[torch.arange(-10, 10, 0.1, dtype=dtype)],
check_lowp=False,
)
@xfail_if_triton_cpu
def test_builtins_round(self):
def fn(x, i):
return x[: round(i / 2 + 1)] + round(i / 2)
cfn = torch.compile(fullgraph=True, dynamic=True)(fn)
x = torch.zeros(5, dtype=torch.int, device=self.device)
with torch.no_grad():
for i in range(1, 6):
self.assertEqual(cfn(x, i), fn(x, i))
@xfail_if_triton_cpu
def test_builtins_round_float_ndigits_pos(self):
def fn(x, i):
return x + round(i / 2 * 123.4567, 1)
cfn = torch.compile(fullgraph=True, dynamic=True)(fn)
x = torch.zeros(2, device=self.device)
i = 2
with torch.no_grad():
self.assertEqual(cfn(x, i), fn(x, i))
@xfail_if_triton_cpu
def test_builtins_round_float_ndigits_zero(self):
def fn(x, i):
return x + round(i / 2 * 123.4567, 0)
cfn = torch.compile(fullgraph=True, dynamic=True)(fn)
x = torch.zeros(2, device=self.device)
i = 2
with torch.no_grad():
self.assertEqual(cfn(x, i), fn(x, i))
@xfail_if_triton_cpu
def test_builtins_round_float_ndigits_neg(self):
def fn(x, i):
return x + round(i / 2 * 123.4567, -1)
cfn = torch.compile(fullgraph=True, dynamic=True)(fn)
x = torch.zeros(2, device=self.device)
i = 2
with torch.no_grad():
self.assertEqual(cfn(x, i), fn(x, i))
def test_builtins_round_int_ndigits_pos(self):
def fn(x, i):
return x + round(i, 1)
cfn = torch.compile(fullgraph=True, dynamic=True)(fn)
x = torch.zeros(2, device=self.device)
i = 123
with torch.no_grad():
self.assertEqual(cfn(x, i), fn(x, i))
def test_builtins_round_int_ndigits_zero(self):
def fn(x, i):
return x + round(i, 0)
cfn = torch.compile(fullgraph=True, dynamic=True)(fn)
x = torch.zeros(2, device=self.device)
i = 123
with torch.no_grad():
self.assertEqual(cfn(x, i), fn(x, i))
def test_silu(self):
def fn(a):
return (torch.nn.functional.silu(a),)
self.common(fn, (torch.randn(8, 8),))
@skip_if_halide # halide has buggy nan handling
def test_nan_to_num(self):
def fn(a):
return (
torch.nan_to_num(a),
torch.nan_to_num(a, nan=3.0),
torch.nan_to_num(a, nan=None),
torch.nan_to_num(a, posinf=4.0),
torch.nan_to_num(a, neginf=5.0),
torch.nan_to_num(a, nan=3.0, posinf=4.0, neginf=5.0),
)
self.common(
fn,
(torch.tensor((float("nan"), float("inf"), float("-inf"), 1.0)),),
check_lowp=False, # a much more elaborate test is required to match finfo max's for float and half
)
def test_one_hot(self):
def fn(a):
return torch.nn.functional.one_hot(a, 8) + 1
self.common(
fn,
(torch.arange(100).view(4, 5, 5) % 8,),
check_lowp=False,
)
    def test_div1(self):
        # Exercises every division flavor on float/float inputs:
        # true division (rounding_mode=None), floor and trunc rounding,
        # plus the `/` and `//` operators.
        def fn(a, b):
            return (
                aten.div(a, b, rounding_mode=None),
                aten.div(a, b, rounding_mode="floor"),
                aten.div(a, b, rounding_mode="trunc"),
                a / b,
                a // b,
            )

        self.common(fn, (torch.randn(8, 8) * 100, torch.randn(8, 8) * 100))
def test_div2(self):
def fn(a, b):
return (
aten.div(a, b, rounding_mode=None),
aten.div(a, b, rounding_mode="floor"),
aten.div(a, b, rounding_mode="trunc"),
a / b,
a // b,
)
self.common(fn, (torch.randint(-100, 100, [8, 8]), 100 * torch.randn(8, 8)))
def test_div3(self):
def fn(a, b):
return (
aten.div(a, b, rounding_mode=None),
aten.div(a, b, rounding_mode="floor"),
aten.div(a, b, rounding_mode="trunc"),
a / b,
a // b,
)
a = torch.randint(1, 100, [8, 8])
self.common(fn, (a * 2, a))
def test_div4(self):
def fn(a, b):
return (
aten.div(a, b, rounding_mode=None),
aten.div(a, b, rounding_mode="floor"),
aten.div(a, b, rounding_mode="trunc"),
a / b,
a // b,
)
self.common(
fn,
(torch.randint(-100, 0, [8, 8]), torch.randint(1, 10, [8, 8])),
)
def test_div5(self):
def fn(a, b):
return (
aten.div(a, b, rounding_mode=None),
aten.div(a, b, rounding_mode="floor"),
aten.div(a, b, rounding_mode="trunc"),
a / b,
a // b,
)
# divide a scalar
self.common(fn, (torch.randint(-100, 0, [8, 8]), 16))
def test_div6(self):
def fn(a, b):
return (
aten.div(a, b, rounding_mode=None),
aten.div(a, b, rounding_mode="floor"),
aten.div(a, b, rounding_mode="trunc"),
a / b,
a // b,
)
# treat boolean as integer
self.common(
fn,
(torch.ones([8, 8], dtype=torch.bool), torch.randint(-100, -1, [8, 8])),
)
# div7: very large int64 numerators / negative denominators, checking
# 64-bit integer division correctness.
@skip_if_triton_cpu  # divide by zero; cannot xfail because it crashes process
def test_div7(self):
    def fn(a, b):
        return (
            aten.div(a, b, rounding_mode=None),
            aten.div(a, b, rounding_mode="floor"),
            aten.div(a, b, rounding_mode="trunc"),
            a / b,
            a // b,
        )
    self.common(
        fn,
        (
            torch.randint(2**32, 2**40, [100, 100]),
            torch.randint(-10, -1, [100, 100]),
        ),
    )
# div8: both operands are Python scalars (no tensors at all).
def test_div8(self):
    def fn(a, b):
        return (
            aten.div(a, b, rounding_mode=None),
            aten.div(a * 0.5, b, rounding_mode=None),
            aten.div(a, b * 1.0, rounding_mode=None),
            aten.div(a, b, rounding_mode="floor"),
            aten.div(a, b, rounding_mode="trunc"),
            a / b,
            a // b,
        )
    self.common(fn, (1024, 100))
# div9: scalar numerator with tensor denominator, via three equivalent APIs.
def test_div9(self):
    def fn(x):
        return (torch.div(42, x), aten.true_divide(42, x), aten.div.Tensor(42, x))
    self.common(fn, (torch.randn(8),))
# Division where one operand is a 0-d tensor, in both argument orders and
# for both float and int dtypes.
@skip_if_triton_cpu  # divide by zero; cannot xfail because it crashes process
def test_div_zero_dim(self):
    def fn(a, b):
        return (
            aten.div(a, b, rounding_mode=None),
            aten.div(a, b, rounding_mode="floor"),
            aten.div(a, b, rounding_mode="trunc"),
            a / b,
            a // b,
        )
    for dtype in (torch.float32, torch.int64):
        self.common(
            fn,
            (
                make_tensor(10, device=self.device, dtype=dtype),
                make_tensor((), device=self.device, dtype=dtype, exclude_zero=True),
            ),
        )
        self.common(
            fn,
            (
                make_tensor((), device=self.device, dtype=dtype),
                make_tensor(10, device=self.device, dtype=dtype, exclude_zero=True),
            ),
        )
# prims.div (the primitive op, no rounding modes) for float and int.
@skip_if_triton_cpu  # divide by zero; cannot xfail because it crashes process
def test_div_prim(self):
    def fn(a, b):
        return (torch.ops.prims.div(a, b),)
    for dtype in (torch.float32, torch.int64):
        self.common(
            fn,
            (
                make_tensor(100, device=self.device, dtype=dtype),
                make_tensor(
                    100, device=self.device, dtype=dtype, exclude_zero=True
                ),
            ),
        )
# Python `//` on symbolic scalars (float and int) mixed into tensor compute.
def test_floordiv(self):
    def fn_floor_input(a, i):
        n = (i * 1.234) // 8.234
        return a + n
    self.common(
        fn_floor_input,
        (make_tensor(10, device=self.device, dtype=torch.float32), 33),
    )
    def fn_int_input(a, i):
        n = i // 8
        return a + n
    self.common(
        fn_int_input, (make_tensor(10, device=self.device, dtype=torch.float32), 33)
    )
# div followed by softmax must keep full precision; the hard-coded tensor
# is a fixture reproducing the numeric mismatch from the linked issue.
def test_div_precision(self):
    # Reproducer for https://github.com/pytorch/pytorch/issues/101039
    def forward(x, y):
        z = x.div(y)
        return F.softmax(z, dim=-1)
    query = torch.randn(1, 10, 40)
    key = torch.randn(1, 2, 40)
    x = torch.matmul(query, key.transpose(-2, -1))
    self.common(forward, (x, 1e-6))
    x = torch.tensor(
        [
            [
                [
                    [-16.1649, 5.6846, -5.1022, -9.1134],
                    [-11.5552, -2.2615, -12.8913, 10.6538],
                    [-7.1666, -5.3333, 2.0776, -9.7984],
                    [7.4469, -2.3948, 2.7371, 0.9201],
                ],
                [
                    [-8.0361, -16.3771, 22.7741, 4.4685],
                    [20.8047, -0.7771, -2.4355, -2.2299],
                    [3.8343, -2.0914, -2.4077, 2.2740],
                    [-15.8663, -2.7015, -12.5241, -3.0040],
                ],
                [
                    [-2.5139, 14.4393, -3.7186, 1.2255],
                    [5.6742, 14.1842, -8.5976, 16.8366],
                    [-9.7358, -3.0279, 11.8164, -4.0787],
                    [-9.0621, 8.2580, 29.9486, -2.4107],
                ],
                [
                    [7.3622, 12.5640, -20.5592, 13.6237],
                    [-11.5640, 0.8832, 16.7275, -2.5009],
                    [-2.0953, -12.2276, -26.2633, 4.5268],
                    [15.3329, -11.7492, 6.5650, -9.2483],
                ],
            ],
            [
                [
                    [7.9980, -4.9369, 3.1508, 5.2994],
                    [3.8052, 3.9514, 8.4987, -10.5045],
                    [-2.6827, -4.0010, -4.0611, 6.4091],
                    [-19.0318, 6.4073, 2.8923, 8.0250],
                ],
                [
                    [7.1650, -3.4585, 5.7720, -5.0305],
                    [-0.9765, -3.0086, 11.7114, 8.0555],
                    [-3.1027, -3.5514, 9.6182, -8.8526],
                    [-9.2348, -6.0239, 6.2528, -6.7221],
                ],
                [
                    [11.5936, 22.4139, -0.4089, -4.9889],
                    [14.8217, -2.3426, -17.6189, 3.7427],
                    [1.9546, -13.0902, 8.6293, -7.2457],
                    [-7.6900, -4.5796, 9.6332, -10.2631],
                ],
                [
                    [0.8027, -1.0955, 14.8404, -0.2673],
                    [3.2143, -1.8640, -2.9678, 6.5165],
                    [-3.9865, 6.5230, 6.3019, -0.4247],
                    [8.3185, -13.5076, 27.0986, -1.6792],
                ],
            ],
        ]
    )
    x = torch.matmul(x, x)
    y = torch.tensor([[[0.6331]], [[1.6358]], [[-0.3459]], [[1.0196]]])
    self.common(forward, (x, y))
# div by a symbolic float (dynamic=True recompiles at 1e-5 then reuses at
# 1e-6) followed by softmax; only checks it runs, no value assertion.
def test_div_softmax_symfloat(self):
    def forward(x, y):
        z = x.div(y * x.shape[-1])
        return F.softmax(z, dim=-1)
    query = torch.randn(1, 10, 40)
    key = torch.randn(1, 2, 40)
    x = torch.matmul(query, key.transpose(-2, -1))
    cf = torch.compile(forward, dynamic=True)
    cf(x, 1e-5)
    cf(x, 1e-6)
# NOTE(review): "presicion" is a typo for "precision" in the test name; kept
# as-is since renaming would change the externally visible test ID.
def test_div_presicion_accuracy(self):
    # fix https://github.com/pytorch/pytorch/issues/157959
    def forward(x, y):
        return (x / y).sum()
    x = torch.rand((5, 5))
    y = 101
    self.common(forward, (x, y))
# Same symfloat pattern as above but with mul instead of div.
def test_mul_softmax_symfloat(self):
    def forward(x, y):
        z = x.mul(y * x.shape[-1])
        return F.softmax(z, dim=-1)
    query = torch.randn(1, 10, 40)
    key = torch.randn(1, 2, 40)
    x = torch.matmul(query, key.transpose(-2, -1))
    cf = torch.compile(forward, dynamic=True)
    cf(x, 1e-5)
    cf(x, 1e-6)
# IEEE semantics for division by compile-time and runtime +/-0.0:
# results should be inf/-inf/nan matching eager.
def test_div_by_zero(self):
    def fn(x, runtime_zero, runtime_neg_zero):
        zero = torch.zeros_like(x)
        return (
            x / 0.0,
            x / -0.0,
            zero / 0.0,
            x / zero,
            x / -zero,
            zero / zero,
            x / runtime_zero,
            # NOTE: -runtime_zero doesn't work as -(0.0) is broken in triton
            x / runtime_neg_zero,
            runtime_zero / runtime_neg_zero,
        )
    a = torch.randn(10)
    zero = torch.zeros(10)
    neg_zero = -zero
    self.common(fn, (a, zero, neg_zero))
# Arithmetic where both operands are Python scalars (int and float).
def test_both_scalars(self):
    def fn(a, b):
        return (
            aten.add(a, b),
            aten.add(b, a),
            aten.sub(a, b),
            aten.sub(b, a),
            aten.mul(a, b),
            aten.mul(b, a),
        )
    self.common(fn, (4, 3.3), reference_in_float=False)
# Reduction with keepdim=True over the last dimension.
def test_sum_keepdims(self):
    def fn(a, b):
        return (torch.sum(a + b, -1, keepdim=True),)
    self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
# Reduction over a tensor with numel > 2**32 must select 64-bit indexing;
# a sentinel value at the end proves the whole tensor was scanned.
@skip_if_cpu
@skip_if_halide  # only 32-bit indexing
@largeTensorTest("4GB", inductor=True)
def test_large_tensor_reduction(self):
    # Test 64-bit indexing works correctly
    def fn(a):
        return torch.max(a)
    t = torch.ones(2**32, dtype=torch.int8, device=self.device)
    t[-1] = 2
    # self.common OOMs here because it copies inputs to check for mutations
    compiled_fn = torch.compile(fn)
    actual = compiled_fn(t)
    expect = torch.tensor(2, dtype=torch.int8, device=self.device)
    self.assertEqual(actual, expect)
# 64-bit indexing needed only for the broadcasted intermediate, not the
# (small) inputs themselves.
@skip_if_cpu
@skip_if_gpu_halide  # only 32-bit indexing
def test_large_broadcast_reduction(self):
    # Test 64-bit indexing works correctly when inputs are less than 32-bit
    # but intermediate tensors require 64-bit indexing
    def fn(a, b):
        return torch.max(a + b)
    t1 = torch.ones(1, 2**16, dtype=torch.int8, device=self.device)
    t2 = torch.ones(2**16, 1, dtype=torch.int8, device=self.device)
    t1[-1, -1] = 2
    t2[-1, -1] = 2
    # self.common OOMs here because it copies inputs to check for mutations
    compiled_fn = torch.compile(fn)
    actual = compiled_fn(t1, t2)
    expect = torch.tensor(4, dtype=torch.int8, device=self.device)
    self.assertEqual(actual, expect)
# Pointwise kernel over numel > INT_MAX.
@skip_if_halide  # only 32-bit indexing
@largeTensorTest("4GB", inductor=True)
def test_large_pointwise(self):
    def fn(a):
        return a + 1
    t = torch.ones(2**31 + 1, dtype=torch.int8, device=self.device)
    compiled_fn = torch.compile(fn)
    actual = compiled_fn(t)
    # Can't use assertEqual as it expands broadcasted inputs
    del t
    if torch.device(self.device).type == GPU_TYPE:
        getattr(torch, GPU_TYPE).empty_cache()
    self.assertTrue((actual == 2).all())
# Storage offset alone pushes addressing past INT_MAX.
@skip_if_halide  # only 32-bit indexing
@largeTensorTest("3GB", inductor=True)
def test_large_offset_pointwise(self):
    # Test 64-bit indexing is used when input views a tensor that can be
    # indexed with 32-bit strides but the storage offset pushes it over
    # INT_MAX
    def fn(a):
        return a + 4
    t = torch.ones(2**31 + 1, dtype=torch.int8, device=self.device)
    t[2**30 :] = 0
    compiled_fn = torch.compile(fn)
    actual = compiled_fn(t[2**30 :])
    self.assertTrue((actual == 4).all())
# Stride arithmetic (not numel) exceeds INT_MAX via a strided view.
@skip_if_halide  # only 32-bit indexing
@largeTensorTest("2GB", inductor=True)
def test_large_strided_reduction(self):
    # Test 64-bit indexing is used when input numel is less than INT_MAX
    # but stride calculations go above INT_MAX
    def fn(a):
        return torch.max(a)
    storage = torch.ones(2**31 + 1, dtype=torch.int8, device=self.device)
    view = storage[::32]
    view[-1] = 2
    compiled_fn = torch.compile(fn)
    actual = compiled_fn(view)
    expect = torch.tensor(2, dtype=torch.int8, device=self.device)
    self.assertEqual(actual, expect)
# softmax along several different dims, including over a fused add.
def test_softmax(self):
    def fn(a, b):
        return (torch.softmax(a + b, -1), torch.softmax(a, 0), torch.softmax(b, 1))
    self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
# log_softmax along several different dims.
def test_log_softmax(self):
    def fn(a, b):
        return (F.log_softmax(a + b, -1), F.log_softmax(a, 0), F.log_softmax(b, 1))
    self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
# torch.t and torch.transpose views composed with pointwise ops.
def test_transpose(self):
    def fn(a, b):
        return (
            torch.t(a) + b,
            torch.transpose(b * 2, 0, 1) + 10,
        )
    self.common(fn, (torch.randn(8, 8), torch.randn(8, 8)))
# 5-d permute, applied both before and after a pointwise op.
def test_permute1(self):
    def fn(a):
        return (
            torch.permute(a + 1, [2, 1, 4, 0, 3]) + 2,
            torch.permute(a, [2, 1, 4, 0, 3]) + 2,
        )
    self.common(fn, (torch.randn(2, 2, 2, 2, 2),))
# permute with a negative dim index after unfold/unsqueeze views.
def test_permute2(self):
    def fn(a):
        a = a.unfold(0, 2, 1)
        a = torch.unsqueeze(a, 1)
        a = torch.permute(a, [0, 2, 3, -3])
        return (a,)
    self.common(fn, (torch.randn(4, 4),))
# expand with explicit sizes and with -1 placeholders.
def test_expand(self):
    def fn(a):
        return (
            (a + 1).expand(3, 4, 2, 3, 2) + 2,
            a.expand(2, 1, 2, 3, 2) + 2,
        ), a.expand(2, -1, 5, -1)
    self.common(fn, (torch.randn(2, 1, 2),))
# squeeze() of all size-1 dims.
def test_squeeze1(self):
    def fn(a):
        return ((a + 1).squeeze() + 2, a.squeeze() + 2)
    self.common(fn, (torch.randn(1, 2, 1, 2, 2, 1, 1),))
# squeeze of specific dims (negative index, chained, leading dim).
def test_squeeze2(self):
    def fn(a):
        return ((a + 1).squeeze(-1).squeeze(2) + 2, a.squeeze(0) + 2)
    self.common(fn, (torch.randn(1, 2, 1, 2, 2, 2, 1),))
# squeeze with multiple dims passed as varargs.
def test_squeeze_varargs(self):
    def fn(x):
        return x.squeeze(1, 2).clone()
    a = torch.randn(1024, 1, 1)
    self.common(fn, (a,))
# Add of a contiguous tensor with a permuted one; exercises Inductor's
# loop-index simplification across mismatched layouts.
def test_simplify_loops(self):
    def fn(a, b):
        return a + b
    self.common(
        fn,
        (
            torch.randn(2, 3, 4, 5, 6),
            torch.randn(4, 2, 3, 5, 6).permute(1, 2, 0, 3, 4),
        ),
    )
# unsqueeze at positive, negative, and boundary dims.
def test_unsqueeze(self):
    def fn(a):
        return (
            torch.unsqueeze(a + 1, -1) + 2,
            torch.unsqueeze(a, 2) + 2,
            torch.unsqueeze(a + 1, 0) + 2,
            torch.unsqueeze(a, -2) + 2,
        )
    self.common(
        fn,
        (
            torch.randn(
                2,
                2,
                2,
                2,
            ),
        ),
    )
# In-place aten.unsqueeze_ on an intermediate, both discarded and reused.
def test_unsqueeze_inplace(self):
    def fn(a):
        tmp1 = a + 1
        aten.unsqueeze_(tmp1, 2)
        tmp2 = aten.unsqueeze_(a + 1, 0) + 2
        return (tmp1, tmp2)
    self.common(
        fn,
        (
            torch.randn(
                2,
                2,
                2,
                2,
            ),
        ),
    )
# addmm with pointwise ops fused around it.
def test_addmm(self):
    def fn(a, b, c):
        return (torch.addmm(a + 1, b + 2, c + 3) + 4,)
    self.common(
        fn,
        (
            torch.randn(8, 8),
            torch.randn(8, 8),
            torch.randn(8, 8),
        ),
    )
# addmv degenerate case: empty int32 matrix and empty vector.
def test_addmv(self):
    def fn(a, b, c):
        return torch.addmv(a, b, c)
    cfn = torch.compile(backend="inductor")(fn)
    input = torch.tensor([2], dtype=torch.int32)
    mat = torch.tensor(np.random.randn(0, 0), dtype=torch.int32)
    vec = torch.tensor([])
    with torch.no_grad():
        self.assertEqual(cfn(input, mat, vec), fn(input, mat, vec))
# https://github.com/pytorch/pytorch/issues/98979
# nn.Linear in float64; skipped on CUDA/XPU where fp64 matmul is unsupported,
# and expected to raise TypeError on backends without fp64 at all.
@skipCUDAIf(True, "cuda failed for float64 linear")
@skipIfXpu(msg="Double and complex datatype matmul is not supported in oneDNN")
def test_linear_float64(self):
    _dtype = torch.float64
    ctx = (
        contextlib.nullcontext()
        if self.is_dtype_supported(_dtype)
        else self.assertRaises(TypeError)
    )
    with ctx:
        mod = torch.nn.Sequential(torch.nn.Linear(8, 16).to(_dtype)).eval()
        with torch.no_grad():
            self.common(mod, (torch.randn(2, 8).to(_dtype),))
# Single Linear + Sigmoid module.
def test_linear1(self):
    mod = torch.nn.Sequential(
        torch.nn.Linear(8, 16),
        torch.nn.Sigmoid(),
        ToTuple(),
    )
    self.common(mod, (torch.randn(2, 8),))
# Stack of four Linear+ReLU pairs; looser tolerances for accumulated error.
def test_linear2(self):
    mod = torch.nn.Sequential(
        torch.nn.Linear(8, 8),
        torch.nn.ReLU(),
        torch.nn.Linear(8, 8),
        torch.nn.ReLU(),
        torch.nn.Linear(8, 8),
        torch.nn.ReLU(),
        torch.nn.Linear(8, 8),
        torch.nn.ReLU(),
    )
    self.common(
        mod,
        (torch.randn(2, 8),),
        atol=1e-3,
        rtol=0.01,
    )
# Batched matmul, plain and with fused pointwise pre/post ops.
def test_bmm1(self):
    def fn(a, b):
        return (
            torch.bmm(a, b),
            torch.bmm(a + 1, b + 2) + 3,
        )
    self.common(
        fn,
        (
            torch.randn(2, 8, 8),
            torch.randn(2, 8, 8),
        ),
        check_lowp=False,
    )
    self.common(
        fn,
        (
            torch.randn(1, 16, 8),
            torch.randn(1, 8, 10),
        ),
        check_lowp=False,
    )
# bmm where the first operand is a permuted (transposed) view.
def test_bmm2(self):
    def fn(a, b):
        return torch.bmm(a.permute(0, 2, 1), b)
    self.common(
        fn,
        (
            torch.randn(1, 8, 8),
            torch.randn(1, 8, 8),
        ),
        check_lowp=False,
    )
# mm with an int8 operand cast to the float dtype inside the graph
# (mixed-dtype matmul fusion).
@skipIfPy312  # segfaults
@skipCUDAIf(not SM80OrLater, "Requires sm80")
def test_mixed_mm(self):
    def fn(a, b):
        return torch.mm(a, b.to(a.dtype))
    self.common(
        fn,
        (
            torch.randn(8, 8),
            torch.randint(-128, 127, (8, 8), dtype=torch.int8),
        ),
        check_lowp=True,
    )
# Mixed-dtype mm followed by a scale-and-bias epilogue.
@skipIfPy312  # segfaults
@skipCUDAIf(not SM80OrLater, "Requires sm80")
def test_mixed_mm2(self):
    def fn(a, b, scale, bias):
        return torch.mm(a, b.to(a.dtype)) * scale + bias
    self.common(
        fn,
        (
            torch.randn(8, 8),
            torch.randint(-128, 127, (8, 8), dtype=torch.int8),
            torch.randn(8),
            torch.randn(8),
        ),
        check_lowp=True,
    )
# Larger mixed-dtype mm so autotuning explores multiple block sizes.
@skipIfPy312  # segfaults
@skipCUDAIf(not SM80OrLater, "Requires sm80")
def test_mixed_mm3(self):
    def fn(a, b):
        return torch.mm(a, b.to(a.dtype))
    # (256, 256) @ (256, 256) so different block sizes are tried out during autotuning
    self.common(
        fn,
        (
            torch.randn(256, 256),
            torch.randint(-128, 127, (256, 256), dtype=torch.int8),
        ),
        rtol=0.01,
        atol=0.1,
    )
# uint8 weights unpacked as two 4-bit nibbles (uint4x2) before the matmul.
@with_tf32_off
def test_uint4x2_mixed_mm(self):
    def fn(a, b):
        return torch.mm(
            a,
            torch.cat((b & 0xF, b >> 4), 1)
            .reshape(-1, b.shape[1])
            .to(a.dtype)
            .sub(8),
        )
    self.common(
        fn,
        (
            torch.randn(8, 8),
            torch.randint(0, 255, (4, 8), dtype=torch.uint8),
        ),
        check_lowp=True,
    )
# mm with mismatched dtypes must raise both in eager and (for cpp_wrapper)
# when compiled; checks the error surfaces rather than being swallowed.
def test_mm_mixed_dtype(self):
    def fn(a, b):
        return torch.mm(a, b)
    t1 = torch.arange(6, dtype=torch.float, device=self.device).view(2, 3)
    t2 = torch.arange(9, dtype=torch.int64, device=self.device).view(3, 3)
    msg = "expected .* and .* to have the same dtype, but got: .* != .*"
    with self.assertRaisesRegex(RuntimeError, msg):
        fn(t1, t2)
    if config.cpp_wrapper:
        msg = "aoti_torch_.* API call failed at .*"
    with self.assertRaisesRegex(RuntimeError, msg):
        torch.compile(fn)(t1, t2)
# Same dtype-mismatch checks for nn.Linear fed an int tensor.
@xfail_if_mps_unimplemented  # linear for non-float inputs
def test_linear_mixed_dtype(self):
    class Net(nn.Module):
        def __init__(self) -> None:
            super(Net, self).__init__()  # noqa: UP008
            self.fc1 = nn.Linear(3, 3)
        def forward(self, x):
            x = self.fc1(x.permute(1, 2, 0))
            return x
    fn = Net().to(self.device)
    t = torch.arange(27, device=self.device).view(3, 3, 3)
    msg = "expected .* and .* to have the same dtype, but got: .* != .*"
    with self.assertRaisesRegex(RuntimeError, msg):
        fn(t)
    if config.cpp_wrapper:
        msg = "aoti_torch_.* API call failed at .*"
        with self.assertRaisesRegex(RuntimeError, msg):
            with torch.no_grad():
                torch.compile(fn)(t)
    with self.assertRaisesRegex(RuntimeError, "Autograd not support dtype:.*"):
        torch.compile(fn)(t)
# Max-autotune with the Triton backend only, on a dynamically-shaped Linear.
@unittest.skipIf(
    not IS_BIG_GPU, "Skipping triton backend only since not big GPU (not enough SM)"
)
@config.patch(
    {
        "max_autotune": True,
        "max_autotune_gemm_backends": "TRITON",
    }
)
def test_linear_dynamic_maxautotune(self):
    if self.device == "cpu":
        raise unittest.SkipTest("using triton backend only is not supported on CPU")
    @torch.compile(dynamic=True)
    class Model(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(1, 1)
        def forward(self, x):
            return self.linear(x)
    x = torch.randn(10, 1)
    torch._dynamo.mark_dynamic(x, 0)
    self.common(Model(), (x,))
# Floor-division of a tensor by a Python scalar argument.
def test_scalar_input(self):
    def fn(x, y):
        a = torch.div(x, y, rounding_mode="floor")
        return a
    self.common(fn, [torch.randint(5, (1, 8)), 5400])
# A graph that returns a symbolic int (a size) alongside tensors.
@torch._dynamo.config.patch(dynamic_shapes=True)
@torch._dynamo.config.patch(assume_static_by_default=False)
def test_scalar_output(self):
    def fn(arg0_1, arg2_1):
        arg1_1 = arg2_1.size(1)
        view = torch.ops.aten.view.default(arg2_1, [-1, arg1_1])
        embedding = torch.ops.aten.embedding.default(arg0_1, view)
        full = torch.ops.aten.full.default([1, arg1_1], 1, dtype=torch.float32)
        return (full, arg1_1, embedding)
    arg0_1 = rand_strided((32128, 768), (768, 1), device="cpu", dtype=torch.float32)
    arg2_1 = rand_strided((1, 22), (22, 1), device="cpu", dtype=torch.int64)
    self.common(fn, [arg0_1, arg2_1])
# torch.ones created inside the graph on the input's device, broadcast-added
# to the input (shape propagation through a freshly allocated tensor).
def test_shape_prop_torch_ones(self):
    class Model(torch.nn.Module):
        def forward(self, attention_scores):
            extended_attention_mask = torch.ones(
                8, 1, 1, 512, device=attention_scores.device
            )
            attention_scores = attention_scores + extended_attention_mask
            return attention_scores
    mod = Model().eval()
    with torch.no_grad():
        self.common(
            mod,
            (torch.randn(8, 12, 512, 512),),
        )
# Conv+BatchNorm folding under freezing, swept over 1d/2d/3d, bias,
# kernel size, dilation, groups, and (for 2d/3d on CPU) channels_last.
@slowTest
@expectedFailureCodegenDynamic
@config.patch({"freezing": True})
def test_conv_bn_fuse(self):
    # For gpu path, there is an accuracy issue
    if self.device == GPU_TYPE:
        raise unittest.SkipTest("only support cpu conv bn test")
    # fails dynamic check which bn is fused, and there will not have loops vars.
    input_shapes = {1: (112,), 2: (112, 112), 3: (55, 55, 55)}
    conv_modules = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d, 3: torch.nn.Conv3d}
    bn_modules = {
        1: torch.nn.BatchNorm1d,
        2: torch.nn.BatchNorm2d,
        3: torch.nn.BatchNorm3d,
    }
    options = itertools.product(
        [1, 2, 3],
        [True, False],
        [1, 3],
        [1, 2],
        [1, 4],
    )
    for (
        dim,
        bias,
        kernel_size,
        dilation,
        groups,
    ) in options:
        oC = 32 * groups
        iC = 3 * groups
        x_shape = (1, iC) + input_shapes[dim]
        mod = torch.nn.Sequential(
            conv_modules[dim](
                iC,
                oC,
                kernel_size=kernel_size,
                dilation=dilation,
                groups=groups,
                bias=bias,
            ),
            bn_modules[dim](oC),
        ).eval()
        test_memory_format = [torch.contiguous_format]
        # TODO: GPU path doesn't support channels_last now.
        if not HAS_GPU and dim > 1:
            channels_last = (
                torch.channels_last if dim == 2 else torch.channels_last_3d
            )
            test_memory_format.append(channels_last)
        for memory_format in test_memory_format:
            v = torch.randn(x_shape, dtype=torch.float32).to(
                memory_format=memory_format
            )
            with torch.no_grad():
                self.common(
                    mod,
                    (v,),
                )
# Conv followed by a BatchNorm whose forward re-implements the module via
# F.batch_norm; checks fusion still works when BN isn't the stock module.
def test_conv_functional_bn_fuse(self):
    # For gpu path, there is an accuracy issue
    if self.device == GPU_TYPE:
        raise unittest.SkipTest("only support cpu conv bn test")
    # Define a BatchNorm using functional BN.
    class BatchNorm(torch.nn.BatchNorm2d):
        def __init__(
            self,
            num_features,
            eps=1e-5,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
        ):
            factory_kwargs = {"device": device, "dtype": dtype}
            super().__init__(
                num_features,
                eps=eps,
                momentum=momentum,
                affine=affine,
                track_running_stats=track_running_stats,
                **factory_kwargs,
            )
        # Mirrors torch.nn.BatchNorm2d.forward but calls F.batch_norm directly.
        def forward(self, x):
            if self.momentum is None:
                exponential_average_factor = 0.0
            else:
                exponential_average_factor = self.momentum
            if self.training and self.track_running_stats:
                # TODO: if statement only here to tell the jit to skip emitting this when it is None
                if self.num_batches_tracked is not None:  # type: ignore[has-type]
                    self.num_batches_tracked = self.num_batches_tracked + 1  # type: ignore[has-type]
                    if self.momentum is None:  # use cumulative moving average
                        exponential_average_factor = 1.0 / float(
                            self.num_batches_tracked
                        )
                    else:  # use exponential moving average
                        exponential_average_factor = self.momentum
            if self.training:
                bn_training = True
            else:
                bn_training = (self.running_mean is None) and (
                    self.running_var is None
                )
            x = F.batch_norm(
                x,
                # If buffers are not to be tracked, ensure that they won't be updated
                (
                    self.running_mean
                    if not self.training or self.track_running_stats
                    else None
                ),
                (
                    self.running_var
                    if not self.training or self.track_running_stats
                    else None
                ),
                self.weight,
                self.bias,
                bn_training,
                exponential_average_factor,
                self.eps,
            )
            return x
    v = torch.randn(1, 3, 556, 56, dtype=torch.float32)
    mod = torch.nn.Sequential(
        torch.nn.Conv2d(
            3,
            64,
            kernel_size=3,
            dilation=1,
            groups=1,
            bias=True,
        ),
        BatchNorm(64),
    ).eval()
    with torch.no_grad():
        self.common(
            mod,
            (v,),
        )
# Inference-time layout heuristics for conv: grouped conv should NOT be
# permuted to channels_last (no extra kernel before convolution), while a
# plain conv SHOULD be (a permute kernel runs first). Verified via FileCheck
# on the generated code.
@skipIfRocm
@xfail_if_mps  # Expected to find .run(
def test_conv_inference_heuristics(self):
    if self.device != GPU_TYPE:
        raise unittest.SkipTest(f"{GPU_TYPE} only test")
    in_channels = 6
    out_channels = 6
    kernel_size = 3
    groups = 3
    grouped_conv = nn.Conv2d(
        in_channels, out_channels, kernel_size, groups=groups
    ).to(self.device)
    input_tensor = torch.randn(1, in_channels, 10, 10).to(self.device)
    # Perform the forward pass
    @torch.compile()
    def foo(m, inp):
        return m(inp)
    if self.device != "xpu":
        with torch.no_grad():
            _, code = run_and_get_code(foo, grouped_conv, input_tensor)
            # no to channels last permuting before kernel
            if config.cpp_wrapper:
                FileCheck().check_not(" call_triton").check("_convolution(").run(
                    code[0]
                )
            else:
                FileCheck().check_not(".run(").check(".convolution(").run(code[0])
    # in out should do channels last in inference
    in_channels = 8
    out_channels = 4
    kernel_size = 3
    # Create the convolution layer
    conv_layer = nn.Conv2d(in_channels, out_channels, kernel_size).to(self.device)
    input_tensor = torch.randn(1, in_channels, 10, 10).to(self.device)
    with torch.no_grad():
        _, code = run_and_get_code(foo, conv_layer, input_tensor)
        # should be channels last permuting before kernel
        if is_halide_backend(self.device):
            FileCheck().check("halide_kernel_0(").check(".convolution(").run(
                code[0]
            )
        else:
            FileCheck().check(".run(").check("convolution(").run(code[0])
# Upsample -> cat -> conv pipeline (CPU-only test).
def test_upsample_cat_conv(self):
    if self.device == GPU_TYPE:
        raise unittest.SkipTest("only support cpu upsample_cat_conv test")
    class M(torch.nn.Module):
        def __init__(
            self,
            **kwargs,
        ):
            super().__init__()
            self.upsample = torch.nn.UpsamplingNearest2d(scale_factor=2)
            self.conv = torch.nn.Conv2d(
                8,
                5,
                kernel_size=1,
                padding=0,
                stride=1,
                dilation=1,
                **kwargs,
            )
        def forward(self, x, y):
            x = self.upsample(x)
            z = torch.cat([x, y], dim=1)
            z = self.conv(z)
            return z
    v1 = torch.randn([8, 2, 12, 26])
    v2 = torch.randn([8, 6, 24, 52])
    with torch.no_grad():
        self.common(
            M().eval(),
            (v1, v2),
        )
# Buffer reuse must not clobber `x`, which aliases part of the cat output
# and is read again at the end.
def test_aliased_buffer_reuse(self):
    def fn(x, y):
        x = 2 * x
        y = 2 * y
        c = torch.cat([x, y], dim=-1)
        d = 1 + c
        m = torch.mm(d, d)
        return m[:, :2] + x
    self.common(fn, (torch.randn(4, 2), torch.randn(4, 2)), check_lowp=False)
# A view (`b`) must observe in-place writes to `a` made across a graph
# break (the data-dependent `if` forces one).
def test_slice_view_with_graph_break(self):
    def fn():
        a = torch.tensor([1], device=self.device)
        a = a[0:1]
        b = a.squeeze()
        a[0] = 0
        if a[0] < 1e5:
            pass
        a[0] = 2
        return b
    expect = fn()
    opt_fn = torch.compile(fn)
    actual = opt_fn()
    self.assertEqual(expect, actual)
# detach() of an indexed view of a requires_grad input.
def test_view_detach(self):
    def fn(a):
        return a[0].detach()
    self.common(
        fn,
        (torch.randn([4, 4], requires_grad=True),),
    )
# gather over an expanded (broadcast) input with positive and negative dims.
def test_gather1(self):
    def fn(a, b):
        return (
            torch.gather(a.expand([4, 5, 10, 6]), 3, b + 1),
            torch.gather(a.expand([4, 5, 10, 6]), -1, b + 1),
        )
    self.common(
        fn,
        (
            torch.randn([1, 1, 10, 6]),
            torch.randint(5, [4, 5, 10, 1], dtype=torch.int64),
        ),
    )
# gather on 0-d tensors.
def test_gather2(self):
    # 0d tensor
    def fn(a, b):
        return torch.gather(a, 0, b) + torch.gather(a, -1, b)
    x = torch.tensor(123)
    y = torch.tensor(0)
    self.assertEqual(fn(x, y), x + x)
# gather with sparse_grad=True on a requires_grad input.
@xfail_if_mps_unimplemented  # Sparse not supported
def test_gather3(self):
    def fn(a, b):
        return torch.gather(a, 1, b, sparse_grad=True)
    self.common(
        fn,
        (
            torch.randn([4, 5, 10, 6], requires_grad=True),
            torch.randint(5, [4, 5, 10, 1], dtype=torch.int64),
        ),
    )
# Device-side bounds assert in gather must not fire for valid indices,
# across two input sizes (and thus two compiled graphs).
def test_device_assert(self):
    def fn(x, y):
        x = torch.sum(x.view(int(x.shape[0] / 6), 6), dim=1)
        return torch.gather(x, 0, torch.trunc(y).to(torch.int64))
    x1 = torch.randn(30, device=self.device)
    x2 = torch.randn(36, device=self.device)
    dtype = torch.float64 if self.device != "mps" else torch.float32
    y = torch.ones(1, dtype=dtype, device=self.device)
    self.assertEqual(torch.compile(fn)(x1, y), fn(x1, y))
    self.assertEqual(torch.compile(fn)(x2, y), fn(x2, y))
# Basic slicing including out-of-range negative bounds (which clamp).
def test_slice1(self):
    def fn(a):
        return (
            a[:, :10, 0] + a[:, 10:, 0],
            (a + 1)[:, :10, 0] + (a + 1)[:, 10:, 0],
            a[:, -30:, 0],  # negative index out of range
            a[:, :-30, 0],  # negative index out of range
        )
    self.common(
        fn,
        (torch.randn([2, 20, 2]),),
    )
# Strided slices combined with negative indexing.
def test_slice2(self):
    def fn(a):
        return (
            a[:-1, ::2, -1] + a[-1:, 1::2, -2],
            (a + 1)[:-1, ::2, -1] + (a + 2)[-1:, 1::2, -2],
        )
    self.common(
        fn,
        (torch.randn([2, 20, 2]),),
    )
# aten.slice with a runtime-scalar negative end.
# It's a view so it doesn't generate a kernel
@expectedFailureCodegenDynamic
def test_slice3(self):
    def fn(a, b):
        return torch.ops.aten.slice.Tensor(a, 0, 0, -b)
    x = torch.rand(48, 3, 512, 512)
    self.common(fn, (x, 2))
# Empty/degenerate slices whose bounds must be clamped.
@expectedFailureCodegenDynamic
def test_slice4(self):
    # empty slices that require clamping the start or end
    def fn(a):
        return (
            aten.slice.Tensor(a, 0, 2, 0, 1),
            aten.slice.Tensor(a, 0, a.shape[0], a.shape[0] + 10, 1),
            aten.slice.Tensor(a, 0, -20, 0, 1),
            aten.slice.Tensor(a, 0, -20, -16, 1),
        )
    x = torch.rand(10)
    self.common(fn, (x,))
# torch.split with an explicit list of section sizes.
def test_split_with_list(self):
    def fn(a, sizes):
        return [t + 1.0 for t in torch.split(a * 2.0, sizes, -1)]
    self.common(fn, (torch.randn(2, 2, 10), [3, 3, 4]))
    self.common(fn, (torch.randn(2, 2, 10), [4, 3, 3]))
    self.common(fn, (torch.randn(2, 2, 10), [1, 2, 3, 4]))
# torch.split with an integer chunk size, even and uneven, under dynamic shapes.
def test_split_with_integer(self):
    # argument `split_size_or_sections` is integer
    @torch.compile(dynamic=True)
    def f(x, sizes):
        return torch.split(x, sizes, -1)
    # split into equally sized chunks, 10 = 5 + 5
    r1, r2 = f(torch.randn(2, 10), 5)
    self.assertTrue(r1.size() == (2, 5))
    self.assertTrue(r2.size() == (2, 5))
    # split into equally sized chunks, 12 = 4 + 4 + 4
    r1, r2, r3 = f(torch.randn(2, 12), 4)
    self.assertTrue(r1.size() == (2, 4))
    self.assertTrue(r2.size() == (2, 4))
    self.assertTrue(r3.size() == (2, 4))
    # split unevenly, 10 = 3 + 3 + 3 + 1
    r1, r2, r3, r4 = f(torch.randn(2, 10), 3)
    self.assertTrue(r1.size() == (2, 3))
    self.assertTrue(r2.size() == (2, 3))
    self.assertTrue(r3.size() == (2, 3))
    self.assertTrue(r4.size() == (2, 1))
# split with section sizes that don't sum to the dim size must raise.
def test_split_failed(self):
    @torch.compile(backend="inductor")
    def fn(a):
        return torch.split(a, [2, 1, 1], dim=1)
    with self.assertRaisesRegex(RuntimeError, ""):
        fn(torch.randn(1, 5))
# A Python assert on symbolic sizes inside a compiled fn.
def test_inductor_assert(self):
    @torch.compile(backend="inductor", dynamic=True)
    def fn(a):
        assert a.shape[0] >= 2 and a.shape[1] >= 4
        return a.cos()
    inp = torch.randn(2, 4, 6)
    torch._dynamo.mark_dynamic(inp, 0)
    torch._dynamo.mark_dynamic(inp, 1)
    self.assertEqual(fn(inp), inp.cos())
# split returning a tuple that is unpacked, plain and nested in another fn.
def test_split(self):
    def fn(a):
        t = torch.split(a, 3, -1)
        return (t[0], t[1], t[2], t[3])
    def fn2(a):
        return fn(a + 1)
    self.common(
        fn,
        (torch.randn([2, 2, 10]),),
    )
    self.common(
        fn2,
        (torch.randn([2, 2, 10]),),
    )
# Low-memory max-pool prims: pool returning compact offsets, then the
# offsets->indices expansion, parametrized over 2d/3d and dilation.
@parametrize("dilation", (1, 2))
@parametrize("dim", (subtest(2), subtest(3)))
def test_low_memory_max_pool(self, dilation: int, dim: int):
    prims = torch.ops.prims
    def fn(x):
        kernel_size = [3, 3] if dim == 2 else [3, 3, 2]
        stride = [2] * dim
        padding = [1] * dim
        ceil_mode = False
        vals, offsets = prims._low_memory_max_pool_with_offsets(
            x,
            kernel_size,
            stride,
            padding,
            [dilation] * dim,
            ceil_mode,
        )
        indices = prims._low_memory_max_pool_offsets_to_indices(
            offsets,
            kernel_size,
            x.shape[-dim:],
            stride,
            padding,
            dilation=[dilation] * dim,
        )
        return vals, indices, offsets
    self.common(fn, (torch.randn(1, 3, *[10] * dim),))
# dtype conversion via _to_copy (dtype=6 is the ScalarType code for float32)
# and aten.to, including a to-bool cast; fp64 swapped for bf16 on MPS.
def test_to_dtype(self):
    new_dtype = torch.float64 if self.device != "mps" else torch.bfloat16
    def fn(a, b):
        return (
            aten._to_copy(a, dtype=6),
            aten._to_copy(b + 1, dtype=6),
            aten.to(b, new_dtype),
            aten.to(b, torch.bool),
        )
    self.common(
        fn,
        (
            torch.randn([2, 2, 10]),
            torch.randn([2, 2, 10], dtype=new_dtype),
        ),
    )
# _to_copy moving between CPU and GPU (direction depends on input device).
@requires_gpu()
def test_to_device(self):
    def fn(a):
        if a.device.type == "cpu":
            return aten._to_copy(
                a, device=torch.device(GPU_TYPE), dtype=6, layout=0
            )
        else:
            return aten._to_copy(a, device=torch.device("cpu"), dtype=6, layout=0)
    self.common(
        fn,
        (torch.randn([2, 2, 10]),),
    )
# .to(memory_format=...) in both directions between contiguous and channels_last.
def test_to_memory_format(self):
    def fn(a, memory_format):
        return a.to(memory_format=memory_format)
    self.common(
        fn,
        (torch.randn([2, 2, 10, 10]), torch.channels_last),
    )
    self.common(
        fn,
        (
            torch.randn([2, 2, 10, 10]).to(memory_format=torch.channels_last),
            torch.contiguous_format,
        ),
    )
# Constants materialized on the "other" device then moved to the input's
# device inside the graph.
@requires_gpu()
def test_to_device_constant(self):
    def fn(a):
        d1 = a.device.type
        if d1 == "cpu":
            d2 = GPU_TYPE
        else:
            d2 = "cpu"
        const1 = torch.as_tensor(list(range(64)), device=d2)
        return (
            torch.arange(10, device=d2).to(d1) + a,
            const1.to(d1),
            (const1 + 1).to(d1),
        )
    self.common(
        fn,
        (torch.randn([10]),),
    )
# A graph bouncing between CPU and GPU multiple times; Inductor must
# partition it across devices correctly.
@requires_gpu()
@xfail_if_triton_cpu
def test_multi_device(self):
    def fn(x):
        x = x + 1
        x = x + 2
        x = x.to(device=GPU_TYPE)
        x = x + 3
        x = x + 4
        x = x.cpu()
        x = x + 5
        x = x + 6
        x = x.to(device=GPU_TYPE)
        x = x + 7
        x = x + 8
        x = x.cpu()
        x = x + 9
        x = x + 10
        return x
    self.common(
        fn,
        (torch.randn([2, 2, 10]),),
        check_lowp=False,  # cpu doesn't understand fp16, and there are explicit .cpu() calls
    )
# Output moved to a second GPU (device index 1) inside the graph.
@skipIfRocm
@requires_multigpu()
def test_multi_gpu_device(self):
    # TODO: https://github.com/pytorch/pytorch/issues/92627
    x = torch.rand([4], device=GPU_TYPE)
    def fn(x, y):
        r = torch.ops.aten.div(x, y)
        r = r.to(f"{GPU_TYPE}:1")
        return 2 * r
    self.common(fn, (torch.randn(4), torch.randn(4)), check_lowp=False)
# Calling the same compiled fn with tensors on a different GPU index must
# fail the device-index guard and trigger a recompile.
@requires_multigpu()
def test_multi_gpu_recompile_on_index(self):
    torch.set_float32_matmul_precision("high")
    def gemm(x, y):
        return x @ y
    failed_guard = None
    def fail(guard):
        nonlocal failed_guard
        failed_guard = guard
    gemm_opt = torch._dynamo.optimize("inductor", guard_fail_fn=fail)(gemm)
    x0 = torch.randn(1024, 1024, device=f"{GPU_TYPE}:0")
    y0 = torch.randn(1024, 1024, device=f"{GPU_TYPE}:0")
    gemm_opt(x0, y0)
    x1 = torch.randn(1024, 1024, device=f"{GPU_TYPE}:1")
    y1 = torch.randn(1024, 1024, device=f"{GPU_TYPE}:1")
    gemm_opt(x1, y1)
    self.assertTrue(failed_guard is not None)
    self.assertTrue(
        "tensor 'x' Tensor device index mismatch. Expected device index to be"
        in failed_guard.reason
    )
# torch.unbind along the default and last dims.
def test_unbind(self):
    def fn(a):
        return torch.unbind(a), torch.unbind(a, -1)
    self.common(
        fn,
        (torch.randn([4, 4, 4]),),
    )
# Conv1d fed a permuted (non-contiguous) input.
@skipIfXpu(msg="Incorrect reference on XPU, see issue #165392")
def test_conv1d_with_permute(self):
    # fix https://github.com/pytorch/pytorch/issues/159462
    class ConvModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv1d(1, 64, kernel_size=3, padding=1)
        def forward(self, x):
            x = x.permute(0, 2, 1)
            return self.conv(x)
    self.common(ConvModel(), (torch.randn([32, 100, 1]),), check_lowp=False)
# Depthwise Conv1d (groups == channels) on a channels-last-style strided input.
def test_conv1d_depthwise(self):
    class ConvModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv1d(
                768,
                768,
                kernel_size=(9,),
                stride=(1,),
                padding=(4,),
                groups=768,
                bias=False,
            )
        def forward(self, x):
            return self.conv(x)
    input_tensor = torch.randn([1, 768, 512]).as_strided(
        (1, 768, 512), (393216, 1, 768)
    )
    self.common(ConvModel(), (input_tensor,), check_lowp=False)
# Conv2d + ReLU with relaxed tolerances for accumulated matmul error.
def test_convolution1(self):
    m = torch.nn.Sequential(
        torch.nn.Conv2d(5, 6, [3, 3]),
        torch.nn.ReLU(),
        ToTuple(),
    )
    self.common(
        m,
        (torch.randn([2, 5, 16, 16]),),
        # Mismatched elements: 10 / 2352 (0.4%)
        # Greatest absolute difference: 5.7220458984375e-05 at index (0, 3, 12, 12) (up to 1e-05 allowed)
        # Greatest relative difference: 0.06512477175897748 at index (0, 4, 11, 9) (up to 0.001 allowed)
        atol=6e-5,
        rtol=0.001,
        # Make sure we compute also with fp16 in the reference. Otherwise,
        # the reference will compute with fp32 and cast back to fp16, which
        # causes numeric differences beyond tolerance.
        reference_in_float=not torch.version.hip,
    )
# Transposed 1d convolution via raw aten.convolution.
def test_convolution2(self):
    def fn(x, w, b):
        # transposed conv
        return (aten.convolution(x, w, b, [4], [0], [1], True, [0], 1),)
    self.common(
        fn,
        (
            torch.randn([2, 32, 90]),
            torch.randn([32, 16, 8]),
            torch.randn([16]),
        ),
        check_lowp=False,
    )
# Conv2d where stride/padding/dilation are 1-element lists.
def test_convolution3(self):
    # Test stride or padding or dilation is 1 element list.
    m = torch.nn.Sequential(
        torch.nn.Conv2d(5, 6, [3, 3], stride=[1], padding=[0], dilation=[1]),
        torch.nn.ReLU(),
        ToTuple(),
    )
    self.common(
        m,
        (torch.randn([2, 5, 16, 16]),),
        atol=6e-5,
        rtol=0.001,
        # Make sure we compute also with fp16 in the reference. Otherwise,
        # the reference will compute with fp32 and cast back to fp16, which
        # causes numeric differences beyond tolerance.
        reference_in_float=not torch.version.hip,
    )
@skip_if_gpu_halide
def test_convolution4(self):
def fn(x, w):
x = F.conv2d(x, w, groups=w.shape[0])
return x.sum()
self.common(
fn,
(
torch.randn([2, 3, 16, 20]),
torch.randn([3, 1, 5, 5]),
),
)
def test_convolution5(self):
def fn(x, w):
x = F.conv2d(x, w, dilation=[x.size(0)])
return x.sum()
x = torch.randn([2, 1, 16, 20])
w = torch.randn([1, 1, 5, 5])
torch._dynamo.mark_dynamic(x, 0)
atol = None
rtol = None
if self.device == "xpu":
# set to float32 default tolerance,
# check_model_gpu with update rotl to 2e-3 for fp16.
# fix issue #129974
atol = 1e-05
rtol = 1.3e-06
self.common(fn, (x, w), atol=atol, rtol=rtol)
    def test_conv3d(self):
        """Conv3d with a 7x7x7 kernel; loose tolerances for accumulated fp error."""
        m = torch.nn.Sequential(
            torch.nn.Conv3d(3, 3, kernel_size=7),
            ToTuple(),
        )
        self.common(
            m,
            (torch.randn([1, 3, 8, 16, 32]),),
            atol=1e-3,
            rtol=0.001,
            # Make sure we compute also with fp16 in the reference. Otherwise,
            # the reference will compute with fp32 and cast back to fp16, which
            # causes numeric differences beyond tolerance.
            reference_in_float=not torch.version.hip,
        )
def test_conv2d_channels_last(self):
if self.device == GPU_TYPE:
raise unittest.SkipTest("only support cpu conv2d channels_last")
m = torch.nn.Sequential(
torch.nn.Conv2d(3, 3, 1, 1),
ToTuple(),
)
# only weight is channels_last
self.common(
m.to(memory_format=torch.channels_last),
(torch.randn([2, 3, 16, 16]),),
check_lowp=False,
)
# only activation is channels_last
self.common(
m,
(torch.randn([2, 3, 16, 16]).to(memory_format=torch.channels_last),),
check_lowp=False,
)
# activation and weight are all channels_last
self.common(
m.to(memory_format=torch.channels_last),
(torch.randn([2, 3, 16, 16]).to(memory_format=torch.channels_last),),
check_lowp=False,
)
def test_conv2d_backward_channels_last(self):
def fn(grad_output, inp, weight):
convolution_backward_8 = torch.ops.aten.convolution_backward.default(
grad_output,
inp,
weight,
[320],
[1, 1],
[0, 0],
[1, 1],
False,
[0, 0],
1,
[True, True, True],
)
return convolution_backward_8
# only weight is channels_last
self.common(
fn,
(
torch.randn([2, 320, 8, 8]),
torch.randn([2, 2048, 8, 8]),
torch.randn([320, 2048, 1, 1]).to(memory_format=torch.channels_last),
),
check_lowp=False,
)
@parametrize(
"use_block_ptr",
[subtest(False), subtest(True, decorators=[skip_if_not_triton])],
)
def test_conv3d_channels_last(self, use_block_ptr: bool):
if self.device == GPU_TYPE:
raise unittest.SkipTest("only support cpu conv3d channels_last")
m = torch.nn.Sequential(
torch.nn.Conv3d(3, 3, 1, 1),
ToTuple(),
)
with config.patch({"triton.use_block_ptr": use_block_ptr}):
# only weight is channels_last
self.common(
m.to(memory_format=torch.channels_last_3d),
(torch.randn([2, 3, 16, 16, 16]),),
)
# only activation is channels_last
self.common(
m,
(
torch.randn([2, 3, 16, 16, 16]).to(
memory_format=torch.channels_last_3d
),
),
)
# activation and weight are all channels_last
self.common(
m.to(memory_format=torch.channels_last_3d),
(
torch.randn([2, 3, 16, 16, 16]).to(
memory_format=torch.channels_last_3d
),
),
)
@skip_if_gpu_halide # slow
@xfail_if_mps # Non-divisible input sizes are not implemented on MPS device
def test_adaptive_avg_pool2d1(self):
def fn(x):
return aten._adaptive_avg_pool2d(x, (6, 6)), aten._adaptive_avg_pool2d(
x + 1, (2, 5)
)
self.common(
fn,
(torch.randn(2, 4, 16, 16),),
check_lowp=False,
)
# lowering to avg_pool2d case
self.common(
fn,
(torch.randn(2, 4, 3, 3),),
)
# no-op case
self.common(
fn,
(torch.randn(2, 4, 6, 6),),
)
    @xfail_if_mps  # Non-divisible input sizes are not implemented on MPS device
    def test_adaptive_avg_pool2d2(self):
        """Adaptive avg-pool with a big effective kernel takes the fallback path,
        so no inductor kernels should be generated."""
        # Big kernel size, use fallback
        def fn(x):
            return aten._adaptive_avg_pool2d(x, (4, 4))
        # Reset the counter so the assertion below reflects only this test.
        torch._inductor.metrics.generated_kernel_count = 0
        self.common(
            fn,
            (torch.randn(2, 4, 21, 21),),
            check_lowp=False,
        )
        assertGeneratedKernelCountEqual(self, 0)
@xfail_if_mps
@skip_if_gpu_halide # slow
def test_adaptive_max_pool2d1(self):
def fn(x):
return aten.adaptive_max_pool2d(x, (6, 6))
self.common(
fn,
(torch.randn(2, 4, 16, 16),),
check_lowp=False,
)
self.common(
fn,
(torch.randn(2, 4, 3, 3),),
)
# no-op case
self.common(
fn,
(torch.randn(2, 4, 6, 6),),
)
@skip_if_gpu_halide # slow
def test_adaptive_max_pool2d2(self):
# Big kernel size, use fallback
def fn(x):
return aten.adaptive_max_pool2d(x, (4, 4))
torch._inductor.metrics.generated_kernel_count = 0
self.common(
fn,
(torch.randn(2, 4, 21, 21),),
check_lowp=False,
)
assertGeneratedKernelCountEqual(self, 0)
@skip_if_gpu_halide # slow
def test_adaptive_max_pool2d3(self):
# test when adaptive_max_pool2d fallbacks to max_pool2d
def fn(x):
return aten.adaptive_max_pool2d(x, (2, 2))
# Big kernel (12 / 2 * 12 / 2 > 25)
self.common(
fn,
(torch.randn(2, 4, 12, 12),),
)
# Small kernel
self.common(
fn,
(torch.randn(2, 4, 4, 4),),
)
@xfail_if_mps_unimplemented
def test_fractional_max_pool2d1(self):
def fn(x, samples):
return aten.fractional_max_pool2d(x, (3, 3), (2, 2), samples)
self.common(
fn, (torch.randn(1, 4, 16, 16), torch.rand(1, 4, 2)), check_lowp=False
)
@xfail_if_mps_unimplemented
def test_fractional_max_pool2d2(self):
# large kernel size without unrolling
def fn(x, samples):
return aten.fractional_max_pool2d(x, (6, 5), (3, 3), samples)
self.common(
fn,
(torch.randn(2, 4, 36, 36), torch.rand(2, 4, 2)),
check_lowp=False,
)
@xfail_if_mps_unimplemented
def test_fractional_max_pool2d3(self):
def fn(x, samples):
return aten.fractional_max_pool2d(x, (1, 1), (16, 16), samples)
self.common(
fn, (torch.randn(2, 4, 16, 16), torch.rand(2, 4, 2)), check_lowp=False
)
@xfail_if_mps_unimplemented
@config.patch(fallback_random=True)
@skip_if_halide # Can only unroll for loops over a constant extent
def test_fractional_max_pool2d4(self):
random.seed(1234)
torch.manual_seed(1234)
# check rectangular kernel/output size
def fn(x):
return torch.nn.functional.fractional_max_pool2d_with_indices(
x, (4, 3), (3, 2)
)
self.common(fn, (torch.randn(1, 4, 16, 16),), check_lowp=False)
@xfail_if_mps_unimplemented
def test_fractional_max_pool2d5(self):
def fn(x, samples):
return aten.fractional_max_pool2d(x, (3, 3), (1, 1), samples)
self.common(
fn, (torch.randn(2, 4, 6, 6), torch.rand(2, 4, 2)), check_lowp=False
)
    def test_multi_threading(self):
        """Smoke test: run one compiled module from multiple threads concurrently
        (shared weights / compiled artifact); passes if no exception is raised."""
        model = torch.nn.Linear(2, 3).eval()
        inp = torch.randn(4, 2)
        num_run = 3
        def run_weights_sharing_model(m, inp):
            # Each worker thread performs a few no-grad forward passes.
            with torch.no_grad():
                for _ in range(num_run):
                    y = m(inp)
        numb_instance = 2
        threads = []
        compiled_m = torch.compile(model)
        for _ in range(1, numb_instance + 1):
            thread = threading.Thread(
                target=run_weights_sharing_model, args=(compiled_m, inp)
            )
            threads.append(thread)
            thread.start()
        for thread in threads:
            thread.join()
@unittest.skipIf(config.is_fbcode(), "fbcode triton error, needs debugging")
@skip_if_triton_cpu("Flaky on Triton CPU")
@skip_if_gpu_halide # https://github.com/halide/Halide/issues/8311
def test_adaptive_avg_pool2d_low_prec(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
def forward(self, x):
x = self.avgpool(x)
return x
mod = Model().to(self.device)
for dtype in [torch.half, torch.bfloat16]:
# Skip bfloat16 on MacOS-13 for MPS tests
if not self.is_dtype_supported(dtype):
continue
x = torch.randn(4, 3, 7, 7, device=self.device).to(dtype=dtype)
opt_mod = torch.compile(mod)
res = opt_mod(x)
expected = mod(x)
self.assertTrue(torch.allclose(res, expected))
def test_buffer_copied_in_graph(self):
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buf = torch.nn.Buffer(torch.zeros(1))
self.w1 = torch.nn.Parameter(torch.zeros(1))
self.w2 = torch.nn.Parameter(torch.zeros(1))
def forward(self, x):
self.buf.add_(1)
return (self.w1 * x * self.w2).sum() + self.buf.sum()
model_for_eager = MyModel().to(self.device)
model_for_compile = copy.deepcopy(model_for_eager)
eager_version_counters = [
buffer._version for _, buffer in model_for_eager.named_buffers()
]
compile_version_counters = [
buffer._version for _, buffer in model_for_compile.named_buffers()
]
compiled_f = torch.compile(model_for_compile, backend="inductor")
inp_ref = torch.ones(1, requires_grad=True, device=self.device)
inp_test = torch.ones(1, requires_grad=True, device=self.device)
out_ref = model_for_eager(inp_ref.clone())
out_test = compiled_f(inp_test.clone())
eager_version_counters_after = [
buffer._version for _, buffer in model_for_eager.named_buffers()
]
compile_version_counters_after = [
buffer._version for _, buffer in model_for_compile.named_buffers()
]
eager_delta = list(
map(operator.sub, eager_version_counters_after, eager_version_counters)
)
compile_delta = list(
map(operator.sub, compile_version_counters_after, compile_version_counters)
)
self.assertEqual(eager_delta, compile_delta)
@skip_if_gpu_halide
def test_buffer_copied_in_graph_with_different_shapes(self):
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buf = torch.nn.Buffer(torch.ones(4, 4))
self.w = torch.nn.Parameter(
torch.Tensor([[4, 5], [1, 2], [6, 7], [8, 9]])
)
def forward(self, x):
self.buf.add_(1)
return (self.w @ x).sum() + self.buf.sum()
model_for_eager = MyModel().to(self.device)
model_for_compile = copy.deepcopy(model_for_eager)
eager_version_counters = [
buffer._version for _, buffer in model_for_eager.named_buffers()
]
compile_version_counters = [
buffer._version for _, buffer in model_for_compile.named_buffers()
]
compiled_f = torch.compile(model_for_compile, backend="inductor")
inp_ref = torch.ones(2, 4, requires_grad=True, device=self.device)
inp_test = torch.ones(2, 4, requires_grad=True, device=self.device)
out_ref = model_for_eager(inp_ref.clone())
out_test = compiled_f(inp_test.clone())
eager_version_counters_after = [
buffer._version for _, buffer in model_for_eager.named_buffers()
]
compile_version_counters_after = [
buffer._version for _, buffer in model_for_compile.named_buffers()
]
eager_delta = list(
map(operator.sub, eager_version_counters_after, eager_version_counters)
)
compile_delta = list(
map(operator.sub, compile_version_counters_after, compile_version_counters)
)
self.assertEqual(eager_delta, compile_delta)
def test_buffer_batch_norm(self):
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.m = torch.nn.BatchNorm1d(100)
def forward(self, x):
return self.m(x)
model_for_eager = MyModel().to(self.device)
model_for_compile = copy.deepcopy(model_for_eager)
eager_version_counters = [
buffer._version for _, buffer in model_for_eager.named_buffers()
]
compile_version_counters = [
buffer._version for _, buffer in model_for_compile.named_buffers()
]
compiled_f = torch.compile(model_for_compile, backend="inductor")
inp_ref = torch.ones(20, 100, requires_grad=True, device=self.device)
inp_test = torch.ones(20, 100, requires_grad=True, device=self.device)
out_ref = model_for_eager(inp_ref.clone())
out_test = compiled_f(inp_test.clone())
eager_version_counters_after = [
# TODO: remove the + 1 after https://github.com/pytorch/pytorch/issues/120622 is fixed
(
buffer._version + 1
if k in ["m.running_mean", "m.running_var"]
else buffer._version
)
for k, buffer in model_for_eager.named_buffers()
]
compile_version_counters_after = [
buffer._version for _, buffer in model_for_compile.named_buffers()
]
eager_delta = list(
map(operator.sub, eager_version_counters_after, eager_version_counters)
)
compile_delta = list(
map(operator.sub, compile_version_counters_after, compile_version_counters)
)
self.assertEqual(eager_delta, compile_delta)
@xfail_if_mps # Non-divisible input sizes are not implemented on MPS device
def test_adaptive_avg_pool_with_output_size_0(self):
m1 = nn.AdaptiveAvgPool1d(0)
self.common(m1, (torch.randn(1, 2),))
m2 = nn.AdaptiveAvgPool2d(0)
self.common(m2, (torch.randn(1, 2, 3),))
def test_max_pool2d1(self):
def fn(x):
return aten.max_pool2d_with_indices(x, [3, 3], [2, 2])
self.common(
fn,
(torch.randn(2, 4, 16, 16),),
)
@skip_if_gpu_halide # slow
def test_max_pool2d2(self):
def fn(x):
return aten.max_pool2d_with_indices(x, [3, 3], [2, 2])
self.common(
fn,
(torch.randn([16, 64, 55, 55]),),
)
@skip_if_gpu_halide # slow
def test_max_pool2d3(self):
def fn(x):
# with padding
return (
aten.max_pool2d_with_indices(x, [3, 3], [2, 2], [1, 1]),
aten.max_pool2d_with_indices(
x,
[
3,
],
[
2,
],
[
1,
],
),
)
self.common(
fn,
(-torch.arange(1 * 8 * 8, dtype=torch.float32).view(1, 1, 8, 8),),
)
@skip_if_halide # Can only unroll for loops over a constant extent
def test_max_pool2d4(self):
def fn(x):
# with padding
return aten.max_pool2d_with_indices(x, [3, 3], [2, 2], [0, 0], [1, 1], True)
self.common(
fn,
(torch.randn([2, 8, 111, 111]),),
)
@skip_if_gpu_halide # slow
def test_max_pool2d5(self):
def fn(x):
return aten.max_pool2d_with_indices(x, [3, 3], [])
self.common(
fn,
(torch.randn([16, 64, 55, 55]),),
)
@skip_if_gpu_halide # slow
@parametrize("dilation", (1, 2))
def test_max_pool2d6(self, dilation: int):
# Big kernel size
def fn(x):
return aten.max_pool2d_with_indices(
x, [13, 13], [], dilation=[dilation] * 2
)
self.common(
fn,
(torch.randn([16, 64, 55, 55]),),
)
# From https://github.com/pytorch/pytorch/issues/94775
def test_max_pool2d7(self):
# ceil mode turns on
def fn(x):
return torch.nn.functional.max_pool2d(
x, 1, stride=(2, 2), padding=0, ceil_mode=True
)
self.common(
fn,
(torch.randn([1, 1, 6, 7]),),
)
# From https://github.com/pytorch/pytorch/issues/93384
def test_max_pool2d8(self):
# dilation is not 1
def fn(x):
return aten.max_pool2d_with_indices(x, [3, 2], [2, 1], [1, 1], [1, 2])
self.common(
fn,
(torch.randn([2, 2, 3, 6]),),
)
def test_avg_pool2d1(self):
def fn(x):
return aten.avg_pool2d(x, [3, 3], [2, 2])
self.common(
fn,
(torch.randn(2, 4, 16, 16),),
)
def test_avg_pool2d2(self):
def fn(x):
return aten.avg_pool2d(x, [3, 3], [2, 2])
self.common(
fn,
(torch.randn([16, 64, 55, 55]),),
)
def test_avg_pool2d3(self):
def fn(x):
return (
aten.avg_pool2d(x, [3, 3], [2, 2], [1, 1]),
aten.avg_pool2d(
x,
[
3,
],
[
2,
],
[
1,
],
),
)
self.common(
fn,
(-torch.arange(1 * 8 * 8, dtype=torch.float32).view(1, 1, 8, 8),),
check_lowp=not is_halide_backend(self.device), # misaligned addr fp16
)
def test_avg_pool2d4(self):
def fn(x):
return aten.avg_pool2d(x, [3, 3], [2, 2], [0, 0], True)
self.common(
fn,
(torch.randn([2, 8, 111, 111]),),
)
def test_avg_pool2d5(self):
def fn(x):
return aten.avg_pool2d(x, [3, 3], [2, 2], [1, 1], count_include_pad=False)
self.common(
fn,
(-torch.arange(1 * 8 * 8, dtype=torch.float32).view(1, 1, 8, 8),),
check_lowp=not is_halide_backend(self.device), # misaligned addr fp16
)
def test_avg_pool2d6(self):
def fn(x):
return aten.avg_pool2d(x, [3, 3], [2, 2], [1, 1], divisor_override=3)
self.common(
fn,
(-torch.arange(1 * 8 * 8, dtype=torch.float32).view(1, 1, 8, 8),),
check_lowp=not is_halide_backend(self.device), # misaligned addr fp16
)
def test_avg_pool2d7(self):
# Large kernel size, use fallback
def fn(x):
return aten.avg_pool2d(x, [13, 13], [1, 1], [0, 0])
torch._inductor.metrics.generated_kernel_count = 0
self.common(
fn,
(-torch.arange(1 * 24 * 24, dtype=torch.float32).view(1, 1, 24, 24),),
)
assertGeneratedKernelCountEqual(self, 0)
def test_avg_pool2d8(self):
# https://github.com/pytorch/pytorch/issues/100987
def fn(x):
return aten.avg_pool2d(
x, kernel_size=3, stride=2, padding=1, ceil_mode=True
)
self.common(
fn,
(torch.randn(1, 3, 6, 6),),
check_lowp=not is_halide_backend(self.device), # misaligned addr fp16
)
@tf32_on_and_off(0.006)
@skip_if_gpu_halide # slow
def test_alexnet_prefix(self):
def forward(arg6, arg7, arg16):
convolution = torch.ops.aten.convolution(
arg16, arg7, arg6, [4, 4], [2, 2], [1, 1], False, [0, 0], 1
)
relu = torch.ops.aten.relu(convolution)
max_pool2d_with_indices = torch.ops.aten.max_pool2d_with_indices(
relu, [3, 3], [2, 2]
)
getitem = max_pool2d_with_indices[0]
return (getitem,)
self.common(
forward,
(
rand_strided((64,), (1,), torch.float32, "cpu"),
rand_strided((64, 3, 11, 11), (363, 121, 11, 1), torch.float32, "cpu"),
rand_strided(
(16, 3, 224, 224), (150528, 50176, 224, 1), torch.float32, "cpu"
),
),
# Mismatched elements: 127 / 746496 (0.0%)
# Greatest absolute difference: 0.0009765625 at index (1, 62, 7, 16) (up to 1e-05 allowed)
# Greatest relative difference: 0.05187467899332306 at index (14, 18, 11, 0) (up to 0.001 allowed)
atol=3e-3,
rtol=2,
)
def test_elu(self):
def fn(x):
return aten.elu(x, 1.6732632423543772, 1.0507009873554805) + 2, aten.elu(
x + 1, 2, 3, 4
)
self.common(
fn,
(torch.randn([16, 16]),),
rtol=1e-4,
atol=1e-4,
)
def test_tan(self):
def fn(x):
return aten.tan(x) + 2, aten.tan(x + 1)
self.common(
fn,
(torch.randn([16, 16]),),
)
def test_tanh(self):
def fn(x):
return aten.tanh(x) + 2, aten.tanh(x + 1)
self.common(
fn,
(torch.randn([16, 16]),),
)
@skip_if_halide # lgamma not implemented
@xfail_if_triton_cpu
def test_lgamma(self):
def fn(x):
return aten.lgamma(x) + 2, aten.cos(x + 1)
self.common(
fn,
(torch.randn([16, 16]),),
)
def test_cos(self):
def fn(x):
return aten.cos(x) + 2, aten.cos(x + 1)
self.common(
fn,
(torch.randn([16, 16]),),
)
def test_sin(self):
def fn(x):
return aten.sin(x) + 2, aten.sin(x + 1)
self.common(
fn,
(torch.randn([16, 16]),),
)
def test_repeat(self):
def fn(x):
return (
x.repeat(0, 1, 1, 1),
x.repeat(2, 2, 3, 1),
x.repeat(8, 1, 1, 1),
x.repeat(2, 1, 1, 1, 1, 1),
)
self.common(
fn,
(torch.randn([1, 2, 4, 8]),),
)
def test_repeat_as_strided(self):
# Reproducer for #127474
def fn(x):
view_size = (3, 2)
full = x.repeat((3, 2))
view = torch.as_strided(full, view_size, full.stride())
result = view + view
return result
self.common(fn, (torch.randn(1, 1),))
def test_as_strided_on_views(self):
# https://github.com/pytorch/pytorch/issues/163286
def fn(a):
c = a.view(-1)
# convert to float16
d = c.view(torch.float16)
e = d.as_strided((2, 5), (1, 1))
# convert back to bfloat16
f = e.view(torch.bfloat16)
g = f.as_strided((10, 10), (1, 1))
return g
a = torch.randn(10, 10, dtype=torch.bfloat16)
self.common(fn, (a,), reference_in_float=False)
# test dtype separately
out = fn(a)
assert out.dtype == torch.bfloat16
out = torch.compile(fn)(a)
assert out.dtype == torch.bfloat16
def test_repeat_interleave(self):
def fn(x):
return (
x.repeat_interleave(2),
x.repeat_interleave(3, dim=0),
x.repeat_interleave(x.size(1), dim=1),
)
self.common(
fn,
(torch.randn([1, 2, 4, 8]),),
)
@config.patch(implicit_fallbacks=True)
def test_repeat_interleave_2(self):
def fn(x):
return torch.ops.aten.repeat_interleave.Tensor(x, output_size=12)
self.common(
fn,
(torch.tensor([2, 4, 6]),),
)
@config.patch(fallback_random=True)
def test_randn_with_dtype_and_device(self):
if self.device == GPU_TYPE:
raise unittest.SkipTest("only support cpu randn_with_dtype_and_device test")
def fn(vectors):
rotations_shape = (12, vectors.shape[-1], 1, 64)
random_rotations = torch.randn(
rotations_shape, device=vectors.device, dtype=vectors.dtype
)
random_rotations += 1
return random_rotations
self.common(
fn,
(torch.randn([4, 12, 2, 64]),),
)
def test_embedding(self):
m = torch.nn.Sequential(
torch.nn.Embedding(10, 4, padding_idx=0),
torch.nn.ReLU(),
ToTuple(),
)
self.common(
m,
(torch.randint(10, [2, 8]),),
)
def test_embedding_sparse(self):
# Fix https://github.com/pytorch/pytorch/issues/150656
def fn(weight, indices):
return F.embedding(indices, weight, sparse=True)
indices = torch.randint(10, (2, 3))
weight = torch.randn(10, 3, requires_grad=True)
self.common(
fn,
(weight, indices),
)
def test_mean(self):
def fn(x):
return (
x.mean(),
x.mean(-1),
torch.mean(x, -2, keepdim=True),
x.mean([0, 1]),
)
self.common(
fn,
(torch.randn([1, 2, 4, 8]),),
)
@parametrize("tile_reduction", (False, True))
def test_var_mean(self, tile_reduction: bool):
def fn(x):
return (
*torch.var_mean(x, -1),
*torch.var_mean(x, [1, 3]),
)
with config.patch(
{
"triton.prefer_nd_tiling": tile_reduction,
"triton.tile_reductions": tile_reduction,
}
):
self.common(
fn,
(torch.randn([1, 2, 4, 8]),),
)
def test_var_mean_div_by(self):
def fn(x):
var, mean = torch.var_mean(x, dim=2, keepdim=True)
return x / var, var, mean
self.common(fn, (torch.rand([1, 17, 2048]),))
def test_var_correction(self):
def fn(x):
dim = -1
return (
torch.var(x, dim=dim, correction=1.3),
torch.var(x, dim=dim, correction=3),
torch.var(x, dim=dim, correction=10),
)
self.common(fn, (torch.randn([2, 8]),))
# Unrolled reduction
self.common(fn, (torch.randn([2, 4]),))
@config.patch(pick_loop_orders=True)
def test_transposed_propagates(self):
@torch.compile(backend="inductor", fullgraph=True)
def fn(x, y):
return x + y
a = torch.randn(1, 4, 4, 4, device=self.device).permute(0, 2, 3, 1)
b = torch.randn(4, 4, 4, device=self.device).permute(1, 2, 0)
c = fn(a, b)
self.assertEqual(a.stride(), c.stride())
self.assertEqual(c.stride()[2], 1)
@skip_if_gpu_halide
def test_std(self):
def fn(x):
return (
torch.var(x, True),
torch.var(x, False),
torch.var(x, -1, True),
torch.var(x, -1, False),
torch.std(x, False),
torch.std(x, [0, 1], True),
torch.std(x, [0, 1], False),
torch.std(x, -2, True, keepdim=True),
)
self.common(
fn,
(torch.randn([2, 4, 4, 8]),),
)
def test_embedding_bag(self):
def fn(w, i, o):
return aten._embedding_bag(w, i, o, False, 0, False, None)
self.common(
fn,
(torch.randn([10, 4]), torch.randint(10, [8]), torch.tensor([0, 2, 6])),
)
def test_batch_norm_2d(self):
m = torch.nn.Sequential(
torch.nn.BatchNorm2d(10),
torch.nn.ReLU(),
)
m.eval()
self.common(m, (torch.randn([2, 10, 8, 8]),), check_lowp=False)
self.common(
m,
(torch.randn([3, 10, 16, 16]),),
check_lowp=False, # too painful to match types of bn model
)
# From yolov3
@with_tf32_off
def test_batch_norm_2d_2(self):
if self.device == "cpu":
raise unittest.SkipTest(f"requires {GPU_TYPE}")
class Repro(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.self_0 = torch.nn.Conv2d(
64,
128,
kernel_size=(3, 3),
stride=(2, 2),
padding=(1, 1),
bias=False,
)
self.self_1 = torch.nn.BatchNorm2d(
128,
eps=0.0001,
momentum=0.03,
affine=True,
track_running_stats=True,
)
self.self_2 = torch.nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, l_input_: torch.Tensor):
self_0 = self.self_0(l_input_)
self_1 = self.self_1(self_0)
self_2 = self.self_2(self_1)
return (self_2,)
inp = torch.randn((4, 64, 192, 256), dtype=torch.float32, device=GPU_TYPE)
mod = Repro().to(device=GPU_TYPE)
o1 = mod(inp)
o2 = torch.compile(mod)(inp)
self.assertEqual(o1, o2, rtol=1e-3, atol=1e-3)
@patch.object(config.trace, "enabled", True)
def test_layer_norm(self):
m = torch.nn.Sequential(
torch.nn.LayerNorm(32),
torch.nn.ReLU(),
)
m.eval()
with torch.no_grad():
self.common(m, (torch.randn([16, 32]),), check_lowp=False)
if self.device != "cpu":
assertGeneratedKernelCountEqual(self, 1)
@torch._functorch.config.patch("donated_buffer", True)
def test_matmul_layer_norm(self):
batch_size = 32
seq_length = 50
hidden_size = 256
inp = torch.randn(
batch_size,
seq_length,
hidden_size,
requires_grad=True,
device=self.device,
)
weight = torch.randn(
hidden_size, hidden_size, requires_grad=True, device=self.device
)
layer_norm = torch.nn.LayerNorm(hidden_size, device=self.device)
def foo(inp, weight):
matmul_output = inp @ weight
final_output = layer_norm(matmul_output)
return final_output
self.common(foo, (inp, weight), check_lowp=False)
def test_transpose_add(self):
def fn(a, b):
return a.t() + b
self.common(
fn, (torch.randn([16, 32]), torch.randn([32, 16])), check_lowp=False
)
if self.device != "cpu":
assertGeneratedKernelCountEqual(self, 1)
    @patch.object(config.triton, "persistent_reductions", True)
    def test_softmax_one_kernel_persist(self):
        """A numerically-stable softmax should fuse into one kernel when
        persistent reductions are enabled."""
        def fn(x):
            dim = 1
            # max-subtraction for numerical stability before exp
            x_max = torch.amax(x, dim, keepdim=True)
            unnormalized = torch.exp(x - x_max)
            result = unnormalized / torch.sum(unnormalized, dim, keepdim=True)
            return result
        self.common(fn, (torch.randn([16, 32]),), check_lowp=False)
        if self.device != "cpu":
            assertGeneratedKernelCountEqual(self, 1)
@patch.object(config.triton, "persistent_reductions", False)
def test_softmax_one_kernel_loop(self):
def fn(x):
x_max = torch.amax(x, 1, keepdim=True)
unnormalized = torch.exp(x - x_max)
result = unnormalized / torch.sum(unnormalized, 1, keepdim=True)
return result
self.common(fn, (torch.randn([16, 32]),), check_lowp=False)
if self.device != "cpu":
assertGeneratedKernelCountEqual(self, 1)
def test_complex_fallback(self):
def fn(x):
return x * x + 10
self.common(
fn,
(torch.randn([1, 2, 4, 8]).to(dtype=torch.complex64),),
)
assertGeneratedKernelCountEqual(self, 0)
class ToComplex(nn.Module):
def forward(self, x):
return (x + x + 12).to(torch.complex64)
self.common(ToComplex(), (torch.rand([1, 2, 4, 8]),), check_lowp=False)
if self.device != "cpu":
assertGeneratedKernelCountEqual(self, 1)
def test_complex_from_real_imag(self):
def fn(x, y):
return aten.complex.default(x, y)
a = torch.randn([5, 3]).permute(1, 0)
self.common(
fn,
(a, a),
exact_stride=True,
reference_in_float=False,
)
@skipIfMPS
def test_linalg_eig_stride_consistency(self):
def fn(x):
eigenvals, eigenvecs = torch.linalg.eig(x)
return eigenvecs
x = torch.randn(5, 5, device=self.device, dtype=torch.float32)
self.common(
fn,
[x],
exact_stride=True,
exact_dtype=True,
check_lowp=False,
)
def test_view_as_complex(self):
class Repro(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, view_2):
clone = torch.ops.aten.clone.default(
view_2, memory_format=torch.contiguous_format
)
view_2 = None
view_as_complex = torch.ops.aten.view_as_complex.default(clone)
clone = None
return (view_as_complex,)
inp = torch.empty_strided((128, 64, 12, 32, 2), (1, 98304, 8192, 256, 128)).to(
self.device
)
mod = Repro()
o1 = mod(inp)
o2 = torch.compile(mod)(inp)
self.assertEqual(o1, o2)
def test_view_as_real(self):
def fn(x):
y = torch.view_as_real(x)
return y + 1
x = torch.randn(4, dtype=torch.complex64)
self.common(fn, (x,))
def test_polar(self):
def fn(dist, angle):
return torch.polar(dist, angle)
dtype = torch.float64 if self.device != "mps" else torch.float32
inp = (
torch.tensor([1, 2], dtype=dtype),
torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=dtype),
)
self.common(fn, (*inp,), reference_in_float=self.device != "mps")
@skip_if_gpu_halide # incorrect result on CUDA
def test_cauchy(self):
def fn(x, y):
return torch.sum(1 / (torch.unsqueeze(x, -1) - y))
self.common(
fn,
(
torch.randn(32),
torch.randn(32),
),
# Absolute difference: 0.0003662109375 (up to 0.0001 allowed)
# Relative difference: 1.8804297408767818e-05 (up to 1e-05 allowed)
atol=5 * 1e-4,
rtol=5 * 1e-5,
check_lowp=False,
)
if self.device != "cpu":
assertGeneratedKernelCountEqual(self, 1)
@skip_if_gpu_halide # misaligned address error
def test_fusing_write_into_disjoint_read(self):
def test_flip(a):
return a.copy_(torch.flip(a, (0,)))
self.common(test_flip, (torch.rand([20]),))
assertGeneratedKernelCountEqual(self, 2)
# issue only manifests on cuda with large tensors
if self.device != "cpu":
def f(a):
a[:, 20:40] = a[:, 20:40] + 1
a[:, 2:900025] = a[:, 1:900024] + 2
a = torch.rand((1, 1000000), device=self.device)
self.common(f, (a,))
def test_inplace_flip(self):
def f(x, y):
x.copy_(x.flip(1))
y = y.sum(dim=1, keepdim=True) + y
return x + y
x = torch.randn(20, 1024 * 1024)
y = torch.randn(20, 1024 * 1024)
self.common(f, (x, y), atol=1e-3, rtol=1e-3)
def test_gather_scatter(self):
def fn(node_feat, edge_index):
src_node_feat = node_feat[edge_index[0]]
dst_node_feat = node_feat[edge_index[1]]
edge_feat = src_node_feat - dst_node_feat + 1
new_node_feat = torch.zeros_like(node_feat)
new_node_feat.scatter_add_(
0, edge_index[1].unsqueeze(-1).expand_as(edge_feat), edge_feat
)
return new_node_feat
num_nodes = 16
num_features = 32
node_feat = torch.randn(num_nodes, num_features)
edge_index = torch.randint(0, num_nodes, size=(2, num_nodes * 5))
self.common(
fn,
(
node_feat,
edge_index,
),
check_lowp=False,
)
if self.device != "cpu":
assertGeneratedKernelCountEqual(self, 2)
@config.patch(max_fusion_size=1)
def test_no_mega_fusion_during_lowering(self):
n = 50
def fn(*args):
x = args[0]
for i in range(n):
x = torch.add(x, args[i])
return x
self.common(
fn,
[torch.randn(64) for _ in range(n)],
check_lowp=False,
)
print("-->", torch._inductor.metrics.generated_kernel_count)
if self.device != "cpu":
self.assertTrue(torch._inductor.metrics.generated_kernel_count > 1)
def test_move_arange(self):
def fn(x):
return torch.arange(len(x), device="cpu").to(x.device) + x
self.common(fn, (torch.randn([32]),), check_lowp=False)
# if we have a copy there will be more than 1 kernel
assertGeneratedKernelCountEqual(self, 1)
def test_leaky_relu(self):
def fn(x):
return aten.leaky_relu(x, 0.2) + 2, aten.leaky_relu(x + 1)
self.common(
fn,
(torch.randn([16, 16]),),
)
def test_gelu(self):
def fn(x):
return aten.gelu(x) + 2, aten.gelu(x + 1)
self.common(
fn,
(torch.randn([16, 16]),),
)
def test_clone(self):
def fn(x):
return aten.clone(x) + 2, aten.clone(x + 1)
self.common(
fn,
(torch.randn([16, 16]),),
)
def test_masked_fill(self):
def fn(mask, value):
return aten.masked_fill(value, mask, -10000.0) + 2, aten.masked_fill(
value / 2.0, torch.logical_not(mask), 667
)
self.common(
fn,
(
torch.randint(0, 1, [1, 16], dtype=torch.bool),
torch.randn([16, 16]),
),
)
def test_masked_fill_promotion(self):
def fn(mask, value):
return aten.masked_fill(value, mask, torch.tensor(3.5))
opt_fn = torch.compile(fn, backend="inductor")
for inp in (
torch.randn(
[16, 16],
dtype=torch.float16 if self.device == GPU_TYPE else torch.float32,
device=self.device,
),
torch.randint(16, (16, 16), device=self.device),
):
inputs = (
torch.randint(0, 1, [1, 16], dtype=torch.bool, device=self.device),
inp,
)
self.assertEqual(fn(*inputs), opt_fn(*inputs))
@xfail_if_mps # 'NullHandler' object has no attribute 'wrapper_code'
def test_masked_scatter(self):
def fn(value, mask, source):
return torch.masked_scatter(value, mask, source)
value = make_tensor(10, 10, dtype=torch.float32, device=self.device)
mask = make_tensor(10, 10, dtype=torch.bool, device=self.device)
source = make_tensor(
mask.count_nonzero(), dtype=torch.float32, device=self.device
)
self.common(fn, (value, mask, source))
def test_fill1(self):
def fn(x):
tmp = torch.ones_like(x)
return tmp, aten.fill.Scalar(tmp, 2)
self.common(
fn,
(torch.randn([16, 16]),),
)
def test_fill2(self):
def fn(x):
tmp = torch.ones_like(x)
return tmp, aten.fill.Tensor(tmp, torch.tensor(3.0))
self.common(
fn,
(torch.randn([16, 16]),),
)
def test_pow1(self):
def fn(x):
return [aten.pow(x, e) for e in range(-8, 9)]
self.common(
fn,
(torch.randn([16, 16]),),
)
@xfail_if_triton_cpu
def test_pow2(self):
def fn(x):
return aten.pow(1000, x), aten.pow(x, 1000)
self.common(
fn,
(
torch.randn(
[16, 16],
dtype=torch.float32,
),
),
# Mismatched elements: 9 / 256 (3.5%)
# Greatest absolute difference: 2.491354329061828e+28 at index (6, 6) (up to 1e-05 allowed)
# Greatest relative difference: 2.9793410720160818e-05 at index (4, 5) (up to 1.3e-06 allowed)
atol=1e-5,
rtol=3e-05,
)
@skip_if_gpu_halide # https://github.com/halide/Halide/issues/8318
@config.patch("halide.scheduler_cuda", "Li2018")
def test_pow3(self):
# power of 0.5 is special-cased, arbitrary power would still produce triton codegen error
def fn(x):
z = torch.tensor(0.123, device=self.device)
w = z + x
return torch.pow(w, 0.5)
opt = torch.compile(fn, backend="inductor")
input = torch.rand((), device=self.device)
self.assertTrue(same(opt(input), fn(input)))
def test_pow_int(self):
def fn(x, y):
return torch.pow(x, 0x57), torch.pow(x, y)
for dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
intmax = torch.iinfo(dtype).max
make_arg = functools.partial(
make_tensor, dtype=dtype, device=self.device, requires_grad=False
)
self.common(
fn,
(
make_arg(16, 16),
make_arg(16, 16, high=intmax),
),
)
@xfail_if_triton_cpu
def test_pow_symfloat(self):
def fn(x):
r = math.sqrt(x.size(0))
r = r**10
return x * r
cfn = torch.compile(fullgraph=True, dynamic=True)(fn)
x = torch.randn([16, 16], device=self.device)
self.assertEqual(cfn(x), fn(x))
def test_glu(self):
def fn(x):
return aten.glu(x, -1), aten.glu(x, 1), aten.glu(x, 2)
self.common(
fn,
(torch.randn([8, 16, 8, 8]),),
)
def test_unsigned_constant_tensors(self):
def fn(x):
c = torch.tensor(7, dtype=torch.uint8)
return c + x, torch.neg(c), torch.neg(c) + x
self.common(
fn,
(torch.randn([16, 16]),),
)
# Disable size_asserts for this test due to https://github.com/pytorch/pytorch/issues/145963
@config.patch(size_asserts=os.environ.get("TORCHINDUCTOR_SIZE_ASSERTS") == "1")
@torch._dynamo.config.patch(capture_dynamic_output_shape_ops=True)
def test_nonzero_unbacked_refinement(self):
    """torch._check refines an unbacked nonzero() size; violating inputs must raise."""

    def fn(x):
        z = x.nonzero()
        torch._check(z.size(0) == 4)
        return z + 3

    self.common(
        fn,
        (torch.tensor([0, 1, 3, 4, 2, 0, 0]),),
    )

    # Input with fewer nonzeros than the asserted size must fail at runtime.
    with self.assertRaises(RuntimeError):
        torch.compile(fn)(torch.tensor([0, 0, 0, 0]))
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_unbacked_floordiv_simplify(self):
def fn(x, y):
z = y.item()
torch._check(z // 2 == 3)
return x + x.new_ones(z)
self.common(
fn,
(
torch.randn(6),
torch.tensor([6]),
),
)
self.common(
fn,
(
torch.randn(7),
torch.tensor([7]),
),
)
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_unbacked_floordiv_simplify_errors(self):
def fn(x, y):
z = y.item()
torch._check(z // 2 == 3)
return x + x.new_zeros(z)
# This is a little suboptimal: we actually fail /in the compiler/ but
# not in a way that causes Dynamo to graph break
with self.assertRaises(RuntimeError):
torch.compile(fn)(torch.randn(8), torch.tensor(8))
def test_cat(self):
    """cat along negative/positive dims, with sliced inputs and mixed dtypes."""
    # On MPS use half instead of double for the dtype-mix branch
    # (presumably float64 is unsupported there — see other MPS special cases).
    tgt_dtype = torch.double if self.device != "mps" else torch.half

    def fn(a):
        tmp = a * 2
        return (
            torch.cat((a, a[:, :4] + 1, a + 2), -1),
            torch.cat((tmp, tmp), 0),
            torch.cat((tmp, tmp.to(dtype=tgt_dtype)), 0),
        )

    self.common(
        fn,
        (torch.randn([8, 16]),),
    )
    self.common(
        fn,
        (torch.randn([1, 3, 3, 16]).to(memory_format=torch.channels_last),),
    )
def test_cat_uint8(self):
    """cat of a zero-filled expanded prefix column with a uint8 tensor."""

    def fn(x):
        batch_shape = x.shape[:1]
        out = torch.cat([x.new_zeros(1).expand(batch_shape + (1,)), x], dim=-1)
        return out

    self.common(
        fn,
        (torch.randint(0, 256, size=(3, 255), dtype=torch.uint8),),
    )
def test_cat_empty(self):
def fn_2(*tensors):
return torch.cat(tensors)
self.common(
fn_2,
(
torch.randn([1, 3, 3, 16]),
torch.ones([0]),
),
)
self.common(
fn_2,
(
torch.randn([1, 3, 3, 16]),
torch.ones([0]),
torch.randn([1, 3, 3, 16]),
),
)
self.common(
fn_2,
(
torch.ones([0]),
torch.randn([1, 3, 3, 16]),
),
)
def test_cat_empty_index(self):
    """cat of an empty indexed slice (shape [0, 64]) with a non-empty tensor."""

    def fn(buffer, extra):
        empty_head = buffer[0]
        return torch.cat([empty_head, extra], dim=0)

    self.common(fn, (torch.randn(1, 0, 64), torch.randn(128, 64)))
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_cat_unbacked_legacy_empty(self):
    """cat of a 2-D tensor with an unbacked-size 1-D tensor raises a rank mismatch."""

    def fn(x, y):
        z = y.item()
        return torch.cat([x, x.new_ones(z)])

    with self.assertRaisesRegex(
        RuntimeError,
        "Expected 2-D tensors, but got 1-D for tensor number 1 in the list",
    ):
        self.common(
            fn,
            (
                torch.randn([2, 3]),
                torch.tensor([0]),
            ),
        )
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_cat_unbacked_empty_1d(self):
def fn(x, y):
z = y.item()
return torch.cat([x, x.new_ones(z)])
self.common(
fn,
(
torch.randn([2]),
torch.tensor([0]),
),
)
self.common(
fn,
(
torch.randn([2]),
torch.tensor([3]),
),
)
@torch._dynamo.config.patch(capture_scalar_outputs=True)
def test_cat_unbacked_2d(self):
def fn(x, y):
z = y.item()
return torch.cat([x, x.new_ones(z, x.shape[1])])
self.common(
fn,
(
torch.randn([2, 3]),
torch.tensor([0]),
),
)
self.common(
fn,
(
torch.randn([2, 3]),
torch.tensor([4]),
),
)
def test_cat_negative_dim(self):
def fn(*tensors):
return torch.cat(tensors, dim=-1)
self.common(
fn,
(
torch.randn([2, 3]),
torch.randn([2, 4]),
),
)
self.common(
fn,
(
torch.randn([2, 3]),
torch.randn([0]),
torch.randn([2, 4]),
),
)
self.common(
fn,
(
torch.randn([0]),
torch.randn([2, 3]),
torch.randn([2, 4]),
),
)
@expectedFailureCodegenDynamic
def test_cat_single_empty(self):
# fails dynamic check for 'has a dynamic dimension'
def fn_2(*tensors):
return torch.cat(tensors)
self.common(
fn_2,
(torch.ones([0]),),
)
def test_cat_upcasting(self):
    """cat of float32 with float16 promotes the result to the wider dtype."""

    def fn(arg4_1, slice_7):
        cat_1 = aten.cat.default([arg4_1, slice_7], 1)
        return (cat_1,)

    self.common(
        fn,
        (
            torch.randn([8, 16], dtype=torch.float32),
            torch.randn([8, 20], dtype=torch.float16),
        ),
    )
def test_cat_extern_kernel(self):
def fn(x1, x2, x3, x4):
x = torch.mm(x2, x3)
s = torch.narrow(x, 1, 0, 100)
x = torch.mm(s, x4)
c = torch.cat((x, x1), 1)
return (c,)
if self.device == "xpu":
atol = 3e-4
rtol = 1e-4
else:
atol = 5e-4
rtol = 3e-4
# MPS has correctness problem before MacOS15
with (
contextlib.nullcontext()
if self.device != "mps" or MACOS_VERSION >= 15.0
else self.assertRaises(AssertionError)
):
self.common(
fn,
(
torch.randn(256, 256),
torch.randn(256, 1024),
torch.randn(1024, 1600),
torch.randn(100, 256),
),
atol=atol,
rtol=rtol,
check_lowp=False, # accuracy issues with relatively large matmuls
)
@skip_if_gpu_halide
# Constant folding was explicitly turned off due to issue #108388
# Turn it back on for test
@unittest.skipIf(config.triton.native_matmul, "native matmul has better precision")
@torch._inductor.config.patch(
joint_graph_constant_folding=True,
# Numerical accuracy failure for triton fp16
max_autotune_gemm_backends="ATEN",
)
def test_remove_no_ops(self):
def matmul_with_op(x, y, fn):
return fn(x @ y)
foo_opt = torch.compile(matmul_with_op)
# test no-op
fns = (
lambda x: x + torch.zeros([256, 256], dtype=torch.float32, device=x.device), # noqa: E731
lambda x: x - torch.zeros([256, 256], dtype=torch.float32, device=x.device), # noqa: E731
lambda x: x * torch.ones([256, 256], dtype=torch.float32, device=x.device), # noqa: E731
lambda x: x / torch.ones([256, 256], dtype=torch.float32, device=x.device), # noqa: E731
)
inps = [torch.rand([256, 256], device=self.device) for _ in range(2)]
for fn in fns:
out, source_codes = run_and_get_code(foo_opt, inps[0], inps[1], fn)
self.assertEqual(out, matmul_with_op(inps[0], inps[1], fn))
atol, rtol = None, None
if self.device == "cpu":
FileCheck().check_not("cpp_fused").run(source_codes[0])
else:
FileCheck().check_not("triton.jit").run(source_codes[0])
# test dtype conversion
for lowp_dtype in [torch.float16, torch.bfloat16]:
if not self.is_dtype_supported(lowp_dtype):
continue
inps = [
torch.rand([256, 256], device=self.device, dtype=lowp_dtype)
for _ in range(2)
]
for fn in fns:
out, source_codes = run_and_get_code(foo_opt, inps[0], inps[1], fn)
self.assertEqual(
out, matmul_with_op(inps[0], inps[1], fn), atol=atol, rtol=rtol
)
# test broadcasted shape bail
fn = lambda x: x + torch.zeros( # noqa: E731
[256, 256, 256], dtype=lowp_dtype, device=self.device
)
out, source_codes = run_and_get_code(foo_opt, inps[0], inps[1], fn)
self.assertEqual(
out, matmul_with_op(inps[0], inps[1], fn), atol=atol, rtol=rtol
)
def test_remove_noop_copy(self):
def fn(x, y):
x = x.cos()
a = x.copy_(y)
return a.sin()
self.common(fn, (torch.randn(8, 8), torch.randn(8)))
def fn2(a, b):
abs_max = torch.abs(a).max()
b[0] = abs_max.to(a.dtype)
return b
self.common(
fn2,
(
torch.randn(8, 8, dtype=torch.float16),
torch.randn(8, dtype=torch.float32),
),
)
def test_remove_noop_clone(self):
def fn(x):
y = x.clone().reshape(-1, 4)
y[:, [2, 0]] = y[:, [0, 2]]
return y + x
self.common(fn, (torch.randn(2, 4),))
def test_remove_noop_slice(self):
def f(x):
x = x + 1
size = x.shape[-1]
y = torch.ops.aten.slice(x, -1, 0, size) # noop
return y + 1
f = torch.compile(f)
x = torch.ones((2, 3, 2), device=self.device)
torch._dynamo.mark_dynamic(x, 0)
torch._dynamo.mark_dynamic(x, 1)
torch._dynamo.mark_dynamic(x, 2)
post_grad_graph = get_post_grad_graph(f, (x,))
expected_graph = f"""\
def forward(self, arg0_1: "Sym(s77)", arg1_1: "Sym(s27)", arg2_1: "Sym(s53)", arg3_1: "f32[s77, s27, s53][s27*s53, s53, 1]{str(x.device)}"):
add: "f32[s77, s27, s53][s27*s53, s53, 1]{str(x.device)}" = torch.ops.aten.add.Tensor(arg3_1, 1); arg3_1 = None
add_9: "f32[s77, s27, s53][s27*s53, s53, 1]{str(x.device)}" = torch.ops.aten.add.Tensor(add, 1); add = None
return (add_9,)""" # noqa: B950
self.assertExpectedInline(
post_grad_graph,
expected_graph,
ignore_comments=True,
ignore_empty_lines=True,
)
def test_remove_noop_slice1(self):
def f(x):
x = x + 1
y = torch.ops.aten.slice(x, -1, 0, -1) # not a noop
return y + 1
f = torch.compile(f)
x = torch.ones((2, 3, 2), device=self.device)
torch._dynamo.mark_dynamic(x, 0)
torch._dynamo.mark_dynamic(x, 1)
post_grad_graph = get_post_grad_graph(f, (x,))
expected_graph = f"""\
def forward(self, arg0_1: "Sym(s77)", arg1_1: "Sym(s27)", arg2_1: "f32[s77, s27, 2][2*s27, 2, 1]{str(x.device)}"):
add: "f32[s77, s27, 2][2*s27, 2, 1]{str(x.device)}" = torch.ops.aten.add.Tensor(arg2_1, 1); arg2_1 = None
slice_1: "f32[s77, s27, 1][2*s27, 2, 1]{str(x.device)}" = torch.ops.aten.slice.Tensor(add, -1, 0, -1); add = None
add_9: "f32[s77, s27, 1][s27, 1, 1]{str(x.device)}" = torch.ops.aten.add.Tensor(slice_1, 1); slice_1 = None
return (add_9,)""" # noqa: B950
self.assertExpectedInline(
post_grad_graph,
expected_graph,
ignore_comments=True,
ignore_empty_lines=True,
)
def test_remove_noop_slice_scatter(self):
def f(x):
x = x + 1
y = torch.empty_like(x)
size = x.shape[-1]
out = torch.ops.aten.slice_scatter(y, x, -1, 0, size) # noop
return out + 1
f = torch.compile(f)
x = torch.ones((2, 3, 2), device=self.device)
torch._dynamo.mark_dynamic(x, 0)
torch._dynamo.mark_dynamic(x, 1)
torch._dynamo.mark_dynamic(x, 2)
post_grad_graph = get_post_grad_graph(f, (x,))
expected_graph = f"""\
def forward(self, arg0_1: "Sym(s77)", arg1_1: "Sym(s27)", arg2_1: "Sym(s53)", arg3_1: "f32[s77, s27, s53][s27*s53, s53, 1]{str(x.device)}"):
empty: "f32[s77, s27, s53][s27*s53, s53, 1]{str(x.device)}" = torch.ops.aten.empty.memory_format([arg0_1, arg1_1, arg2_1], dtype = torch.float32, layout = torch.strided, device = {repr(x.device)}, pin_memory = False); arg0_1 = arg1_1 = arg2_1 = None
permute: "f32[s77, s27, s53][s27*s53, s53, 1]{str(x.device)}" = torch.ops.aten.permute.default(empty, [0, 1, 2]); empty = permute = None
add: "f32[s77, s27, s53][s27*s53, s53, 1]{str(x.device)}" = torch.ops.aten.add.Tensor(arg3_1, 1); arg3_1 = None
add_13: "f32[s77, s27, s53][s27*s53, s53, 1]{str(x.device)}" = torch.ops.aten.add.Tensor(add, 1); add = None
return (add_13,)""" # noqa: B950
self.assertExpectedInline(
post_grad_graph,
expected_graph,
ignore_comments=True,
ignore_empty_lines=True,
)
def test_cat_of_loops_and_extern_kernel(self):
class M(torch.nn.Module):
def __init__(
self,
**kwargs,
):
super().__init__()
self.conv = torch.nn.Conv2d(
64,
5,
1,
**kwargs,
)
self.max_pool2d = torch.nn.MaxPool2d(2)
def forward(self, x, y):
x1 = self.conv(x)
y1 = self.max_pool2d(y)
return torch.cat([x1, y1], 1)
mod = M()
opt_mod = torch.compile(mod, backend="inductor")
memory_format = torch.channels_last
inputs = (
torch.randn([1, 64, 16, 16]).to(memory_format=memory_format),
torch.randn([1, 64, 32, 32]).to(memory_format=memory_format),
)
y = mod(*inputs)
opt_y = opt_mod(*inputs)
self.assertEqual(y, opt_y)
self.assertEqual(y.stride(), opt_y.stride())
def test_cat_inplace(self):
    """A later in-place mutation of the input must not alter an earlier cat result."""

    def fn(x):
        rt = torch.cat([x])
        # Intentionally mutate x after the cat; rt must keep the pre-mutation values.
        v = x.sin_()
        return rt

    # can't use self.common because input is modified inplace
    inp = torch.ones(2)
    opt_fn = torch.compile(fn)
    res = opt_fn(inp.clone())
    expected = fn(inp.clone())
    self.assertEqual(res, expected)
def test_stack(self):
    """stack of two broadcast-expanded tensors along a new trailing dim."""

    def fn(a, b):
        return torch.stack(
            [
                a.expand(12, 16),
                b.expand(12, 16),
            ],
            2,
        )

    self.common(fn, (torch.randn([1, 16]), torch.randn([12, 1])))
def test_hardtanh(self):
    """F.hardtanh at the raw input and at +/-1 shifted inputs."""

    def fn(t):
        shifted = (t, t + 1, t - 1)
        return tuple(F.hardtanh(s) for s in shifted)

    self.common(
        fn,
        (torch.randn([64]),),
    )
def test_hardsigmoid(self):
    """F.hardsigmoid at the center and at both saturation boundaries (+/-3)."""

    def fn(x):
        return F.hardsigmoid(x), F.hardsigmoid(x + 3), F.hardsigmoid(x - 3)

    self.common(
        fn,
        (torch.randn([64]),),
    )
def test_hardswish(self):
    """F.hardswish at the center and at both saturation boundaries (+/-3)."""

    def fn(x):
        return F.hardswish(x), F.hardswish(x + 3), F.hardswish(x - 3)

    self.common(
        fn,
        (torch.randn([64]),),
    )
def test_rsqrt(self):
    """rsqrt on a random vector, plus a shifted-and-offset variant."""

    def fn(t):
        plain = torch.rsqrt(t)
        shifted = torch.rsqrt(t + 1) - 2
        return plain, shifted

    self.common(
        fn,
        (torch.randn([64]),),
    )
def test_expm1(self):
    """expm1 across float and int dtypes, including tiny values near zero
    (the regime where expm1 is more accurate than exp(x) - 1)."""

    def fn(x):
        return torch.expm1(x), torch.expm1(x) * 2

    for dtype in (torch.float16, torch.float, torch.double, torch.int, torch.int64):
        if not self.is_dtype_supported(dtype):
            continue
        self.common(
            fn,
            (torch.randn([64]).to(dtype=dtype),),
        )
        self.common(
            fn,
            (torch.arange(-1e-5, 1e-5, 1e-7).to(dtype=dtype),),
        )
@xfail_if_mps_unimplemented
def test_adaptive_pool_errors_with_long(self):
class Model(torch.nn.Module):
def __init__(self, pool_operator):
super().__init__()
self.pool = pool_operator
def forward(self, x):
x = torch.argmax(x, dim=1)
x = self.pool(x)
return x
for dim in (1, 2, 3):
op_inst = eval(f"torch.nn.AdaptiveMaxPool{dim}d(5)")
model = Model(op_inst).to(self.device)
x = torch.randn([1] * (dim + 2)).to(self.device)
model = torch.compile(model, fullgraph=True)
with self.assertRaisesRegex(
RuntimeError, r".*(not implemented|aoti_torch_).*"
):
model(x)
@xfail_if_mps_unimplemented
def test_adaptive_avg_pool_errors_with_long(self):
class Model(torch.nn.Module):
def __init__(self, pool_operator):
super().__init__()
self.pool = pool_operator
def forward(self, x):
x = torch.argmax(x, dim=1)
x = self.pool(x)
return x
for dim in (1, 2, 3):
op_inst = eval(f"torch.nn.AdaptiveAvgPool{dim}d(5)")
model = Model(op_inst).to(self.device)
x = torch.randn([1] * (dim + 2)).to(self.device)
model = torch.compile(model, fullgraph=True)
with self.assertRaisesRegex(
RuntimeError, r".*(not implemented|aoti_torch_).*"
):
model(x)
@torch._dynamo.config.patch(recompile_limit=12)
def test_avg_pool_errors_with_uint(self):
for dim in (1, 2, 3):
for dtype in (torch.uint8, torch.uint16, torch.uint32, torch.uint64):
x = torch.randn([2] * (dim + 2)).to(dtype)
op = eval(f"torch.nn.functional.avg_pool{dim}d")
c_op = torch.compile(op)
with self.assertRaisesRegex(
RuntimeError, r".*(not implemented|aoti_torch_).*"
):
c_op(x, kernel_size=2, stride=2)
def test_replication_pad_errors_with_bool(self):
for dim in (1, 2, 3):
def fn(x):
x = torch.signbit(x)
x = eval(f"nn.ReplicationPad{dim}d(padding=1)")(x)
return x
c_fn = torch.compile(fn)
x = torch.randn([1] * (dim + 2))
with self.assertRaisesRegex(
RuntimeError, r".*(not implemented|aoti_torch_).*"
):
c_fn(x)
def test_log1p(self):
    """log1p across float and int dtypes, including tiny values near zero
    (the regime where log1p is more accurate than log(1 + x))."""

    def fn(x):
        return torch.log1p(x), torch.log1p(x) * 2

    for dtype in (torch.float16, torch.float, torch.double, torch.int, torch.int64):
        if not self.is_dtype_supported(dtype):
            continue
        self.common(
            fn,
            (torch.randn([64]).to(dtype=dtype),),
        )
        self.common(
            fn,
            (torch.arange(-1e-5, 1e-5, 1e-7).to(dtype=dtype),),
        )
@config.patch(force_disable_caches=True)
@skip_if_cpp_wrapper("run_and_get_kernels issue")
def test_deterministic_codegen(self):
if "cpu" in str(self.device) and config.is_fbcode():
raise unittest.SkipTest("cpp packaging is wacky in fbcode")
@torch.compile(fullgraph=True)
def a(x):
return x.cos().sin().softmax(-1)
@torch.compile(fullgraph=True)
def b(x):
return x.sin().cos().softmax(-1)
@torch.compile(fullgraph=True)
def c(x):
return x.cos().sin().softmax(-1)
x = torch.randn(16, 256, device=self.device)
_, (coda_a0,) = _run_and_get_stripped_kernels(a, x)
_, (coda_b0,) = _run_and_get_stripped_kernels(b, x)
_, (coda_c0,) = _run_and_get_stripped_kernels(c, x)
self.assertEqual(coda_a0, coda_c0)
# compile in a different order
torch.compiler.reset()
_, (coda_c1,) = _run_and_get_stripped_kernels(c, x)
_, (coda_a1,) = _run_and_get_stripped_kernels(a, x)
_, (coda_b1,) = _run_and_get_stripped_kernels(b, x)
self.assertEqual(coda_a0, coda_a1)
self.assertEqual(coda_b0, coda_b1)
self.assertEqual(coda_c0, coda_c1)
# force a different CompileId
torch.compiler.reset()
CompileContext_init = CompileContext.__init__
with patch.object(
CompileContext,
"__init__",
lambda self, _: CompileContext_init(
self, CompileId(frame_id=999, frame_compile_id=999)
),
):
_, (coda_a2,) = _run_and_get_stripped_kernels(a, x)
_, (coda_c2,) = _run_and_get_stripped_kernels(c, x)
_, (coda_b2,) = _run_and_get_stripped_kernels(b, x)
self.assertEqual(coda_a0, coda_a2)
self.assertEqual(coda_b0, coda_b2)
self.assertEqual(coda_c0, coda_c2)
@config.patch(force_disable_caches=True)
@skip_if_cpp_wrapper("run_and_get_kernels issue")
def test_deterministic_codegen_on_graph_break(self):
if "cpu" in str(self.device) and config.is_fbcode():
raise unittest.SkipTest("cpp packaging is wacky in fbcode")
def a(x):
return x.cos().sin().softmax(-1)
@torch.compile()
def b(x):
x = a(x)
torch._dynamo.graph_break()
x = a(x)
return x
x = torch.randn(16, 256, device=self.device)
_, (code0, code1) = _run_and_get_stripped_kernels(b, x)
self.assertEqual(code0, code1)
@config.patch(
force_disable_caches=True,
# Test expects a single (fused) kernel to be generated
max_autotune_gemm_backends="ATEN",
)
@skip_if_cpp_wrapper("run_and_get_kernels issue")
@unittest.skipIf(config.triton.native_matmul, "matmul is now generated")
def test_deterministic_codegen_with_suffix(self):
if "cpu" in str(self.device) and config.is_fbcode():
raise unittest.SkipTest("cpp packaging is wacky in fbcode")
@torch.compile(fullgraph=True)
def a(x):
return x.cos().sin().softmax(-1)
@torch.compile(fullgraph=True)
def b(x, y):
x = x.cos().sin().softmax(-1)
x = torch.matmul(x, y)
return x
x = torch.randn(16, 256, device=self.device)
y = torch.randn(256, 256, device=self.device)
_, (code0,) = _run_and_get_stripped_kernels(a, x)
_, (code1,) = _run_and_get_stripped_kernels(b, x, y)
self.assertEqual(code0, code1)
def test_flip(self):
    """torch.flip along the last dim and along dims (0, 2) with an offset."""

    def fn(t):
        last_dim = torch.flip(t, (-1,))
        multi_dim = torch.flip(t, (0, 2)) - 2
        return last_dim, multi_dim

    self.common(
        fn,
        (torch.randn([1, 2, 6, 6]),),
    )
def test_signbit(self):
    """signbit plus bitwise not/and on its boolean result."""

    def fn(x):
        return torch.signbit(x), ~torch.signbit(-x) & 1

    self.common(
        fn,
        (torch.randn([1, 2, 6, 6]),),
    )
def test_sign_dtype(self):
    """tanh applied to the output of torch.sign."""

    def fn(t):
        signs = torch.sign(t)
        return torch.tanh(signs)

    self.common(fn, (torch.randn([1, 2, 6, 6]),))
@xfail_if_triton_cpu
def test_fmod(self):
    """Elementwise torch.fmod, plus a scaled/offset variant."""

    def fn(a, b):
        return torch.fmod(a, b), torch.fmod(3.0 * a, b) - 2.0

    shape = [1, 2, 6, 6]
    self.common(fn, (torch.randn(shape), torch.randn(shape)))
@xfail_if_triton_cpu
def test_fmod_zero_dim(self):
def fn(a, b):
return (torch.fmod(a, b),)
self.common(
fn,
(
make_tensor(10, device=self.device, dtype=torch.float32),
make_tensor((), device=self.device, dtype=torch.float32),
),
)
self.common(
fn,
(
make_tensor((), device=self.device, dtype=torch.float32),
make_tensor(10, device=self.device, dtype=torch.float32),
),
)
@skip_if_halide  # log2 not implemented for halide
def test_log2(self):
    """torch.log2 on strictly positive inputs (shifted by +10)."""

    def fn(x):
        return torch.log2(x), torch.log2(x + 1) - 2

    self.common(
        fn,
        (torch.randn([64]) + 10,),
    )
def test_logsumexp(self):
    """logsumexp reduction over the last dim and over dim 0."""

    def fn(x):
        return torch.logsumexp(x, -1), torch.logsumexp(x, 0) - 2

    self.common(
        fn,
        (torch.randn([8, 8]) + 10,),
    )
@skip_if_halide # log2 not implemented for halide
def test_log_fp64(self):
def fn(x):
return torch.log(x), torch.log2(x)
_dtype = torch.float64
ctx = (
contextlib.nullcontext()
if self.is_dtype_supported(_dtype)
else self.assertRaises(TypeError)
)
with ctx:
self.common(
fn,
(torch.randn([1024], dtype=_dtype) + 10,),
)
def test_bitwise(self):
    """bitwise not/or/xor/and on int32 inputs."""

    def fn(x, y):
        return (
            torch.bitwise_not(x),
            torch.bitwise_or(x, y),
            torch.bitwise_xor(x, y),
            torch.bitwise_and(x, y),
        )

    self.common(
        fn,
        (
            torch.randint(0, 2**30, [64], dtype=torch.int32),
            torch.randint(0, 2**30, [64], dtype=torch.int32),
        ),
    )
def test_bitwise2(self):
    """bitwise not/or/xor/and on bool inputs (logical semantics)."""
    # again with bool types
    def fn(x, y):
        return (
            torch.bitwise_not(x),
            torch.bitwise_or(x, y),
            torch.bitwise_xor(x, y),
            torch.bitwise_and(x, y),
        )

    self.common(
        fn,
        (
            torch.randint(0, 2, (2, 20), dtype=torch.bool),
            torch.randint(0, 2, (2, 20), dtype=torch.bool),
        ),
    )
def test_bitwise3(self):
# Repro for https://github.com/pytorch/pytorch/issues/97968
def fn(x, y):
return (
torch.max(torch.bitwise_and(x, y), y),
torch.clamp_max(torch.bitwise_or(x, y), y),
torch.clamp_min(torch.bitwise_xor(x, y), y),
)
self.common(
fn,
(
torch.rand([5, 10, 1]).to(torch.int8),
torch.rand([10, 1]).to(torch.int8),
),
)
def test_inf(self):
    """Arithmetic with +/-inf constants folds correctly."""

    def fn(t):
        pos, neg = float("inf"), float("-inf")
        return t + pos, t + neg, t * -pos

    self.common(fn, (torch.randn(8),))
def test_remainder(self):
    """torch.remainder with raw and shifted operands (mixed-sign inputs)."""

    def fn(a, b):
        return (
            torch.remainder(a, b),
            torch.remainder(a + 1, b - 1),
            torch.remainder(a - 1, b + 1),
        )

    self.common(fn, (torch.randn(64), torch.randn(64)))
def test_zeros(self):
def fn(a):
return (
a + 1,
torch.zeros(
(1, 8, 64, 64),
dtype=torch.float32,
device=a.device,
),
torch.zeros(
1,
8,
64,
64,
dtype=torch.float32,
device=a.device,
),
torch.zeros(2, 3),
a + torch.ones(8, device=a.device),
torch.full((2, 3), 3.1416, device=a.device),
)
self.common(fn, (torch.randn(8),))
def test_new_ones(self):
def fn(a):
return (
aten.new_ones(
a, [], device=a.device, dtype=6, layout=0, pin_memory=False
),
aten.new_zeros(
a, [], device=a.device, dtype=6, layout=0, pin_memory=False
),
)
self.common(fn, (torch.randn(8),))
def test_full_like(self):
    """full_like fills with a scalar; the subtraction checks the fill value."""

    def fn(t):
        filled = torch.full_like(t, 7.777)
        return filled - 1

    self.common(fn, (torch.randn(8),))
def test_full_like_transposed(self):
def fn(a):
return torch.full_like(a, 3)
self.common(fn, (torch.randn(4, 5, 6).transpose(1, -1),), exact_stride=True)
def test_full_like_sliced(self):
def fn(a):
return torch.full_like(a, 3)
self.common(fn, (torch.rand(3, 4)[:, ::2],), exact_stride=True)
def test_full_truncation(self):
def fn(a):
return a + torch.full_like(a, 7.777)
for dtype in all_types():
ctx = (
contextlib.nullcontext()
if self.is_dtype_supported(dtype)
else self.assertRaises(TypeError)
)
with ctx:
self.common(
fn,
(make_tensor(8, dtype=dtype, device=self.device),),
check_lowp=False,
)
def test_full_boolean(self):
    """torch.full with a Python-bool fill value derived from a comparison."""

    def fn(n):
        x = torch.full((1,), n >= 1024, device=self.device)
        return x, x + 1

    # Exercise both truth values of the comparison.
    self.common(fn, (1024,))
    self.common(fn, (1023,))
def test_index1(self):
    """aten.index with two index tensors, both aligned and broadcasting shapes."""

    def fn(a, b, c):
        return aten.index(a, [b, c])

    self.common(
        fn,
        (
            torch.randn(8, 8, 12),
            torch.tensor([0, 0, 2, 2], dtype=torch.int64),
            torch.tensor([3, 4, 4, 3], dtype=torch.int64),
        ),
    )
    # Broadcasting case: (1, 4) row indices against (4, 1) column indices.
    self.common(
        fn,
        (
            torch.randn(8, 8, 12),
            torch.tensor([[0, 0, 2, 2]], dtype=torch.int64),
            torch.tensor([[3], [4], [4], [3]], dtype=torch.int64),
        ),
    )
def test_index2(self):
def fn(a, b):
return (
aten.index(a, [b]),
aten.index(a, [None, b]),
)
self.common(
fn,
(
torch.randn(8, 8, 8),
torch.tensor([[0, 0, 2, 2]], dtype=torch.int64),
),
)
def test_index3(self):
def fn(x, ia, ib):
return (x[:, ia, None, ib, 0],)
self.common(
fn,
(
torch.randn(3, 4, 4, 4, 3),
torch.tensor([0, 2, 1], dtype=torch.int64),
torch.tensor([0, 2, 1], dtype=torch.int64),
),
)
def test_output_strides(self):
    """Compiled output strides must match eager across a graph break, and
    view-only chains must not introduce a redundant copy."""

    def fn(x):
        y = x.permute(0, 2, 3, 1).contiguous()
        torch._dynamo.graph_break()
        return y.view(-1, 4)

    inp = torch.rand([4, 4, 4, 4], device=self.device)
    fn_opt = torch.compile(fn, backend="inductor")
    self.assertEqual(fn(inp), fn_opt(inp))
    self.assertEqual(fn(inp).stride(), fn_opt(inp).stride())

    # no redundant copy
    def foo(x):
        return x[0:2:2].T[3:].squeeze(0)

    foo_opt = torch.compile(foo, backend="inductor")
    out = foo_opt(inp)
    # Output must alias the input storage, proving no copy was inserted.
    self.assertEqual(inp.storage(), out.storage())
def test_index_select(self):
    """index_select along dims 0/1 and nested, with int32 and int64 indices."""

    def fn(a, b):
        return (
            torch.index_select(a, 0, b),
            torch.index_select(a, 1, b),
            torch.index_select(torch.index_select(a, 2, b), 1, b),
        )

    for ind_dtype in (torch.int32, torch.int64):
        self.common(
            fn,
            (
                torch.randn(8, 8, 8),
                torch.tensor([0, 0, 2, 1], dtype=ind_dtype),
            ),
        )
@xfail_if_mps_unimplemented
@skipCUDAIf(not TEST_CUDNN, "CUDNN not available")
@skipIfXpu
@skipIfRocm
def test_cudnn_rnn(self):
if self.device == "cpu":
raise unittest.SkipTest(f"requires {GPU_TYPE}")
def fn(
a0,
b0,
b1,
b2,
b3,
b4,
b5,
b6,
b7,
b8,
b9,
b10,
b11,
b12,
b13,
b14,
b15,
a3,
a4,
a5,
):
a1 = [
b0,
b1,
b2,
b3,
b4,
b5,
b6,
b7,
b8,
b9,
b10,
b11,
b12,
b13,
b14,
b15,
]
return aten._cudnn_rnn(
a0,
a1,
4,
a3,
a4,
a5,
2,
2048,
0,
2,
False,
0.0,
False,
True,
[],
None,
)
self.common(
fn,
(
torch.randn([92, 8, 2048]),
torch.randn([8192, 2048]),
torch.randn([8192, 2048]),
torch.randn([8192]),
torch.randn([8192]),
torch.randn([8192, 2048]),
torch.randn([8192, 2048]),
torch.randn([8192]),
torch.randn([8192]),
torch.randn([8192, 4096]),
torch.randn([8192, 2048]),
torch.randn([8192]),
torch.randn([8192]),
torch.randn([8192, 4096]),
torch.randn([8192, 2048]),
torch.randn([8192]),
torch.randn([8192]),
torch.randn([167837696]),
torch.randn([4, 8, 2048]),
torch.randn([4, 8, 2048]),
),
check_lowp=False, # difference in rnn is too large between half and float inputs
)
def test_upsample_nearest1d(self):
def fn(a):
return (
aten.upsample_nearest1d(a, [74], None),
aten.upsample_nearest1d(a, [70], None),
aten.upsample_nearest1d(a, [45], None),
aten.upsample_nearest1d(a, [36], None),
aten.upsample_nearest1d(a, None, [2.0]),
)
self.common(fn, (torch.randn([2, 4, 37]),))
def test_upsample_nearest2d(self):
    """upsample_nearest2d with several explicit output sizes and a scale factor."""

    def fn(a):
        return (
            aten.upsample_nearest2d(a, [74, 76]),
            aten.upsample_nearest2d(a, [70, 75]),
            aten.upsample_nearest2d(a, [45, 74]),
            aten.upsample_nearest2d(a, [36, 39]),
            aten.upsample_nearest2d(a, None, [2.0, 2.0]),
        )

    self.common(fn, (torch.randn([2, 4, 37, 38]),))
def test_upsample_nearest3d(self):
def fn(a):
return (
aten.upsample_nearest3d(a, [74, 76, 78], None),
aten.upsample_nearest3d(a, [70, 75, 80], None),
aten.upsample_nearest3d(a, [45, 74, 103], None),
aten.upsample_nearest3d(a, [36, 39, 40], None),
aten.upsample_nearest3d(a, None, [2.0, 2.0, 2.0]),
)
self.common(fn, (torch.randn([2, 4, 37, 38, 39]),))
def test_upsample_nearest2d_backward(self):
func = torch.ops.aten.upsample_nearest2d_backward
def fn(a):
return (
func(a, output_size=[6, 12], input_size=[3, 3, 3, 6]),
func(a, output_size=[6, 12], input_size=[3, 3, 4, 5]),
func(a, output_size=[6, 12], input_size=[3, 3, 2, 8]),
func(a, output_size=[6, 12], input_size=[3, 3, 2, 8]),
func(a, output_size=[6, 12], input_size=[3, 3, 4, 7]),
)
self.common(fn, (torch.randn([3, 3, 6, 12]),))
@skip_if_x86_mac()
def test_upsample_bilinear2d_a(self):
def fn(a):
return (
aten.upsample_bilinear2d(a, [45, 45], False, None),
aten.upsample_bilinear2d(a, None, True, [2.0, 2.0]),
)
self.common(fn, (torch.randn([2, 4, 37, 38]),), atol=2.5e-5, rtol=1.3e-6)
def test_upsample_bilinear2d_b(self):
def fn(a):
return aten.upsample_bilinear2d(a, None, True, [2.0, 2.0])
self.common(
fn,
[
torch.randn([1, 2, 40, 59]),
],
atol=2.5e-5,
rtol=1.3e-6,
)
@skip_if_gpu_halide # accuracy issue
def test_reflection_pad2d(self):
def fn(a, pad):
return (
aten.reflection_pad2d(a, [1, 1, 1, 1]),
aten.reflection_pad2d(a, pad),
)
self.common(
fn,
(
torch.randint(0, 999, size=[1, 1, 8, 8], dtype=torch.float32),
[5, 2, 3, 4],
),
)
@xfail_if_mps
def test_reflection_pad2d_backward(self):
def template(size, padding):
def fn(grad_output, x):
return aten.reflection_pad2d_backward(grad_output, x, padding)
x = torch.randint(0, 999, size=size, dtype=torch.float32)
result = aten.reflection_pad2d(x, padding)
grad_output = torch.randn_like(result)
self.common(
fn, (grad_output, x), check_lowp=not is_halide_backend(self.device)
)
template([1, 1, 8, 8], [0, 0, 0, 0])
template([1, 1, 8, 8], [1, 1, 1, 1])
template([1, 1, 8, 8], [1, 2, 3, 4])
template([1, 1, 8, 8], [0, -1, 2, 2])
template([1, 1, 8, 8], [-1, 0, 2, 2])
template([1, 1, 8, 8], [2, 2, 0, -1])
template([1, 1, 8, 8], [2, 2, -1, 0])
@xfail_if_mps_unimplemented # Unsupported Border padding mode
def test_grid_sampler_2d(self):
def fn(a, b):
return (
aten.grid_sampler_2d(a, b, 0, 0, True),
aten.grid_sampler_2d(a, b, 0, 1, False),
)
self.common(
fn,
(
torch.randn([4, 3, 352, 352], dtype=torch.float32),
torch.rand([4, 352, 352, 2], dtype=torch.float32) * 2 - 1,
),
check_lowp=False,
# Mismatched elements: 154697 / 1486848 (10.4%)
# Greatest absolute difference: 0.0001976490020751953 at index (0, 0, 101, 243) (up to 1e-05 allowed)
# Greatest relative difference: 7.332530120481928 at index (1, 1, 258, 301) (up to 1.3e-06 allowed)
atol=0.0002,
rtol=1.3e-06,
)
@requires_gpu()
def test_grid_sampler_expand_preserves_view(self):
if not self.device.startswith("cuda") and not self.device.startswith("xpu"):
self.skipTest("requires CUDA or XPU")
torch.manual_seed(0)
torch._dynamo.reset()
repeats = 9000
batch = 48
channels = 3
img = 224
grid_size = 13
device = self.device
class ExpandGridSampler(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.grid = torch.nn.Parameter(
torch.randn(repeats, grid_size, grid_size, 2, device=device)
)
self.fc = torch.nn.Linear(grid_size * grid_size * channels, 16).to(
device
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
per_channel = []
for i in range(channels):
channel = x[:, i, ...].expand(repeats, -1, -1, -1)
patch = torch.nn.functional.grid_sample(
channel,
self.grid,
mode="bilinear",
align_corners=False,
padding_mode="border",
)
patch = patch.transpose(0, 1).flatten(start_dim=2)
per_channel.append(patch)
x = torch.cat(per_channel, dim=2)
return self.fc(x)
model = ExpandGridSampler().to(device)
compiled = torch.compile(model, backend="inductor")
inp = torch.randn(batch, channels, img, img, device=device)
out = compiled(inp)
out.sum().backward()
self.assertIsNotNone(model.grid.grad)
def test_upsample_bicubic2d(self):
def fn(a):
return (
aten.upsample_bicubic2d(a, (128, 128), True),
aten.upsample_bicubic2d(a, (128, 256), False),
)
# Mismatched elements: 10 / 196608 (0.0%)
# Greatest absolute difference: 1.3869255781173706e-05 at index (2, 1, 88, 65) (up to 1e-05 allowed)
# Greatest relative difference: 0.0033082996811011046 at index (3, 1, 88, 91) (up to 1.3e-06 allowed)
self.common(
fn,
(torch.randn([4, 3, 64, 32], dtype=torch.float32),),
atol=2e-5,
rtol=1e-3,
)
def test_float_index_expression(self):
# Test that index propagation doesn't generate bad index_expr calls like
# ops.index_expr(0.5*x, dtype) where the expression is not integral
def fn(x):
return aten.upsample_bicubic2d(x, (256, 256), False)
x = torch.randn(1, 1, 128, 128, dtype=torch.float32, device=self.device)
_, source_codes = run_and_get_code(fn, x)
pattern = r"0\.50*\*[ix][\d]"
for code in source_codes:
self.assertIsNone(
re.search(pattern, code), msg="Found bad index_expr in code:\n" + code
)
def test_float_index_expression_type_promotion(self):
# Test that float indexing expressions participate in type promotion
def fn(x):
return x + 1.0 / x.size(0)
x = torch.arange(10)
self.common(fn, (x,))
def test_sort(self):
    """torch.sort in both ascending and descending order.

    Bug fix: the inner ``fn`` previously ignored its ``descending``
    parameter, so the two ``self.common`` calls ran an identical
    ascending sort twice. Forward the flag so each direction is
    actually exercised, matching the sibling tests
    (test_sort_stable / test_sort_bool / test_sort_transpose).
    """

    def fn(a, descending):
        return torch.sort(a, descending=descending)

    inp = torch.randint(0, 999, size=[1, 1, 8, 8], dtype=torch.float32)
    self.common(fn, (inp, False))
    self.common(fn, (inp, True))
@parametrize("stable", (True, False))
@parametrize("descending", (True, False))
def test_nan_sort(self, descending, stable):
def test_sort(x, descending, stable):
out = torch.sort(x, descending=descending, stable=stable)
if stable:
return out
else:
# non stable idx may not be equal
return out[0]
tensor = torch.tensor(
[
0.7308,
0.7053,
0.3349,
-0.7158,
torch.nan,
0.1234,
1.0284,
torch.nan,
-1.8767,
-0.4369,
],
device=self.device,
)
inps = (tensor, descending, stable)
a = torch.compile(test_sort)(*inps)
b = test_sort(*inps)
self.assertEqual(a, b, equal_nan=True)
def test_sort_stable(self):
def fn(a, descending):
return a.sort(dim=-1, stable=True, descending=descending)
# Duplicates give deterministic indices when stable sorting
inp = torch.rand(10, 128, dtype=torch.float32)
inp[:, 10:20] = 1.0
inp[:, 30:40] = 1.0
self.common(fn, (inp, False))
self.common(fn, (inp, True))
# Non-power of two
inp = inp[:, :120]
self.common(fn, (inp, False))
self.common(fn, (inp, True))
def test_sort_bool(self):
    """Stable sort of boolean data (cast to int8) in both directions."""

    def fn(a, descending):
        return torch.sort(a.to(torch.int8), stable=True, descending=descending)

    inp = torch.randint(0, 2, size=[10, 128], dtype=torch.bool)
    self.common(fn, (inp, False))
    self.common(fn, (inp, True))
@skipIfWindows(msg="Crash UT")
def test_sort_transpose(self):
def fn(a, descending):
return torch.sort(a, stable=True, descending=descending)
# MPS has correctness problem for transposed sort before MacOS15
ctx = (
contextlib.nullcontext()
if self.device != "mps" or MACOS_VERSION >= 15.0
else self.assertRaises(AssertionError)
)
inp = torch.randn(128, 10).transpose(0, 1)
with ctx:
self.common(fn, (inp, False))
self.common(fn, (inp, True))
def test_topk(self):
    """torch.topk of the two largest values along the last dim."""

    def fn(t):
        values_and_indices = torch.topk(t, 2, -1)
        return values_and_indices

    self.common(
        fn, (torch.randint(0, 999, size=[1, 1, 8, 8], dtype=torch.float32),)
    )
def test_long_tensor(self):
    """Constant LongTensor / as_tensor literals moved to the input's device."""

    def fn(a):
        return (
            torch.LongTensor([294]).to(a.device) - a,
            torch.as_tensor([295]).to(a.device) + a,
        )

    self.common(fn, (torch.randint(0, 999, size=[8, 8]),))
@skip_if_gpu_halide  # correctness issue
def test_constant_pad_1d(self):
    """constant_pad_nd on the last dim with two different fill values."""

    def fn(a):
        return (
            aten.constant_pad_nd(a, [0, 1], 6.0),
            aten.constant_pad_nd(a, [2, 3], 99.0),
        )

    self.common(fn, (torch.randint(0, 999, size=[2, 16, 31], dtype=torch.float32),))
def test_constant_pad_fill_dtype(self):
def fn(a, b):
return (
aten.constant_pad_nd(a, (1, 1), 1.0) & b,
aten.constant_pad_nd(a, (1, 1), 0.0) & b,
)
self.common(
fn,
(torch.randint(2, (4,), dtype=torch.bool), torch.ones(6, dtype=torch.bool)),
)
@skip_if_gpu_halide # misaligned address
def test_constant_pad_2d(self):
def fn(a):
return (
aten.constant_pad_nd(a, [1, 1, 1, 1], 6.0),
aten.constant_pad_nd(a, [1, 2, 3, 4], 99.0),
)
self.common(
fn, (torch.randint(0, 999, size=[1, 1, 8, 8], dtype=torch.float32),)
)
def test_constant_pad_2d_strides_nonpositive(self):
def fn(a):
return torch.constant_pad_nd(a, [0, 0, 0, -2, 0, 0])
self.common(
fn, (torch.empty_strided((2, 4, 5), (20, 1, 4), dtype=torch.float32),)
)
@skip_if_gpu_halide # misaligned address
def test_constant_pad_3d(self):
def fn(a):
return (
aten.constant_pad_nd(a, [1, 2, 3, 4, 5, 6], 6.0),
aten.constant_pad_nd(a, [0, 0, 3, 4, 0, 0], 6.0),
)
self.common(
fn, (torch.randint(0, 999, size=[2, 4, 4, 4], dtype=torch.float32),)
)
def test_constant_pad_float64(self):
# Repro for https://github.com/pytorch/pytorch/issues/93351
def fn(input):
v1 = torch.nn.functional.pad(input, pad=(1, 0))
return torch.gt(v1, input)
_dtype = torch.float64
ctx = (
contextlib.nullcontext()
if self.is_dtype_supported(_dtype)
else self.assertRaises(TypeError)
)
x = torch.rand([1, 2, 2, 1], dtype=_dtype)
with ctx:
self.common(fn, (x,))
    def test_constant_pad_nd_inplace(self):
        """A zero-width pad must still return a fresh tensor, not alias the input."""
        def fn(a):
            return aten.constant_pad_nd(a, [0, 0])
        x = torch.randn([2], device=self.device)
        fn_compiled = torch.compile(fn)
        y = fn_compiled(x)
        # Even though the pad is a no-op, the output must not be the input object.
        self.assertTrue(y is not x)
def test_l1_loss(self):
def fn(a, b):
return torch.nn.functional.l1_loss(a, b), torch.nn.functional.mse_loss(a, b)
self.common(
fn,
(
torch.randn([2, 3, 16, 16]),
torch.randn([2, 3, 16, 16]),
),
check_lowp=False,
)
def test_triu(self):
def fn(a):
return aten.triu(a, 1), aten.triu(a, 0), aten.triu(a, 2)
self.common(fn, (torch.randn([2, 10, 10]),))
def test_no_op_reduction(self):
def fn(a):
return a.sum(-1), torch.amax(a + 1, 1, keepdim=True)
self.common(fn, (torch.randn([8, 1, 1]),))
    def test_inplace_add(self):
        """x.add_(y) must mutate x in place and return the very same tensor object."""
        @torch.compile(backend="inductor")
        def fn(x, y):
            return x.add_(y)
        inputs = (
            rand_strided((4, 4), (4, 1), device=self.device),
            rand_strided((4, 4), (4, 1), device=self.device),
        )
        # Snapshot the first input before the compiled fn mutates it.
        inp_clone = inputs[0].clone()
        out = fn(*inputs)
        self.assertTrue(same(out, inp_clone + inputs[1]))
        # The return value must alias the mutated input.
        self.assertTrue(out is inputs[0])
# The following 2 tests are meant to check the logic that drops
# xmask from triton load/store if xnumel = 1
@requires_gpu()
def test_single_elem(self):
def fn(a):
b = a + 1
return (b,)
self.common(fn, (torch.randn(1),))
@requires_gpu()
def test_single_elem_indirect(self):
def fn(a, b):
c = a[b] + 1
return (c,)
a = torch.randn(1)
b = (torch.tensor([0], dtype=torch.int64),)
self.common(fn, (a, b))
# This test is meant to check for issues from the logic
# that drops xmask from trito load/store if XBLOCK divides xnumel
@requires_gpu()
def test_xblock_divides_xnumel(self):
def fn(a):
b = a + 1
return (b,)
# assumption is that XBLOCK is always a divisor of 1024
# so xmask will be dropped iff xnumel is multiple of 1024
self.common(fn, (torch.randn(1024),))
self.common(fn, (torch.randn(1025),))
def test_inplace_mixed_dtype_ops(self):
@torch.compile(backend="inductor")
def fn(x, y):
z = x + y.float()
w = z.add_(y)
return w.mul_(y)
tgt_dtype = torch.double if self.device != "mps" else torch.half
inputs = (
rand_strided((4, 4), (4, 1), device=self.device, dtype=torch.float),
rand_strided((4, 4), (4, 1), device=self.device, dtype=tgt_dtype),
)
out = fn(*inputs)
out_eager = (inputs[0] + inputs[1].float()).add_(inputs[1]).mul_(inputs[1])
self.assertTrue(same(out, out_eager))
@config.patch(
{"triton.unique_kernel_names": True, "triton.descriptive_names": False}
)
def test_kernel_names(self):
@torch.compile(backend="inductor")
def fn(x):
return 2 * x
inputs = (rand_strided((8,), (1,), device=self.device),)
self.assertTrue(same(fn(*inputs), 2 * inputs[0]))
@config.patch({"triton.cudagraphs": True})
@dynamo_config.patch(automatic_dynamic_shapes=True)
def test_strided_inputs(self):
@torch.compile(backend="inductor")
def fn(x, y):
return x + y
inputs = (
rand_strided((8, 16), (32, 2), device=self.device),
rand_strided((8, 16), (16, 1), device=self.device),
)
self.assertTrue(same(fn(*inputs), inputs[0] + inputs[1]))
    @config.patch({"triton.cudagraphs": True})
    @dynamo_config.patch(automatic_dynamic_shapes=True)
    def test_input_mutation1(self):
        """A compiled fn that copy_()s into its input must match eager output
        AND leave the mutated argument in the same final state."""
        def fn(a):
            b = a + 1
            a.copy_(b)  # mutates the input in place
            c = a + 2
            return a * b / c
        arg1 = torch.randn(64, device=self.device)
        arg2 = arg1.clone()
        arg3 = torch.randn(64, device=self.device)
        arg4 = arg3.clone()
        # Eager references (arg1/arg3 get mutated here).
        correct1 = fn(arg1)
        correct2 = fn(arg3)
        opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
        actual1 = opt_fn(arg2)
        actual2 = opt_fn(arg4)
        self.assertTrue(same(actual1, correct1))
        self.assertTrue(same(actual2, correct2))
        # Mutated inputs must end up identical in eager and compiled runs.
        self.assertTrue(same(arg1, arg2))
        self.assertTrue(same(arg3, arg4))
def test_input_mutation2(self):
def fn(a):
b = a + 1
a.view(64).copy_(torch.tensor([66.0], device=a.device))
c = a + 2
return b, c
# NOTE: this test fails when none of the inputs require grad.
# That seems like an inductor bug.
arg1 = torch.randn([1, 64], device=self.device).requires_grad_(True).add(1)
arg2 = arg1.clone()
correct1 = fn(arg1)
opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
actual1 = opt_fn(arg2)
self.assertTrue(same(actual1, correct1))
self.assertTrue(same(arg1, arg2))
def test_input_mutation3(self):
def fn(a):
a += 1
a *= 2
aten.sigmoid_(a)
a = a.view(64)
a += 3
a *= 4
aten.relu_(a)
return a
arg1 = torch.randn([1, 64], device=self.device)
arg2 = arg1.clone()
correct1 = fn(arg1)
opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
actual1 = opt_fn(arg2)
self.assertTrue(same(actual1, correct1))
self.assertTrue(same(arg1, arg2))
def test_input_mutation4(self):
def fn(a):
torch.relu_(a)
return a
arg1 = torch.randn([1, 64], device=self.device)
arg2 = arg1.clone()
correct1 = fn(arg1)
opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
actual1 = opt_fn(arg2)
self.assertTrue(same(actual1, correct1))
self.assertTrue(same(arg1, arg2))
def test_input_mutation5(self):
def fn(x):
tmp = x.ceil()
x.add_(10)
return tmp
opt_fn = torch.compile(fn)
a = torch.zeros((), dtype=torch.int64, device=self.device)
a_expect = a.clone()
expect = fn(a_expect)
a_actual = a.clone()
actual = opt_fn(a_actual)
self.assertEqual(a_expect, a_actual)
self.assertEqual(expect, actual)
def test_slice_mutation1(self):
def fn(a):
x = torch.zeros_like(a)
b = x + 1
x[:, 3] = 3.0
c = torch.clone(x)
x[4, :] = 4.0
d = x + 1
return x, b, c, d
self.common(fn, (torch.randn([8, 8]),))
@skip_if_gpu_halide # accuracy issue
def test_slice_mutation2(self):
def fn(a):
a[:, 20:40] = a[:, 20:40] + 1
a[:, 2:11] = a[:, 1:10] + 2
arg1 = torch.randn([1, 64], device=self.device)
arg2 = arg1.clone()
fn(arg1)
opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
opt_fn(arg2)
self.assertTrue(same(arg1, arg2))
    def test_slice_mutation3(self):
        """In-place fill_ through a 2D slice view must mutate the base tensor."""
        def fn(a):
            a[:2, :2].fill_(10)
        opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
        x1 = torch.randn(8, 8, device=self.device)
        x2 = x1.clone()
        fn(x1)  # eager reference
        opt_fn(x2)  # compiled
        self.assertEqual(x1, x2)
def test_tensor_index_slice(self):
def fn(a):
x = torch.tensor([1, 2], device=self.device)
y = torch.tensor([2, 3], device=self.device)
xx = torch.tensor([1, 2], device=self.device).view(1, 2)
yy = torch.tensor([1, 2, 3], device=self.device).view(3, 1)
return [
a[x, y],
a[:, x, y],
a[:, x, y, :],
a[x, :, y],
a[:, x, :, y, :],
a[xx, yy],
a[:, xx, yy],
a[xx, :, yy],
a[xx, yy, :],
a[:, xx, :, yy],
]
a = torch.arange(3 * 4 * 5 * 6 * 7, device=self.device).view(3, 4, 5, 6, 7)
refs = fn(a)
tests = torch.compile(fn)(a)
for ref, test in zip(refs, tests):
torch.testing.assert_close(ref, test)
@torch._dynamo.config.patch(recompile_limit=10)
def test_tensor_index_put_slice(self):
def fn(a, version):
x = torch.tensor([1, 2], device=self.device, dtype=torch.int32)
y = torch.tensor([2, 3], device=self.device, dtype=torch.int32)
xx = torch.tensor([1, 2], device=self.device).view(1, 2)
yy = torch.tensor([1, 2, 3], device=self.device).view(3, 1)
if version == 0:
a[x, y] = torch.zeros_like(a[x, y])
elif version == 1:
a[:, x, y] = torch.zeros_like(a[:, x, y])
elif version == 2:
a[:, x, y, :] = torch.zeros_like(a[:, x, y, :])
elif version == 3:
a[x, :, y] = torch.zeros_like(a[x, :, y])
elif version == 4:
a[:, x, :, y, :] = torch.zeros_like(a[:, x, :, y, :])
elif version == 5:
a[xx, yy] = torch.zeros_like(a[xx, yy])
elif version == 6:
a[:, xx, yy] = torch.zeros_like(a[:, xx, yy])
elif version == 7:
a[xx, :, yy] = torch.zeros_like(a[xx, :, yy])
elif version == 8:
a[xx, yy, :] = torch.zeros_like(a[xx, yy, :])
elif version == 9:
a[:, xx, :, yy] = torch.zeros_like(a[:, xx, :, yy])
return a
a = torch.arange(3 * 4 * 5 * 6 * 7, device=self.device, dtype=torch.int32).view(
3, 4, 5, 6, 7
)
for i in range(10):
ref = fn(torch.clone(a), i)
test = torch.compile(fn)(torch.clone(a), i)
torch.testing.assert_close(ref, test)
def test_indirect_load_broadcast(self):
def fn(in_ptr0, in_ptr1, in_ptr2):
return torch.gather(in_ptr1, 0, in_ptr2) + in_ptr0
arg190 = rand_strided((32, 21), (1, 32), device=self.device, dtype=torch.int64)
arg190.fill_(0)
arg111 = rand_strided(
(9521, 512), (512, 1), device=self.device, dtype=torch.float32
)
self.common(
fn,
(
torch.randn(32, 1),
arg111,
arg190,
),
)
def test_roi_align(self):
if not has_torchvision_roi_align():
raise unittest.SkipTest("requires torchvision")
def fn(a, b):
return torch.ops.torchvision.roi_align(a, b, 0.25, 7, 7, 2, False)
self.common(fn, (torch.zeros([4, 256, 296, 304]), torch.zeros([2292, 5])))
# https://github.com/halide/Halide/issues/8256
@config.patch("halide.scheduler_cuda", "Li2018")
def test_nll_loss_forward(self):
def fn(a, b):
return aten.nll_loss_forward(a, b, None, 1, -100)
labels = (
torch.zeros([5], dtype=torch.int64),
torch.tensor([-100, -100, 3, -100, -100], dtype=torch.int64),
)
inps = (torch.randn(5, 5), torch.randn(5, 5))
for a, b in zip(inps, labels):
self.common(
fn,
(a, b),
)
@xfail_if_mps # dtypes mismatch
def test_nll_loss_backward(self):
def fn(a, b, c):
return aten.nll_loss_backward(
a, b, c, None, 1, -100, torch.tensor(1.0, device=self.device)
)
labels = (
torch.zeros([5], dtype=torch.int64),
torch.tensor([-100, -100, 3, -100, -100], dtype=torch.int64),
)
inps = (torch.randn(5, 5), torch.randn(5, 5))
grad_outs = (torch.randn(()), torch.randn(()))
for a, b, c in zip(grad_outs, inps, labels):
self.common(
fn,
(a, b, c),
)
    def test_isinf(self):
        """isinf/isnan across float dtypes; unsupported dtypes must raise TypeError."""
        def fn(x):
            return x.isinf(), x.isnan()
        values = [1, float("inf"), 2, float("-inf"), float("nan")]
        for dtype in [torch.float32, torch.float64, torch.half, torch.bfloat16]:
            # When the backend lacks this dtype, expect TypeError instead of results.
            ctx = (
                contextlib.nullcontext()
                if self.is_dtype_supported(dtype)
                else self.assertRaises(TypeError)
            )
            with ctx:
                self.common(fn, [torch.tensor(values, dtype=dtype)], check_lowp=False)
@skip_if_halide # different nan behavior in ==
def test_isinf2(self):
def fn(x):
y = torch.tensor(
[1, float("inf"), 2, float("-inf"), float("nan")], device=self.device
)
return x == y
self.common(
fn, (torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]),)
)
    def test_any(self):
        """any/all reductions, both dim-wise and full, with and without an inf."""
        def fn(x):
            return (
                x.any(-1),
                x.isinf().any(),
                torch.all(x.isinf(), dim=0),
                torch.all(torch.logical_not(x.isinf())),
            )
        # All-finite input: every isinf() reduction sees only False.
        self.common(fn, [-torch.rand(64)])
        tmp = torch.randn(16, 8)
        tmp[1, 1] = float("inf")  # a single inf flips the reductions
        self.common(fn, [tmp])
@skip_if_gpu_halide
def test_multilayer_any(self):
def fn(x):
return (x.isinf().any(), x.isfinite().all())
sample = torch.rand(9, 3, 353, 353)
self.common(fn, [sample])
sample.view(-1)[-1] = float("inf")
self.common(fn, [sample])
def test_inplace_activations(self):
def fn(x):
a = aten.hardswish_(x + 1)
b = aten.hardtanh_(x + 1)
c = aten.leaky_relu_(x + 1)
d = aten.silu_(x + 1)
e = aten.log1p(x + 1)
f = aten.masked_fill_(x + 1, torch.zeros_like(x, dtype=torch.bool), 99.0)
h = aten.masked_fill_(x + 1, torch.ones_like(x, dtype=torch.bool), 99.0)
return (a, b, c, d, e, f, h)
self.common(fn, [torch.randn(64) * 10])
def test_baddbmm(self):
def fn(a, b, c, beta):
return aten.baddbmm(a, b, c, beta=beta)
b = torch.randn(6, 128, 64)
c = torch.randn(6, 64, 100)
options = itertools.product(
[torch.randn(6, 1, 100), torch.randn(6, 1, 100).fill_(torch.nan)],
[0.0, 1.0],
)
for a, beta in options:
self.common(
fn,
[a, b, c, beta],
# Mismatched elements: 1212 / 76800 (1.6%)
# Greatest absolute difference: 0.001953125 at index (0, 0, 93) (up to 1e-05 allowed)
# Greatest relative difference: 1.0 at index (3, 19, 4) (up to 0.001 allowed)
atol=0.002,
rtol=0.001,
)
@config.patch({"triton.max_tiles": 2})
def test_fuse_tiled(self):
def fn(a, b, c):
return a + b, c + 1
self.common(
fn, [torch.randn(128, 1), torch.randn(1, 128), torch.randn(128, 128)]
)
def test_expand_as(self):
def fn(a, b):
return aten.expand_as(a, b), aten.expand_as(a + 1, b + 1) + 1
self.common(
fn,
[
torch.randn(6, 1, 100),
torch.randn(6, 128, 100),
],
)
def test_index_put1(self):
def fn(a, b, c):
return (
torch.index_put(a, [b], c),
torch.index_put_(a + 1, [b + 1], c + 1) + 1,
)
self.common(
fn,
[
torch.randn([800, 256, 7, 7]),
torch.randperm(601),
torch.randn([601, 256, 7, 7]),
],
)
self.common(
fn, [torch.randn(1024, 4, 2), torch.arange(4), torch.randn(4, 1, 1)]
)
def test_index_put2(self):
def fn(a, b, c):
return torch.index_put(a, [b], c, True)
self.common(
fn,
[
torch.randn([100, 256, 7, 7]),
torch.randint(0, 100, size=[600], dtype=torch.int64),
torch.randn([600, 256, 7, 7]),
],
# workaround for https://github.com/triton-lang/triton/issues/558
check_lowp=False,
)
def test_index_put3(self):
def fn(a, b, c):
torch.ops.aten.index_put_(a, (None, b, None), c)
a1 = a + 1
torch.ops.aten.index_put_(a1, (None, b + 1, None), c + 1)
return (a, a1)
self.common(
fn,
[
torch.randn([1024, 4, 2]),
torch.arange(3),
torch.randn([1024, 1, 2]),
],
)
    def test_index_put4(self):
        """index_put with a boolean mask index and a 0-d scalar source."""
        # a, b[0] are not broadcastable
        # https://github.com/pytorch/pytorch/issues/97104
        def fn(a, b, c):
            return torch.index_put(a, [b], c)
        self.common(
            fn,
            [
                torch.rand([8, 2]),
                torch.rand([8]) > 0.5,  # boolean mask index
                torch.rand([]),  # 0-d scalar source value
            ],
        )
def test_index_put_as_masked_fill(self):
def fn(a, b, c, d):
a = a.clone()
torch.ops.aten.index_put_(a, [b], c, d)
return a
self.common(
fn,
(
torch.randn([1024, 4, 2]),
torch.randn([1024, 4, 2]) > 0,
torch.randn([]),
False,
),
)
self.common(
fn,
(
torch.randn([1024, 4, 2]),
torch.randn([1024, 4, 2]) > 0,
torch.randn([]),
True,
),
)
def test_index_put_fallback1(self):
def fn(a, b, c, d):
a = a.clone()
torch.ops.aten.index_put_(a, [b], c, d)
return a
self.common(
fn,
(
torch.randn([3]),
torch.as_tensor([True, True, False]),
torch.randn([2]),
False,
),
)
self.common(
fn,
(
torch.randn([3]),
torch.as_tensor([True, True, False]),
torch.randn([2]),
True,
),
)
def test_index_put_fallback2(self):
def fn(a, b, c, d, e):
a = a.clone()
torch.ops.aten.index_put_(a, [None, b, c], d, e)
return a
self.common(
fn,
(
torch.randn([1, 2, 3]),
torch.as_tensor([0, 1]),
torch.as_tensor([True, True, False]),
torch.randn([]),
False,
),
)
self.common(
fn,
(
torch.randn([1, 2, 3]),
torch.as_tensor([0, 1]),
torch.as_tensor([True, True, False]),
torch.randn([]),
True,
),
)
def test_index_put_deterministic_fallback(self):
with DeterministicGuard(True):
def fn(a, b, c):
return torch.index_put(a, [b], c, True)
self.common(
fn,
[
torch.randn([100, 32]),
torch.randint(0, 100, size=[600], dtype=torch.int64),
torch.randn([600, 32]),
],
check_lowp=False,
)
@skip_if_gpu_halide # https://github.com/halide/Halide/issues/8312
def test_index_put_index(self):
def fn(ind, x, src):
y = torch.ops.aten.index_put.default(x, [ind], src)
return torch.ops.aten.index.Tensor(y, [ind])
args = [torch.tensor([1], dtype=torch.int64), torch.randn(8, 4), torch.randn(4)]
self.common(fn, args)
def test_index_put_reinplace(self):
def fn(x, idx):
src = torch.ones(idx.size(0), device=x.device)
x.index_put_((idx,), src)
return x.expand((2, x.shape[0]))
a = torch.randn(1024)
idx = torch.arange(10)
torch._inductor.metrics.generated_kernel_count = 0
self.common(fn, (a, idx))
assertGeneratedKernelCountEqual(self, 1)
def test_index_put_failed_reinplace(self):
def fn(x, idx):
src = torch.ones(idx.size(0), device=x.device)
y = x.index_put((idx,), src)
return x, y
a = torch.randn(1024)
idx = torch.arange(10)
torch._inductor.metrics.generated_kernel_count = 0
self.common(fn, (a, idx))
assertGeneratedKernelCountEqual(self, 2)
    def test_adding_tensor_offsets(self):
        """A compiled slice must work on views with different storage offsets."""
        @torch.compile(fullgraph=True)
        def fn(x):
            return x[16:32]
        with torch.no_grad():
            x = torch.randn(1024, device=self.device)
            # Same buffer, different storage offsets into it.
            self.assertEqual(fn(x[0:]), x[16:][:16])
            self.assertEqual(fn(x[128:]), x[128 + 16 :][:16])
def test_index_float_zero(self):
def fn(arg0, arg1, arg2):
t1 = torch.tanh(arg0)
t2 = t1.clone()
t2.fill_(arg1.item())
t3 = torch.clamp(t2, 0, arg2.size(0) - 1).to(torch.long)
return torch.nn.functional.embedding(t3, arg2)
arg0 = torch.randint(0, 1000, [47], dtype=torch.int64, device=self.device)
arg1 = torch.randint(0, 1000, [], dtype=torch.int64, device=self.device)
arg2 = torch.rand([256, 88], dtype=torch.float16, device=self.device)
cfn = torch.compile(fullgraph=True, dynamic=True)(fn)
self.assertEqual(fn(arg0, arg1, arg2), cfn(arg0, arg1, arg2))
# from GPT2ForSequenceClassification
@skip_if_gpu_halide
def test_index_tensor(self):
def fn(x, y):
ne = torch.ops.aten.ne.Scalar(x, 0)
sum = torch.ops.aten.sum.dim_IntList(ne, [-1])
sub = torch.ops.aten.sub.Tensor(sum, 1)
iota = torch.ops.prims.iota.default(
1,
start=0,
step=1,
dtype=torch.int64,
device=x.device,
requires_grad=False,
)
return torch.ops.aten.index.Tensor(y, [iota, sub])
self.common(fn, [torch.randn(1, 1024), torch.randn(1, 1024, 2)])
@config.patch(fallback_random=True)
def test_bernoulli1(self):
def fn(a):
b = a.clone()
# aten.bernoulli_() uses aten.bernoulli.p() behind the scene, so it will be decomposed.
return aten.bernoulli_(b).sum() / torch.prod(torch.tensor(a.size()))
p = 0.3
self.common(
fn,
[
torch.ones(200, 200) * p,
],
atol=p * 0.06,
rtol=0.06,
)
@skip_if_triton_cpu
def test_bernoulli2(self):
def fn(a):
return aten.bernoulli(a).sum() / torch.prod(torch.tensor(a.size()))
p = 0.3
self.common(
fn,
[torch.ones(200, 200) * p],
atol=p * 0.06,
rtol=0.06,
)
def test_narrow(self):
def fn(x):
return (
aten.narrow(x, 1, 10, 16),
aten.narrow(x + 2, 0, 10, 16) + 1,
aten.narrow_copy(x, 1, 10, 16),
)
self.common(fn, [torch.randn(64, 64)])
def test_as_strided(self):
def fn(x):
return (
aten.as_strided(x, (8, 8, 64), (8 * 64, 64, 1), 0),
aten.as_strided(x + 1, (8, 8, 64), (8 * 64, 64, 1), 0) + 2,
)
def fn_channels_last(x):
return (
aten.as_strided(
x, (8, 384, 2, 20, 12), (153600, 1, 61440, 384, 7680), 0
),
aten.as_strided(
x + 1, (8, 384, 2, 20, 12), (153600, 1, 61440, 384, 7680), 0
)
+ 2,
)
self.common(fn, [torch.randn(64, 64)])
self.common(
fn_channels_last,
[torch.randn(8, 384, 20, 20).to(memory_format=torch.channels_last)],
)
def test_exact_stride(self):
full = torch.randn((16, 16), device=self.device)
view = torch.as_strided(full, (16, 8), full.stride())
def fn(x):
result = x + x
result_strided = torch.empty_strided(
x.size(), x.stride(), device=self.device
)
result_strided[:] = result
return result_strided
self.common(fn, [view])
reference_out = fn(view)
compiled_fn = torch.compile(fn)
actual_out = compiled_fn(view)
self.assertEqual(reference_out.stride(), actual_out.stride())
def test_like_channels_last(self):
def foo():
randn = torch.randn((4, 3, 8, 8), device=self.device, dtype=torch.float32)
xc = randn.contiguous(memory_format=torch.channels_last)
clone = torch.zeros_like(xc, memory_format=torch.preserve_format)
rand_like = torch.rand_like(randn)
return (xc, clone, rand_like)
out = foo()
out_comp = torch.compile()(foo)()
for t, t_comp in zip(out, out_comp):
self.assertEqual(t.stride(), t_comp.stride())
def test_as_strided_scatter(self):
def fn(a, b):
return aten.as_strided_scatter(
a * 8 + 10,
b * 2 - 4,
size=(a.shape[0], a.shape[1] // 2),
stride=(a.shape[1], 2),
storage_offset=0,
)
self.common(fn, [torch.randn(10, 1024), torch.randn(10, 512)])
def test_select_scatter(self):
def fn(x, a, b):
return (
aten.select_scatter(x, a, 1, 0),
aten.select_scatter(x, b, 0, 1),
)
self.common(
fn,
[
torch.randn(8, 197, 38),
torch.randn(8, 38),
torch.randn(197, 38),
],
)
@skip_if_gpu_halide # accuracy issue
def test_slice_scatter(self):
def fn(x, a):
return (
aten.slice_scatter(x, a, 2, 10, -10),
aten.slice_scatter(x, a[:, :, :40], 2, 10, -10, 2),
)
self.common(
fn,
[
torch.randn(4, 8, 100),
torch.randn(4, 8, 80),
],
)
def test_slice_scatter2(self):
def fn(a, b):
return aten.slice_scatter(a, b, 0, 0, 9223372036854775807)
self.common(
fn,
[
torch.randn([8, 197, 384]),
torch.randn([8, 197, 384]),
],
)
def test_slice_scatter3(self):
def fn(a, b):
return aten.slice_scatter.default(a, b, 1, 1, 9223372036854775807, 2)
self.common(
fn,
[
torch.randn([1, 4]),
torch.randn([1, 2]),
],
)
def test_slice_scatter4(self):
def fn(a, b):
return aten.slice_scatter.default(a, b, 1, 2, 9223372036854775807, 3)
self.common(
fn,
[
torch.randn([1, 9]),
torch.randn([1, 3]),
],
)
    def test_slice_scatter5(self):
        """slice_scatter with empty slices whose bounds need clamping."""
        # empty slices that require clamping the start or end
        def fn(a, b):
            return (
                aten.slice_scatter.default(a, b, 0, 2, 0, 1),  # end < start
                aten.slice_scatter.default(a, b, 0, a.shape[0], a.shape[0] + 10, 1),  # past the end
                aten.slice_scatter.default(a, b, 0, -20, 0, 1),  # start clamps to 0
                aten.slice_scatter.default(a, b, 0, -20, -16, 1),  # fully below range
            )
        a = torch.arange(10, dtype=torch.float)
        b = torch.empty(0)  # zero-sized source matching the empty slices
        self.common(fn, [a, b])
@with_tf32_off
def test_slice_scatter_reinplace(self):
class M(nn.Module):
def __init__(self, device):
super().__init__()
self.linear1 = nn.Linear(64, 64, bias=False)
self.cache_k = torch.zeros((56, 384, 8, 64), device=device)
def forward(self, x, start_pos):
bsz, seqlen, _, _ = x.shape
xk = self.linear1(x)
with torch.no_grad():
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
keys = self.cache_k[:bsz, : start_pos + seqlen]
scores = torch.matmul(
xk.transpose(1, 2), keys.transpose(1, 2).transpose(2, 3)
)
return scores
kv_cache_module = M(self.device)
inp = torch.randn(1, 32, 8, 64)
# Test that the cache update is reinplaced such that the cache is updated inplace
# rather than copy-scatter-copy-back.
torch._inductor.metrics.generated_kernel_count = 0
with torch.no_grad():
self.common(kv_cache_module, (inp, 1), check_lowp=False)
if (
config.triton.native_matmul
and config.cuda_backend == "triton"
and self.device == "cuda"
):
assertGeneratedKernelCountEqual(self, 2)
else:
assertGeneratedKernelCountEqual(self, 1)
@skipIfMPS
def test_slice_scatter_dtype_consistency(self):
# Test dtype consistency of slice_scatter
def fn(x, y):
return torch.slice_scatter(y, x, 0)
for dtype in [
torch.int64,
torch.float64,
]:
self.common(
fn,
[
torch.tensor([0], dtype=dtype),
torch.tensor([0], dtype=torch.float32),
],
)
@skip_if_gpu_halide # compile error on gpu
def test_scatter1(self):
def fn(a, dim, index, b):
return aten.scatter(a, dim, index, b)
self.common(
fn,
[
torch.zeros(2, 3),
-1,
torch.tensor([[0]]),
torch.ones(2, 3),
],
)
    def test_scatter2(self):
        """aten.scatter.reduce with reduce="add" on a large 2D tensor."""
        if self.device == "cuda":
            raise unittest.SkipTest("unstable on sm86")
        # NOTE(review): lowp comparison is disabled on xpu — presumably a
        # tolerance/accuracy issue on that backend; confirm before relying on it.
        check_lowp = True
        if self.device == "xpu":
            check_lowp = False
        def fn(a, dim, index, b):
            return aten.scatter.reduce(a, dim, index, b, reduce="add")
        self.common(
            fn,
            [
                torch.zeros(64, 512),
                0,
                torch.zeros((64, 512), dtype=torch.int64),
                torch.ones(64, 512),
            ],
            check_lowp=check_lowp,
        )
def test_scatter3(self):
def fn(a, dim, index, b):
return aten.scatter(a, dim, index, b, reduce="add")
check_lowp = True
if self.device == "xpu":
check_lowp = False
self.common(
fn,
[
torch.randn(5, 29, 13),
2,
torch.tensor([[[3, 5, 7, 9]]]),
0.8, # src can be a scalar
],
# Mismatched elements: 1 / 1885 (0.1%)
# Greatest absolute difference: 0.00018310546875 at index (0, 0, 3) (up to 1e-05 allowed)
# Greatest relative difference: 0.0022371364653243847 at index (0, 0, 3) (up to 0.001 allowed)
atol=2e-4,
rtol=1e-3,
check_lowp=check_lowp,
)
def test_scatter4(self):
def fn(x, ind, src):
return torch.scatter(x, 0, ind, src)
check_lowp = True
if self.device == "xpu":
check_lowp = False
for deterministic in [False, True]:
with DeterministicGuard(deterministic):
self.common(
fn,
[
torch.randn(196, 992),
torch.randint(196, (1, 992)),
torch.randn(1, 992),
],
check_lowp=check_lowp,
)
def test_scatter5(self):
def fn(a, dim, index, b, reduce):
a = a.clone()
a.scatter_(dim, index, b, reduce=reduce)
a1 = a + 1.0
a1.scatter_(dim, index, b, reduce=reduce)
return (a, a1)
check_lowp = True
if self.device == "xpu":
check_lowp = False
for reduce in ["add", "multiply"]:
self.common(
fn,
[
torch.ones((4, 5)),
0,
torch.tensor([[1], [2], [3]], dtype=torch.int64),
torch.randn(4, 5),
reduce,
],
check_lowp=check_lowp,
)
def test_scatter6(self):
def fn(a, dim, index, b):
return aten.scatter(a, dim, index, b)
check_lowp = True
if self.device == "xpu":
check_lowp = False
for deterministic in [False, True]:
with DeterministicGuard(deterministic):
self.common(
fn,
[
torch.randn(5, 8, 13),
2,
torch.tensor([[[3, 5, 7, 9]]]),
0.8, # src can be a scalar
],
check_lowp=check_lowp,
)
@unittest.skip("Flaky test, needs debugging")
def test_scatter_add1(self):
def fn(a, dim, index, b):
return aten.scatter_add(a, dim, index, b)
check_lowp = True
if self.device == "xpu":
check_lowp = False
self.common(
fn,
[
torch.randn(2, 3),
0,
torch.tensor([[0]]),
torch.randn(2, 3),
],
check_lowp=check_lowp,
)
def test_scatter_add2(self):
def fn(a, dim, index, b):
return aten.scatter_add(a, dim, index, b)
check_lowp = True
if self.device == "xpu":
check_lowp = False
self.common(
fn,
[
torch.randn(2, 3),
0,
torch.tensor([[0, 0, 0], [1, 1, 1]]),
torch.randn(2, 3),
],
check_lowp=check_lowp,
)
def test_scatter_add3(self):
def fn(a, dim, index, b):
return aten.scatter_add(a, dim, index, b)
check_lowp = True
if self.device == "xpu":
check_lowp = False
for deterministic in [False, True]:
if deterministic and self.device == "xpu":
# There is no deterministic implementation for scatter_add on Intel GPU.
continue
with DeterministicGuard(deterministic):
self.common(
fn,
[
torch.randn(5, 29, 13),
2,
torch.tensor([[[3, 5, 7, 9]]]),
torch.randn(1, 1, 10),
],
check_lowp=check_lowp,
)
def test_scatter_reduce1(self):
def fn(a, dim, index, b):
return aten.scatter_reduce(a, dim, index, b, "sum")
check_lowp = True
if self.device == "xpu":
check_lowp = False
self.common(
fn,
[
torch.randn(5, 29, 13),
2,
torch.tensor([[[3, 5, 7, 9]]]),
torch.randn(1, 1, 10),
],
check_lowp=check_lowp,
)
def test_scatter_reduce2(self):
def fn(a, dim, index, b, reduce):
return aten.scatter_reduce(a, dim, index, b, reduce, include_self=False)
check_lowp = True
if self.device == "xpu":
check_lowp = False
for reduce in ["sum", "amax"]:
self.common(
fn,
[
torch.randn(2, 3),
0,
torch.zeros((2, 3), dtype=torch.int64),
torch.randn(2, 3),
reduce,
],
check_lowp=check_lowp,
)
def test_scatter_reduce3(self):
def fn(a, dim, index, b, reduce):
a = a.clone()
a.scatter_reduce_(dim, index, b, reduce=reduce)
a1 = a + 1.0
a1.scatter_reduce_(dim, index, b, reduce=reduce)
return (a, a1)
check_lowp = True
if self.device == "xpu":
check_lowp = False
for reduce in ["sum", "prod"]:
self.common(
fn,
[
torch.ones((4, 5)),
0,
torch.tensor([[1], [2], [3]], dtype=torch.int64),
torch.randn(4, 5),
reduce,
],
check_lowp=check_lowp,
)
@skip_if_gpu_halide
def test_dense_mask_index(self):
r"""
There will be a little difference for reduce order between aten and inductor
https://github.com/pytorch/pytorch/pull/122289
Absolute difference: 0.00067138671875 (up to 1e-05 allowed)
Relative difference: 3.1747371732500974e-06 (up to 1.3e-06 allowed)
"""
kwargs = {}
if self.device == "cpu":
kwargs["atol"] = 1e-4
kwargs["rtol"] = 1.3e-5
def fn(x, y):
y = torch.ops.aten.select.int(y, 0, 2)
z = x * y
return z.sum()
self.common(fn, [torch.randn(102400), torch.randn(3)], **kwargs)
def test_empty1(self):
def fn():
return torch.empty((1, 128, 128))
self.common(fn, [], assert_equal=False)
def test_empty2(self):
def fn():
return aten.empty((1, 128, 128))
self.common(fn, [], assert_equal=False)
def test_new_empty(self):
def fn(a):
return aten.new_empty(a, [1, 128, 128])
self.common(fn, [torch.randn(55)], assert_equal=False)
def test_empty_strided(self):
def fn():
return aten.empty_strided([1, 128, 128], [16384, 128, 1])
self.common(fn, [], assert_equal=False)
def test_new_empty_strided(self):
def fn(a):
return aten.new_empty_strided(a, [1, 128, 128], [16384, 128, 1])
self.common(fn, [torch.randn(55)], assert_equal=False)
def test_dropout_trivial_0(self):
def fn1(a):
return torch.nn.functional.dropout(a, 0.0, True) + a
self.common(fn1, [torch.randn(55)])
def test_dropout_trivial_1(self):
def fn2(a):
return torch.nn.functional.dropout(a, 1.0, True) + a
self.common(fn2, [torch.randn(55)])
@config.patch({"triton.cudagraphs": True})
@dynamo_config.patch(automatic_dynamic_shapes=True)
def test_dropout(self):
random.seed(1234)
torch.manual_seed(1234)
@torch.compile(backend="inductor")
def fn1(a):
return torch.nn.functional.dropout(a)
x = torch.ones(1000, device=self.device, dtype=torch.float32)
result1 = fn1(x)
self.assertTrue(400 < result1.nonzero().shape[0] < 600)
self.assertTrue(0.9 < result1.mean().item() < 1.1)
random.seed(1234)
torch.manual_seed(1234)
@torch.compile(backend="inductor")
def fn2(a):
return torch.nn.functional.dropout(a, 0.5, True)
result2 = fn2(x)
self.assertTrue(400 < result2.nonzero().shape[0] < 600)
self.assertTrue(0.9 < result2.mean().item() < 1.1)
@dynamo_config.patch(automatic_dynamic_shapes=True)
def test_dropout_deterministic(self):
@torch.compile(backend="inductor")
def fn(a):
return torch.nn.functional.dropout(a, 0.55, True)
for cg in [False, True]:
with patch.object(config.triton, "cudagraphs", cg):
torch._dynamo.reset()
x = torch.ones(1024, device=self.device, dtype=torch.float32)
torch.manual_seed(1234)
a0 = fn(x).clone()
a1 = fn(x).clone()
a2 = fn(x).clone()
torch.manual_seed(1234)
b0 = fn(x).clone()
b1 = fn(x).clone()
b2 = fn(x).clone()
# same seed, same values
self.assertTrue(torch.allclose(a0, b0))
self.assertTrue(torch.allclose(a1, b1))
self.assertTrue(torch.allclose(a2, b2))
# different calls, different values
self.assertFalse(torch.allclose(a0, a1))
self.assertFalse(torch.allclose(a1, a2))
    def test_rand_like_deterministic(self):
        """rand_like under a fixed manual seed: reproducible across runs,
        but distinct across calls and across the two outputs of one call."""
        @torch.compile(backend="inductor")
        def fn(a):
            return torch.rand_like(a), torch.rand_like(a)
        x = torch.ones(1024, device=self.device, dtype=torch.float32)
        torch.manual_seed(1234)
        a0 = fn(x)[0].clone()
        a1 = fn(x)[0].clone()
        a2 = fn(x)[0].clone()
        torch.manual_seed(1234)
        b0 = fn(x)[0].clone()
        b1 = fn(x)[0].clone()
        b2 = fn(x)[0].clone()
        # same seed, same values
        self.assertTrue(torch.allclose(a0, b0))
        self.assertTrue(torch.allclose(a1, b1))
        self.assertTrue(torch.allclose(a2, b2))
        # different calls, different values
        self.assertFalse(torch.allclose(a0, a1))
        self.assertFalse(torch.allclose(a1, a2))
        c, d = fn(x)
        # The two rand_like outputs within one call must differ ...
        self.assertFalse(torch.allclose(c, d))
        # ... and every value must stay within [0, 1).
        self.assertTrue((c >= 0).all())
        self.assertTrue((c < 1).all())
        self.assertTrue((d >= 0).all())
        self.assertTrue((d < 1).all())
@config.patch(implicit_fallbacks=True)
def test_needs_contiguous_strides(self):
# Construct a custom op whose output strides are not contiguous
@torch.library.custom_op("mylib::myop", mutates_args={})
def myop(x: torch.Tensor) -> torch.Tensor:
return torch.zeros(2, 2).t()
@myop.register_fake
def _(x):
return torch.zeros(2, 2).t()
# custom op that needs contiguous inputs
@torch.library.custom_op(
"mylib::second_op",
mutates_args={},
tags=[torch._C.Tag.needs_contiguous_strides],
)
def second_op(x: torch.Tensor) -> torch.Tensor:
assert x.is_contiguous()
return torch.ones(2, 2)
@second_op.register_fake
def _(x):
return torch.ones(2, 2)
def f(x):
y = myop(x)
return second_op(y)
# Check that the x.is_contiguous() assertion never gets triggered
x = torch.randn(2, 2)
_ = torch.compile(f, backend="inductor", fullgraph=True)(x)
    @config.patch(implicit_fallbacks=True)
    def test_fallback_mutable_op_basic(self):
        # Custom op with mutable tensor args must fall back correctly and the
        # surrounding copy_ calls must not be reordered around the mutation.
        with torch.library._scoped_library("mylib", "FRAGMENT") as m:
            def impl(a, b, c, d, e=2):
                a.add_(b[0] * c * e)
                if d is not None:
                    d.add_(b[1])

            m.define(
                "inplace_(Tensor(a!) a, Tensor[] b, SymInt c, *, Tensor(b!)? d, SymInt e=2) -> ()"
            )
            m.impl("inplace_", impl, "CompositeExplicitAutograd")

            # We do some clones and copy_ to test that Inductor doesn't reorder
            # the copy_ w.r.t. inplace_.
            def f(a, b1, b2, c, d):
                a_ = a.clone()
                d_ = d if d is None else d.clone()
                torch.ops.mylib.inplace_(a_, (b1, b2), c, d=d_)
                a.copy_(a_)
                if d is not None:
                    d.copy_(d_)
                return ()

            a = torch.tensor([0.0, 1.0, 2])
            b = [torch.tensor([2.0, 3.0, 5.0]), torch.tensor([1.0, 4.0, 6.0])]
            c = 4
            d = torch.tensor([2.0, 1, 0])
            args = (a, b[0], b[1], c, d)
            cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
            mod = make_fx(f)(*cloned_args)
            cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
            compiled_f = compile_fx_inner(mod, cloned_args)
            cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
            compiled_f(list(cloned_args))
            f(*args)
            # compiled mutation must match the eager mutation
            self.assertEqual(cloned_args, args)
    @skip_if_cpp_wrapper(
        "Without major redesign, cpp_wrapper will not support custom ops that are "
        "defined in Python."
    )
    @config.patch(implicit_fallbacks=True)
    def test_fallback_mutable_op_list_tensor(self):
        # Custom op taking an optional list of mutable tensors; compiled
        # result must match eager for both the return value and the list.
        @torch.library.custom_op(
            "mylib::mysin",
            mutates_args=["out_list"],
            schema="(Tensor x, Tensor(a!)[]? out_list) -> Tensor",
        )
        def mysin(x, out_list) -> torch.Tensor:
            r = x.sin()
            if out_list is not None:
                out_list[0].copy_(r)
            return r

        @mysin.register_fake
        def _(x, out_list) -> torch.Tensor:
            return torch.empty_like(x)

        def fn(x):
            x = x * 3
            s = [torch.empty_like(x)]
            x = mysin(x, s)
            x = x / 3
            return x, s[0]

        x = torch.randn(3, requires_grad=False)
        expected = fn(x)
        result = torch.compile(fn, fullgraph=True)(x)
        self.assertEqual(result, expected)
    @config.patch(implicit_fallbacks=True)
    def test_fallback_mutable_op_with_return(self):
        # Same as the basic mutable-op fallback test, but the op also returns
        # a tensor; both the mutations and the return value are checked.
        with torch.library._scoped_library("mylib", "FRAGMENT") as m:
            def impl(a, b, c, d, e=2):
                a.add_(b[0] * c * e)
                if d is not None:
                    d.add_(b[1])
                return b[0] + b[1]

            m.define(
                "inplace_(Tensor(a!) a, Tensor[] b, SymInt c, *, Tensor(b!)? d, SymInt e=2) -> Tensor"
            )
            m.impl("inplace_", impl, "CompositeExplicitAutograd")

            # We do some clones and copy_ to test that Inductor doesn't reorder
            # the copy_ w.r.t. inplace_.
            def f(a, b0, b1, c, d):
                a_ = a.clone()
                d_ = d if d is None else d.clone()
                res = torch.ops.mylib.inplace_(a_, (b0, b1), c, d=d_)
                a.copy_(a_)
                if d is not None:
                    d.copy_(d_)
                return (res,)

            a = torch.tensor([0.0, 1.0, 2])
            b = [torch.tensor([2.0, 3.0, 5.0]), torch.tensor([1.0, 4.0, 6.0])]
            c = 4
            d = torch.tensor([2.0, 1, 0])
            args = (a, b[0], b[1], c, d)
            cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
            mod = make_fx(f)(*cloned_args)
            cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
            compiled_f = compile_fx_inner(mod, cloned_args)
            cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
            compiled_out = compiled_f(list(cloned_args))
            out = f(*args)
            self.assertEqual(cloned_args, args)
            self.assertEqual(compiled_out, out)
    @config.patch(implicit_fallbacks=True)
    def test_fallback_mutable_op_no_mutated_tensors(self):
        # Op declares a mutable optional arg, but None is passed, so no
        # mutation actually happens; the fallback must still compile and run.
        with torch.library._scoped_library("mylib", "FRAGMENT") as m:
            def impl(a, b):
                if b is not None:
                    b.add_(1)

            m.define("inplace_(Tensor a, Tensor(b!)? b) -> ()")
            m.impl("inplace_", impl, "CompositeExplicitAutograd")

            def f(a):
                torch.ops.mylib.inplace_(a, None)
                return ()

            a = torch.tensor([0.0, 1.0, 2])
            args = (a,)
            cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
            mod = make_fx(f)(*cloned_args)
            cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
            compiled_f = compile_fx_inner(mod, cloned_args)
            cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
            compiled_f(list(cloned_args))
            f(*args)
            self.assertEqual(cloned_args, args)
@config.patch(implicit_fallbacks=True)
@skip_if_cpp_wrapper(
"Without major redesign, cpp_wrapper will not support custom ops that are "
"defined in Python."
)
def test_fallback_mutable_op_list(self):
with torch.library._scoped_library("mylib", "FRAGMENT") as m:
def impl(a, b):
for bi in b:
bi.add_(a)
m.define("inplace_(Tensor a, Tensor(a!)[] b) -> ()")
m.impl("inplace_", impl, "CompositeExplicitAutograd")
def f(a, b):
torch.ops.mylib.inplace_(a, b)
return None
a = torch.tensor([0.0, 1.0, 2])
b = [torch.tensor([2.0, 3.0, 5.0]), torch.tensor([1.0, 4.0, 6.0])]
args = (a, b)
cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
mod = make_fx(f)(*cloned_args)
cloned_args = pytree.tree_map_only(torch.Tensor, torch.clone, args)
compiled_f = compile_fx_inner(mod, cloned_args)
@torch.library.custom_op("mylib::sin_out", mutates_args={"outs"})
def sin_out(x: torch.Tensor, outs: list[torch.Tensor]) -> None:
x_np = x.numpy()
assert len(outs) == 2
out_np0 = out[0].numpy()
out_np1 = out[1].numpy()
np.sin(x_np, out=out_np0)
np.sin(x_np, out=out_np1)
@torch.compile
def g(x):
outs = [torch.empty_like(x) for _ in range(2)]
sin_out(x, outs)
return outs
x = torch.randn(3)
out = [torch.empty_like(x) for _ in range(2)]
y = g(x)
    @xfail_if_mps_unimplemented  # rng_prims not supported for MPS
    def test_functionalize_rng_wrappers(self):
        # Ideally, we would like to use torch.compile for these operators. But
        # currently the plan is to introduce these operators at the partitioner
        # level, obviating the need to support them fully through the
        # torch.compile stack. To ensure that we have good enough debugging with
        # minifiers, we have ensure that they work with make_fx. This test uses
        # make_fx to do the testing. In future, we can move on torch.compile.
        def fn():
            # run_and_save_rng_state captures the RNG state alongside the
            # result; run_with_rng_state replays the op under that state.
            rng_state1, a1 = torch._prims.rng_prims.run_and_save_rng_state(
                torch.ops.aten.rand.default,
                [4, 4],
                dtype=torch.float32,
                device=self.device,
            )
            rng_state2, a2 = torch._prims.rng_prims.run_and_save_rng_state(
                torch.ops.aten.rand.default,
                [4, 4],
                dtype=torch.float32,
                device=self.device,
            )

            b1 = torch._prims.rng_prims.run_with_rng_state(
                rng_state1,
                torch.ops.aten.rand.default,
                [4, 4],
                dtype=torch.float32,
                device=self.device,
            )
            b2 = torch._prims.rng_prims.run_with_rng_state(
                rng_state2,
                torch.ops.aten.rand.default,
                [4, 4],
                dtype=torch.float32,
                device=self.device,
            )

            return (a1, a2, b1, b2)

        mod = make_fx(fn)()
        compiled_f = compile_fx_inner(mod, ())
        a1, a2, b1, b2 = compiled_f(())
        # replayed values must equal the originally saved ones
        self.assertEqual(a1, b1)
        self.assertEqual(a2, b2)
    @patch.object(torch._functorch.config, "functionalize_rng_ops", True)
    @expectedFailureXPU
    @skip_if_gpu_halide  # rand
    @xfail_if_mps
    def test_philox_rand(self):
        # Functionalized RNG (Philox-based) must be reproducible under
        # manual_seed and must advance the device RNG offset correctly.
        if self.device == "cpu":
            raise unittest.SkipTest(
                f"functionalization of rng ops supported only on {GPU_TYPE}"
            )

        @torch.compile(backend="inductor")
        def fn(x):
            a = torch.rand_like(x) * x
            a = torch.rand_like(x) * a
            return a

        def check(x):
            torch.manual_seed(123)
            a = fn(x)
            torch.manual_seed(1234)
            b = fn(x)
            torch.manual_seed(123)
            c = fn(x)
            # same seed, same values
            self.assertTrue(torch.allclose(a, c))
            # different calls, different values
            self.assertFalse(torch.allclose(a, b))

        check(torch.ones(1024, device=self.device, dtype=torch.float32))
        # Need comment: should we add "_get_rng_state_offset" to common device interface?
        self.assertEqual(getattr(torch, self.device)._get_rng_state_offset(), 2048)
        # Check non-multiple of 4 numel
        check(torch.ones(3, device=self.device, dtype=torch.float32))
        self.assertEqual(getattr(torch, self.device)._get_rng_state_offset(), 8)
# Already on by default, just want to make sure
@patch.object(torch._inductor.config, "allow_buffer_reuse", True)
def test_reuse_buffers_with_aliasing(self):
def f(x):
z = x + 1
z = torch.view_as_complex(z)
a = torch.view_as_real(z)
out = a + 1
return out, torch.view_as_real(z + 1)
self.common(f, (torch.zeros((4, 2)),))
code = run_and_get_triton_code(torch.compile(f), torch.zeros((4, 2)))
# Make sure that we haven't added complex support and made this test
# invalid. If we've added complex support please update the test to use
# a different set of view ops we don't lower
self.assertTrue("aten.view_as_real" in code)
def f2(x):
z = x + 1
z = torch.view_as_complex(z)
z = torch.view_as_real(z)
z = torch.view_as_complex(z)
a = torch.view_as_real(z)
out = a + 1
return out, torch.view_as_real(z + 1)
self.common(f, (torch.zeros((4, 2)),))
    @xfail_if_triton_cpu  # libdevice.fma
    def test_softmax_backward_data(self):
        # Checks lowering of aten._softmax_backward_data against eager.
        def fn(a, b):
            return aten._softmax_backward_data(a, b, dim=1, input_dtype=torch.float32)

        self.common(
            fn,
            (
                torch.randn(10, 10),
                torch.randn(10, 10),
            ),
        )
    def test_randn_like_empty(self):
        # randn_like on a zero-element tensor (from min over an empty dim)
        # must compile and preserve shape/strides exactly.
        class Model(torch.nn.Module):
            def __init__(
                self,
            ):
                super().__init__()

            def forward(self, v1: torch.Tensor):
                vx = v1.min(dim=1).values
                v2 = torch.randn_like(vx)
                return v2

        model = Model()
        x = torch.rand(10, 3, 0)  # zero-size trailing dimension
        self.common(model, (x,), exact_stride=True)
    def test_randint(self):
        # randint/randint_like must be seed-deterministic, honor the given
        # [low, high) range, and support values beyond 32 bits (2**50).
        @torch.compile(fullgraph=True)
        def fn(x):
            return (
                torch.randint(10, [1024], device=x.device),
                torch.randint(-4, 7, [1024], dtype=torch.int32, device=x.device),
                torch.randint_like(x, 2**50),
            )

        torch.manual_seed(12345)
        a0, b0, c0 = fn(torch.zeros([40, 40], device=self.device))
        self.assertEqual(a0.shape, [1024])
        self.assertEqual(b0.shape, [1024])
        self.assertEqual(c0.shape, [40, 40])
        torch.manual_seed(12345)
        a1, b1, c1 = fn(torch.zeros([40, 40], device=self.device))
        # same seed => identical outputs
        self.assertEqual(a0, a1)
        self.assertEqual(b0, b1)
        self.assertEqual(c0, c1)
        # range checks: randint upper bound is exclusive
        self.assertEqual(a0.min(), 0)
        self.assertEqual(a0.max(), 9)
        self.assertEqual(b0.min(), -4)
        self.assertEqual(b0.max(), 6)
        self.assertGreaterEqual(c0.min(), 0)
        self.assertGreater(c0.max(), 2**40)
        self.assertLess(c0.max(), 2**50)
def test_randint_distribution(self):
@torch.compile(fullgraph=True)
def fn(n_argsmax, size):
return torch.randint(n_max, (size,), device=self.device)
def bin(index, max_size):
return index // (max_size // n_bins)
size = 1_000_000
n_max = int(0.75 * 2**32)
n_bins = 8
res = fn(n_max, size)
bins = bin(res, n_max).float().cpu()
hist, _ = bins.histogram(8, range=(0, n_bins))
expected_bin = res.shape[0] / 8
expected_error = math.sqrt(expected_bin) / expected_bin * 3
error = (hist - expected_bin).abs().max() / expected_bin
self.assertTrue(error < expected_error)
    @config.patch(fallback_random=True)
    @xfail_if_mps  # 100% are not close
    def test_like_rands(self):
        # With fallback_random, *_like RNG ops must match eager exactly,
        # including output strides.
        def fn(x):
            return torch.rand_like(x), torch.randn_like(x), torch.randint_like(x, 1, 11)

        self.common(fn, [torch.zeros([20, 20])], exact_stride=True)
    @config.patch(fallback_random=True)
    @xfail_if_mps  # 100% are not close
    def test_like_rands_sliced(self):
        # Same as test_like_rands but on a non-contiguous (sliced + permuted)
        # input, to check stride propagation of *_like RNG ops.
        def fn(x):
            return (
                torch.randn_like(x),
                torch.randn_like(x),
                torch.randint_like(x, 1, 11),
            )

        self.common(fn, (torch.zeros([3, 4])[:, ::2].permute(1, 0),), exact_stride=True)
    @config.patch(check_stack_no_cycles_TESTING_ONLY=True)
    def test_check_stack_no_cycles(self):
        # Smoke test: compiling with the cycle-check config enabled must not
        # raise; the result itself is not inspected.
        if config.cpp_wrapper and self.device != "cpu":
            raise unittest.SkipTest(
                "codegen() gets called twice in cpp_wrapper GPU compilation, which "
                "causes this test to fail. This can be removed if GPU compilation is "
                "done in a single pass."
            )

        @torch.compile()
        def fn(x):
            return x * 3

        r = fn(torch.randn(2, device=self.device, requires_grad=True))
        # Backward compilation isn't hooked into cprofile, it probably
        # should...
        # r.sum().backward()
    def test_like_rands2(self):
        # rand_like with kwargs `device` of str type
        d = self.device
        assert isinstance(d, str)

        @torch.compile
        def fn(x):
            return torch.rand_like(x, device=d)

        x = torch.ones(10, device=self.device, dtype=torch.float32)
        a0 = fn(x).clone()
        a1 = fn(x).clone()
        # successive calls differ but keep shape/strides
        self.assertFalse(torch.allclose(a0, a1))
        self.assertEqual(a0.shape, a1.shape)
        self.assertEqual(a0.stride(), a1.stride())
    @requires_gpu()
    @skip_if_triton_cpu("Flaky on Triton CPU")
    def test_like_rands3(self):
        # rand_like with `device` which is different from `x.device`
        def test_like_rands_on_different_device(device1, device2):
            @torch.compile
            def fn(x, device):
                return torch.rand_like(x, device=device)

            x = torch.ones(10, device=device1, dtype=torch.float32)
            return fn(x, device2).clone()

        # cross-device in both directions: cpu->gpu and gpu->cpu
        a0 = test_like_rands_on_different_device("cpu", GPU_TYPE)
        a1 = test_like_rands_on_different_device(GPU_TYPE, "cpu")
        self.assertTrue(a0.device.type == GPU_TYPE)
        self.assertTrue(a1.device.type == "cpu")
        self.assertEqual(a0.shape, a1.shape)
        self.assertEqual(a0.stride(), a1.stride())
    def test_max_pool2d_with_indices_backward(self):
        # Backward of 2x2/stride-2 max pool; indices come from the matching
        # forward call so the backward scatter is well-defined.
        def fn(a, b, c):
            return aten.max_pool2d_with_indices_backward(
                a, b, [2, 2], [2, 2], [0, 0], [1, 1], False, c
            )

        x = torch.randn([2, 4, 18, 14])
        result, indices = aten.max_pool2d_with_indices(
            x,
            [2, 2],
            [2, 2],
            [0, 0],
            [1, 1],
            False,
        )

        self.common(
            fn,
            [
                torch.randn_like(result),
                x,
                indices,
            ],
        )
    @xfail_if_mps  # Small tolerances bug
    @skip_if_gpu_halide  # slow
    def test_max_pool2d_with_indices_backward2(self):
        # Variant with 3x3 kernel, padding 1 and ceil_mode=True.
        def fn(a, b, c):
            return aten.max_pool2d_with_indices_backward(
                a, b, [3, 3], [2, 2], [1, 1], [1, 1], True, c
            )

        x = torch.randn([2, 4, 40, 56])
        result, indices = aten.max_pool2d_with_indices(
            x,
            [3, 3],
            [2, 2],
            [1, 1],
            [1, 1],
            True,
        )

        self.common(
            fn,
            [
                torch.randn_like(result),
                x,
                indices,
            ],
        )
    # From https://github.com/pytorch/torchdynamo/issues/1200
    def test_max_pool2d_with_indices_backward3(self):
        # 1x1 kernel with stride 2 (regression case from the linked issue).
        def fn(a, b, c):
            return aten.max_pool2d_with_indices_backward(
                a, b, [1, 1], [2, 2], [0, 0], [1, 1], False, c
            )

        x = torch.randn([32, 256, 37, 38])
        result, indices = aten.max_pool2d_with_indices(
            x,
            [1, 1],
            [2, 2],
            0,
            1,
            False,
        )
        self.common(
            fn,
            [
                torch.randn_like(result),
                x,
                indices,
            ],
        )
    # From https://github.com/pytorch/torchdynamo/issues/1352
    @xfail_if_mps  # Small tolerances bug
    @skip_if_halide  # hangs forever
    def test_max_pool2d_with_indices_backward4(self):
        # 5x5 kernel, stride 1, padding 2 -- and asserts exactly one kernel
        # is generated (i.e. no fallback for this window size).
        def fn(a, b, c):
            return aten.max_pool2d_with_indices_backward(
                a, b, [5, 5], [1, 1], [2, 2], [1, 1], False, c
            )

        torch._inductor.metrics.generated_kernel_count = 0
        x = torch.randn([2, 64, 3, 4])
        result, indices = aten.max_pool2d_with_indices(
            x,
            [5, 5],
            [1, 1],
            2,
            1,
            False,
        )
        self.common(
            fn,
            [
                torch.randn_like(result),
                x,
                indices,
            ],
        )
        assertGeneratedKernelCountEqual(self, 1)
    @expectedFailureXPU
    def test_max_pool2d_with_indices_backward5(self):
        # Window size is too big. Should fallback
        def fn(a, b, c):
            return aten.max_pool2d_with_indices_backward(
                a, b, [13, 13], [1, 1], [2, 2], [1, 1], False, c
            )

        torch._inductor.metrics.generated_kernel_count = 0
        x = torch.randn([2, 64, 20, 20])
        result, indices = aten.max_pool2d_with_indices(
            x,
            [13, 13],
            [1, 1],
            2,
            1,
            False,
        )
        self.common(
            fn,
            [
                torch.randn_like(result),
                x,
                indices,
            ],
        )
        # zero generated kernels confirms the ATen fallback was used
        assertGeneratedKernelCountEqual(self, 0)
    # From https://github.com/pytorch/pytorch/issues/93384
    def test_max_pool2d_with_indices_backward6(self):
        # dilation is not 1. Should fallback
        def fn(a, b, c):
            return aten.max_pool2d_with_indices_backward(
                a, b, [3, 2], [2, 1], [1, 1], [1, 2], False, c
            )

        torch._inductor.metrics.generated_kernel_count = 0
        x = torch.randn([2, 2, 3, 6])
        result, indices = aten.max_pool2d_with_indices(
            x,
            [3, 2],
            [2, 1],
            [1, 1],
            [1, 2],
            False,
        )
        self.common(
            fn,
            [
                torch.randn_like(result),
                x,
                indices,
            ],
        )
        # zero generated kernels confirms the ATen fallback was used
        assertGeneratedKernelCountEqual(self, 0)
    def test_issue102546(self):
        # Regression test: mean over dim 0 of a 0-d (scalar) tensor.
        def fn(x):
            return x.mean(0)

        self.common(fn, [torch.rand(())])
    def test_avg_pool2d_backward(self):
        # avg_pool2d backward, 2x2 kernel / stride 2, ceil_mode=True.
        def fn(a, b):
            return aten.avg_pool2d_backward(
                a,
                b,
                [2, 2],
                [2, 2],
                [0, 0],
                True,
                False,
                None,
            )

        self.common(
            fn,
            [
                torch.randn([2, 4, 7, 7]),
                torch.randn([2, 4, 14, 14]),
            ],
        )
    @skip_if_gpu_halide  # slow
    def test_avg_pool2d_backward2(self):
        # Overlapping windows: 3x3 kernel with stride 1 and padding 1.
        def fn(a, b):
            return aten.avg_pool2d_backward(
                a,
                b,
                [3, 3],
                [1, 1],
                [1, 1],
                True,
                False,
                None,
            )

        self.common(
            fn,
            [
                torch.randn([1, 1, 20, 15]),
                torch.randn([1, 1, 20, 15]),
            ],
        )
    def test_avg_pool2d_backward3(self):
        # 1x1 kernel / stride 2; expects exactly one generated kernel.
        def fn(a, b):
            return aten.avg_pool2d_backward(
                a,
                b,
                [1, 1],
                [2, 2],
                [0, 0],
                False,
                False,
                None,
            )

        torch._inductor.metrics.generated_kernel_count = 0
        self.common(
            fn,
            [
                torch.randn([1, 2016, 11, 11]),
                torch.randn([1, 2016, 21, 21]),
            ],
        )
        assertGeneratedKernelCountEqual(self, 1)
    def test_avg_pool2d_backward4(self):
        # 13x13 window: too large to codegen, so the ATen fallback is used
        # (asserted by a zero generated-kernel count).
        def fn(a, b):
            return aten.avg_pool2d_backward(
                a,
                b,
                [13, 13],
                [1, 1],
                [0, 0],
                True,
                False,
                None,
            )

        torch._inductor.metrics.generated_kernel_count = 0
        self.common(
            fn,
            [
                torch.randn([1, 16, 12, 12]),
                torch.randn([1, 16, 24, 24]),
            ],
            check_lowp=False,
        )
        assertGeneratedKernelCountEqual(self, 0)
    def test_avg_pool3d_backward(self):
        # 3-D average pool backward, 2x2x2 kernel / stride 2.
        def fn(a, b):
            return aten.avg_pool3d_backward(
                a,
                b,
                [2, 2, 2],
                [2, 2, 2],
                [0, 0, 0],
                True,
                False,
                None,
            )

        self.common(
            fn,
            [
                torch.randn([2, 4, 7, 7, 7]),
                torch.randn([2, 4, 14, 14, 14]),
            ],
        )
    @skip_if_halide  # compiles for 5+ minutes
    def test_avg_pool3d_backward2(self):
        # Overlapping 3x3x3 windows with stride 1 and padding 1.
        def fn(a, b):
            return aten.avg_pool3d_backward(
                a,
                b,
                [3, 3, 3],
                [1, 1, 1],
                [1, 1, 1],
                True,
                False,
                None,
            )

        self.common(
            fn,
            [
                torch.randn([1, 1, 20, 20, 15]),
                torch.randn([1, 1, 20, 20, 15]),
            ],
        )
    def test_avg_pool3d_backward3(self):
        # 1x1x1 kernel / stride 2; expects exactly one generated kernel.
        def fn(a, b):
            return aten.avg_pool3d_backward(
                a,
                b,
                [1, 1, 1],
                [2, 2, 2],
                [0, 0, 0],
                False,
                False,
                None,
            )

        torch._inductor.metrics.generated_kernel_count = 0
        self.common(
            fn,
            [
                torch.randn([1, 2016, 11, 11, 11]),
                torch.randn([1, 2016, 21, 21, 21]),
            ],
        )
        assertGeneratedKernelCountEqual(self, 1)
    def test_avg_pool3d_backward4(self):
        # 13x13x13 window: too large to codegen, so the ATen fallback is
        # used (asserted by a zero generated-kernel count).
        def fn(a, b):
            return aten.avg_pool3d_backward(
                a,
                b,
                [13, 13, 13],
                [1, 1, 1],
                [0, 0, 0],
                True,
                False,
                None,
            )

        torch._inductor.metrics.generated_kernel_count = 0
        self.common(
            fn,
            [
                torch.randn([1, 16, 12, 12, 12]),
                torch.randn([1, 16, 24, 24, 24]),
            ],
            check_lowp=False,
        )
        assertGeneratedKernelCountEqual(self, 0)
    @config.patch(search_autotune_cache=False)
    def test_mm_views(self):
        # mm on transposed views: correctness, plus a kernel-count check that
        # depends on whether native (Triton-generated) matmul is enabled.
        def fn(a, b):
            return torch.mm(a.view(32, 32), b.view(32, 32))

        self.common(
            fn,
            (
                torch.randn([32, 32]).transpose(0, 1),
                torch.randn([1, 32, 32]).transpose(0, 1),
            ),
            check_lowp=False,
        )
        if (
            config.triton.native_matmul
            and config.cuda_backend == "triton"
            and self.device == "cuda"
        ):
            self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
        else:
            # codegen mm kernel from template
            self.assertEqual(torch._inductor.metrics.generated_kernel_count, 0)
    @torch._dynamo.config.patch(assume_static_by_default=False)
    def test_dtype_sympy_expr(self):
        # Dynamic-shape slicing (symbolic size expressions) through
        # contiguous() plus a backward pass must compile without error.
        @torch._dynamo.optimize_assert("inductor")
        def fn(a):
            y = a[..., :-1, :].contiguous()
            return y

        result = fn(torch.randn([1, 2, 16, 4]).requires_grad_())
        result.sum().backward()
    @xfail_if_mps
    def test_dropout2(self):
        # Dropout through inductor: eval mode is identity, train mode drops
        # ~33%, forward/backward masks agree, and results replay under the
        # same seed. Also checks the RNG call only appears in forward code.
        n = 100000
        weight = torch.ones(
            n, device=self.device, dtype=torch.float32, requires_grad=True
        )
        ones = torch.ones(n, device=self.device, dtype=torch.float32)

        @torch._dynamo.optimize_assert("inductor")
        def run(x, train=True):
            return F.dropout(x * weight, 0.33, train)

        def check(r, g):
            rmean = r.mean().item()
            gmean = g.mean().item()
            rcount = len(r.nonzero())
            gcount = len(g.nonzero())

            # dropped elements should match
            self.assertTrue(same(r.nonzero(), g.nonzero()))
            self.assertEqual(rcount, gcount)

            # dropped should be close to 0.33
            self.assertGreater(rcount, 0.64 * n)
            self.assertGreater(0.68 * n, rcount)

            self.assertAlmostEqual(rmean, gmean)
            self.assertAlmostEqual(rmean, 1.0, places=2)

        r1 = run(ones, train=False)
        r1.sum().backward()
        g1 = weight.grad.clone()
        # eval mode should be all ones
        self.assertTrue(same(r1, torch.ones_like(r1)))
        self.assertTrue(same(g1, torch.ones_like(g1)))

        torch.manual_seed(1234)
        weight.grad.zero_()
        r2, (fw_code, bw_code) = run_fw_bw_and_get_code(lambda: run(ones))
        # the dropout mask is generated in forward only; backward reuses it
        if is_halide_backend(self.device):
            self.assertEqual(fw_code.count("halide_helpers.rand"), 1)
            self.assertEqual(bw_code.count("halide_helpers.rand"), 0)
        elif self.device == GPU_TYPE:
            self.assertEqual(fw_code.count("tl.rand"), 1)
            self.assertEqual(bw_code.count("tl.rand"), 0)
        g2 = weight.grad.clone()
        check(r2, g2)

        torch.manual_seed(1234)
        weight.grad.zero_()
        r3 = run(ones)
        r3.sum().backward()
        g3 = weight.grad.clone()
        check(r3, g3)

        # second run is same result as first
        self.assertTrue(same(r2, r3))
        self.assertTrue(same(g2, g3))
    @xfail_if_mps
    @config.patch(search_autotune_cache=False)
    @unittest.skipIf(config.triton.native_matmul, "matmul count is different")
    def test_dropout3(self):
        # Two dropout layers: each must get its own RNG call in the forward
        # kernel, none in backward, with exactly 4 generated kernels.
        m = torch.nn.Sequential(
            torch.nn.Linear(32, 32, bias=False),
            torch.nn.Dropout(),
            torch.nn.Linear(32, 32, bias=False),
            torch.nn.Dropout(),
        ).to(self.device)

        @torch._dynamo.optimize_assert("inductor")
        def run(x):
            return m(x)

        torch._inductor.metrics.generated_kernel_count = 0

        result, (fw_code, bw_code) = run_fw_bw_and_get_code(
            lambda: run(torch.randn([8, 32], device=self.device))
        )

        if is_halide_backend(self.device):
            self.assertEqual(fw_code.count("halide_helpers.rand"), 2)
            self.assertEqual(bw_code.count("halide_helpers.rand"), 0)
        elif self.device == GPU_TYPE:
            # the load_seed_offset arg can be 1 or non-1; depending on whether
            # the triton signature specializes on 1 vs non-1, you might get 1
            # or 2 kernels. In newer versions of triton, there's no specialization
            # so we get only 1 kernel.
            self.assertEqual(fw_code.count("tl.rand"), 2)
            self.assertEqual(bw_code.count("tl.rand"), 0)

        self.assertEqual(torch._inductor.metrics.generated_kernel_count, 4)
    @xfail_if_mps  # Only works for triton
    def test_randint_kernel_count(self):
        # Three identical randint calls should be deduplicated into a small
        # number of Triton kernels (count checked against generated source).
        if self.device != GPU_TYPE:
            raise unittest.SkipTest("Only valid for GPU!")

        @torch._dynamo.optimize_assert("inductor")
        def fn1():
            random_tensor1 = torch.randint(10, [32], device=self.device)
            random_tensor2 = torch.randint(10, [32], device=self.device)
            random_tensor3 = torch.randint(10, [32], device=self.device)
            return random_tensor1, random_tensor2, random_tensor3

        _, source_codes = run_and_get_code(fn1)
        # cpp_wrapper does a 2-pass generation on GPU.
        self.assertEqual(len(source_codes), 1)
        # the load_seed_offset arg can be 1 or non-1; depending on whether
        # the triton signature specializes on 1 vs non-1, you might get 1
        # or 2 kernels. In newer versions of triton, there's no specialization
        # so we get only 1 kernel.
        self.assertEqual(source_codes[0].count("async_compile.triton"), 2)
    def test_roll(self):
        # aten.roll with multiple dims/shifts and with the flattened
        # (no-dims) form.
        def fn(a):
            return (
                aten.roll(a, [-3, 10], [1, 2]),
                aten.roll(a, [5]),
            )

        self.common(
            fn,
            [
                torch.randn([2, 56, 56, 16]),
            ],
        )
    def test_argmax_min_int32(self):
        # https://github.com/pytorch/pytorch/issues/94055
        def fn(a, b):
            c = a.argmax(3)
            return torch.min(b, c)

        # int inputs: argmax returns int64, min broadcasts against int32 b
        a = torch.rand(3, 4, 2, 1).int()
        b = torch.rand(2, 2, 1, 4, 1).int()
        self.common(fn, (a, b))
    def test_argmax_argmin1(self):
        # Full-tensor (no dim) argmax/argmin.
        def fn(x):
            return (aten.argmax(x), aten.argmin(x))

        self.common(
            fn,
            [
                torch.randn([8, 256, 256]),
            ],
        )
    @skipIfXpu(msg="Incorrect XPU reference")
    def test_argmax_argmin2(self):
        # Per-dim argmax/argmin along both axes of a square matrix.
        def fn(x):
            return (
                aten.argmax(x, 0),
                aten.argmin(x, 0),
                aten.argmax(x, 1),
                aten.argmin(x, 1),
            )

        self.common(fn, (torch.randn([144, 144]),))
    @skipIfXpu(msg="Incorrect XPU reference")
    def test_argmax_argmin_with_duplicates(self):
        # Ties must resolve to the same index as eager; exercised across
        # all three reduction codegen strategies via increasing sizes.
        def fn(x):
            return (
                aten.argmax(x, 0),
                aten.argmin(x, 0),
                aten.argmax(x, 1),
                aten.argmin(x, 1),
            )

        # Unrolled reduction
        t1 = torch.randint(2, size=(6, 6))
        self.common(fn, (t1,))

        # Persistent reduction
        t1 = torch.randint(8, size=(32, 32))
        self.common(fn, (t1,))

        # Non-persistent reduction
        t1 = torch.randint(8, size=(1028, 1028))
        self.common(fn, (t1,))
    @skipIfXpu(msg="# Incorrect XPU reference ")
    @xfail_if_mps  # eager nan is wrong, see https://github.com/pytorch/pytorch/issues/130295
    @skip_if_halide  # nan behavior
    def test_argmax_argmin_with_nan(self):
        # NaN propagation in argmax/argmin must match eager, across all three
        # reduction codegen strategies via increasing sizes.
        def fn(x):
            return (
                aten.argmax(x, 0),
                aten.argmin(x, 0),
                aten.argmax(x, 1),
                aten.argmin(x, 1),
            )

        # Unrolled reduction
        t1 = torch.randn((6, 6))
        t1[:, 1] = float("nan")
        t1[:, 3] = float("nan")
        self.common(fn, (t1,))

        # Persistent reduction
        t1 = torch.randn((32, 32))
        t1[:, 4] = float("nan")
        t1[:, 8] = float("nan")
        self.common(fn, (t1,))

        # Non-persistent reduction
        t1 = torch.randn((1028, 1028))
        t1[:, 40] = float("nan")
        t1[:, 100] = float("nan")
        self.common(fn, (t1,))
def test_conv_backward(self):
def fn(rank4_inps, rank3_inps, rank5_inps):
out1 = aten.convolution_backward(
*rank4_inps,
[C],
[1, 1],
[0, 0],
[1, 1],
False,
[0, 0],
1,
[True, True, True],
)
out2 = aten.convolution_backward(
*rank4_inps,
[C],
[1, 1],
[0, 0],
[1, 1],
False,
[0, 0],
1,
[True, False, False],
)
out3 = aten.convolution_backward(
*rank3_inps,
[C],
[1],
[0],
[1],
False,
[0],
1,
[True, True, True],
)
out4 = aten.convolution_backward(
*rank5_inps,
[C],
[1, 1, 1],
[0, 0, 0],
[1, 1, 1],
False,
[0, 0, 0],
1,
[True, True, True],
)
return (out1, out2, out3, out4)
B = 3
C = 4
H = 5
grad_out = torch.randn(B, C, H - 2, H - 2, H - 2)
inp = torch.randn(B, C, H, H, H)
weight = torch.randn(C, C, 3, 3, 3)
def shrink_rank(x, rank):
res = x
while res.dim() > rank:
res = torch.select(res, -1, 0)
return res.contiguous()
rank4_inps = [shrink_rank(x, 4) for x in [grad_out, inp, weight]]
rank3_inps = [shrink_rank(x, 4) for x in [grad_out, inp, weight]]
rank5_inps = [shrink_rank(x, 5) for x in [grad_out, inp, weight]]
with torch.backends.cudnn.flags(enabled=True, allow_tf32=False):
self.common(
fn,
[rank4_inps, rank3_inps, rank5_inps],
)
    @skipIfXpu(msg="Incorrect XPU reference")
    def test_argmax_argmin3(self):
        # Integer input with negative-dim indexing.
        def fn(x):
            return (
                aten.argmax(x, 0),
                aten.argmin(x, 0),
                aten.argmax(x, -1),
                aten.argmin(x, -1),
            )

        self.common(
            fn,
            [torch.randint(0, 5, [64, 64])],
        )
    def test_vdd_clamp(self):
        # clamp_min on a tensor that requires grad.
        def fn(x):
            return torch.clamp_min(x, 3)

        self.common(
            fn,
            [
                torch.randn([16], requires_grad=True) * 10,
            ],
        )
    @parametrize(
        "use_block_ptr",
        [
            subtest(True, decorators=[skip_if_not_triton]),
        ],
    )
    def test_tmp_not_defined_issue1(self, use_block_ptr):
        # Regression test for a codegen bug where a temporary was referenced
        # before definition; runs a layer-norm-like graph with block pointers.
        def forward(
            primals_3,
            primals_4,
            add_tensor,
            convert_element_type_default,
            div_default,
            reciprocal_default,
        ):
            var_default = torch.ops.aten.var(
                convert_element_type_default, [2], correction=0
            )
            sub_tensor = torch.ops.aten.sub.Tensor(add_tensor, div_default)
            mul_tensor_1 = torch.ops.aten.mul.Tensor(sub_tensor, reciprocal_default)
            mul_tensor_2 = torch.ops.aten.mul.Tensor(mul_tensor_1, primals_3)
            add_tensor_2 = torch.ops.aten.add.Tensor(mul_tensor_2, primals_4)
            convert_element_type_default_1 = add_tensor_2.to(dtype=torch.float32)
            convert_element_type_default_2 = convert_element_type_default_1.to(
                dtype=torch.float32
            )
            var_default_1 = torch.ops.aten.var(
                convert_element_type_default_2, [2], correction=0
            )
            broadcast_in_dim_default_2 = var_default_1.reshape(1, 512, 1)
            sum_default_1 = convert_element_type_default_2.sum(2)
            add_tensor_3 = torch.ops.aten.add.Tensor(broadcast_in_dim_default_2, 1e-05)
            return (var_default, sum_default_1, add_tensor_3)

        inps = [
            (torch.Size([1024]), torch.float32),
            (torch.Size([1024]), torch.float32),
            (torch.Size([1, 512, 1024]), torch.float32),
            (torch.Size([1, 512, 1024]), torch.float32),
            (torch.Size([1, 512, 1]), torch.float32),
            (torch.Size([1, 512, 1]), torch.float32),
        ]
        inps = [torch.randn(shape, dtype=dtype) for (shape, dtype) in inps]
        with config.patch("triton.use_block_ptr", use_block_ptr):
            self.common(forward, inps, atol=1e-05, rtol=2e-05)
    @unittest.skipIf(
        os.environ.get("BUILD_ENVIRONMENT", "").startswith("parallelnative"),
        "TODO: debug this with asan",
    )
    @skip_if_gpu_halide
    def test_tmp_not_defined_issue2(self):
        # Second regression case for the temporary-not-defined codegen bug:
        # a div/mul/sum chain with a passthrough output.
        def forward(arg38_1, arg81_1, getitem_17, new_zeros_default_4):
            div_tensor_7 = torch.ops.aten.div.Tensor(getitem_17, arg81_1)
            mul_tensor_24 = torch.ops.aten.mul.Tensor(div_tensor_7, arg38_1)
            sum_default_7 = torch.ops.aten.sum.default(mul_tensor_24)
            return (new_zeros_default_4, sum_default_7)

        dtype = torch.float32
        args = [
            ((1, 88, 40, 40), (140800, 1600, 40, 1), dtype),
            ((), (), dtype),
            ((1, 88, 40, 40), (140800, 1600, 40, 1), dtype),
            ((3,), (1,), dtype),
        ]
        args = [
            rand_strided(shape, stride, dtype).requires_grad_(True).add(1)
            for shape, stride, dtype in args
        ]
        self.common(forward, args, atol=1e-5, rtol=1e-5)
    @xfail_if_mps_unimplemented  # embedding bag
    @requires_gpu()
    @skip_if_halide  # cascading accuracy issues due rsqrt fallback
    def test_tmp_not_defined_issue3(self):
        # Third regression case: a captured AOT-autograd graph (embedding bag
        # + softmax + var_mean) reconstructed via aot_graph_input_parser.
        # NOTE(review): the inner `forward` has its own `self` parameter,
        # deliberately shadowing the test instance; its tensor inputs are
        # synthesized from the annotations by aot_graph_input_parser.
        test_device = torch.device(type=self.device)
        test_device_0 = (
            torch.device(type=self.device, index=0)
            if self.device != "cpu"
            else test_device
        )

        def forward(
            self,
            primals_1: "f32[1001, 6]",
            primals_2: "f32[1001]",
            primals_3: "f32[1001, 64]",
            primals_4: "f32[4190]",
            primals_5: "f32[4190]",
            primals_6: "f32[1739, 4190]",
            primals_48: "f32[6144, 4191]",
        ):
            _tensor_constant0: "i64[4190]" = self._tensor_constant0
            lift_fresh_copy: "i64[4190]" = torch.ops.aten.lift_fresh_copy.default(
                _tensor_constant0
            )

            index: "f32[6144, 4190]" = torch.ops.aten.index.Tensor(
                primals_48, [None, lift_fresh_copy]
            )

            _tensor_constant1: "i64[6]" = self._tensor_constant1
            lift_fresh_copy_1: "i64[6]" = torch.ops.aten.lift_fresh_copy.default(
                _tensor_constant1
            )
            index_1: "f32[6144, 6]" = torch.ops.aten.index.Tensor(
                primals_48, [None, lift_fresh_copy_1]
            )
            primals_48 = lift_fresh_copy_1 = None
            permute: "f32[6, 1001]" = torch.ops.aten.permute.default(primals_1, [1, 0])
            addmm: "f32[6144, 1001]" = torch.ops.aten.addmm.default(
                primals_2, index_1, permute
            )
            # numerically-stable softmax: subtract rowwise max before exp
            amax: "f32[6144, 1]" = torch.ops.aten.amax.default(addmm, [-1], True)
            sub: "f32[6144, 1001]" = torch.ops.aten.sub.Tensor(addmm, amax)
            exp: "f32[6144, 1001]" = torch.ops.aten.exp.default(sub)
            sum_1: "f32[6144, 1]" = torch.ops.aten.sum.dim_IntList(exp, [-1], True)
            div: "f32[6144, 1001]" = torch.ops.aten.div.Tensor(exp, sum_1)

            full_default: "i32[6144, 1001]" = torch.ops.aten.full.default(
                [6144, 1001],
                1,
                dtype=torch.int32,
                layout=torch.strided,
                device=test_device_0,
                pin_memory=False,
            )

            iota: "i32[1001]" = torch.ops.prims.iota.default(
                1001,
                start=0,
                step=1,
                dtype=torch.int32,
                device=test_device,
                requires_grad=False,
            )
            mul: "i32[6144, 1001]" = torch.ops.aten.mul.Tensor(full_default, iota)

            iota_1: "i32[6144]" = torch.ops.prims.iota.default(
                6144,
                start=0,
                step=1001,
                dtype=torch.int32,
                device=test_device_0,
                requires_grad=False,
            )
            view: "i32[6150144]" = torch.ops.aten.reshape.default(mul, [-1])
            view_1: "f32[6150144]" = torch.ops.aten.reshape.default(div, [-1])
            _embedding_bag = torch.ops.aten._embedding_bag.default(
                primals_3, view, iota_1, False, 0, False, view_1
            )
            getitem: "f32[6144, 64]" = _embedding_bag[0]
            getitem_1: "i32[6150144]" = _embedding_bag[1]
            getitem_2: "i32[6144]" = _embedding_bag[2]
            getitem_3: "i32[0]" = _embedding_bag[3]
            unsqueeze: "f32[6144, 1, 64]" = torch.ops.aten.unsqueeze.default(getitem, 1)
            var_mean = torch.ops.aten.var_mean.correction(
                index, [1], correction=0, keepdim=True
            )
            getitem_4: "f32[6144, 1]" = var_mean[0]
            getitem_5: "f32[6144, 1]" = var_mean[1]
            add: "f32[6144, 1]" = torch.ops.aten.add.Tensor(getitem_4, 1e-05)
            rsqrt: "f32[6144, 1]" = torch.ops.aten.rsqrt.default(add)
            sub_1: "f32[6144, 4190]" = torch.ops.aten.sub.Tensor(index, getitem_5)
            mul_1: "f32[6144, 4190]" = torch.ops.aten.mul.Tensor(sub_1, rsqrt)
            mul_2: "f32[6144, 4190]" = torch.ops.aten.mul.Tensor(mul_1, primals_4)
            add_1: "f32[6144, 4190]" = torch.ops.aten.add.Tensor(mul_2, primals_5)
            permute_1: "f32[4190, 1739]" = torch.ops.aten.permute.default(
                primals_6, [1, 0]
            )
            return [
                index,
                index_1,
                addmm,
                amax,
                sum_1,
                iota_1,
                view,
                view_1,
                getitem_1,
                getitem_2,
                getitem_3,
                unsqueeze,
                getitem_5,
                rsqrt,
                add_1,
                permute_1,
            ]

        kwargs = aot_graph_input_parser(forward, device=self.device)
        self.common(forward, [], kwargs=kwargs)
    @skip_if_gpu_halide
    @config.patch("halide.scheduler_cpu", "Mullapudi2016")
    def test_misaligned_address_issue1(self):
        # Regression test for a misaligned-address crash in gather codegen.
        def forward(sub_tensor_1, unsqueeze_default):
            gather_default = torch.ops.aten.gather.default(
                sub_tensor_1, 1, unsqueeze_default
            )
            return gather_default

        args = [
            ((1, 1000), (1000, 1), torch.float32),
            ((1, 1), (1, 1), torch.int64),
        ]
        args = [rand_strided(shape, stride, dtype) for shape, stride, dtype in args]
        self.common(forward, args)
    def test_invalid_operand_issue1(self):
        # Regression test: chained slice/select scatter followed by
        # embedding previously produced invalid codegen operands.
        def forward(arg0_1, arg1_1, arg3_1, squeeze, view_1, slice_1):
            slice_scatter = torch.ops.aten.slice_scatter.default(
                slice_1, arg3_1, 1, 1, 9223372036854775807
            )
            slice_scatter_1 = torch.ops.aten.slice_scatter.default(
                arg1_1, slice_scatter, 0, 0, 9223372036854775807
            )
            slice_2 = torch.ops.aten.slice.Tensor(
                slice_scatter_1, 0, 0, 9223372036854775807
            )
            select_scatter = torch.ops.aten.select_scatter.default(
                slice_2, squeeze, 1, 0
            )
            slice_scatter_2 = torch.ops.aten.slice_scatter.default(
                slice_scatter_1, select_scatter, 0, 0, 9223372036854775807
            )
            view = torch.ops.aten.view.default(slice_scatter_2, [-1, 128])
            embedding = torch.ops.aten.embedding.default(arg0_1, view, 1)
            return [embedding, view_1]

        args = [
            ((50005, 768), (768, 1), torch.float32),
            ((8, 128), (128, 1), torch.int64),
            ((8, 127), (127, 1), torch.int64),
            ((8,), (1,), torch.int64),
            ((1024,), (1,), torch.int64),
            ((8, 128), (128, 1), torch.int64),
        ]
        args = [rand_strided(shape, stride, dtype) for shape, stride, dtype in args]
        self.common(forward, args)
def test_sizehint_issue1(self):
    """F.unfold with a stride equal to the kernel size on a 4D input."""

    def unfold_4x4(t):
        return torch.nn.functional.unfold(
            t, kernel_size=[4, 4], dilation=1, padding=0, stride=[4, 4]
        )

    specs = [((2, 24, 56, 56), (75264, 3136, 56, 1), torch.float32, False)]
    inputs = [
        rand_strided(shape, stride, dtype).requires_grad_(grad)
        for shape, stride, dtype, grad in specs
    ]
    self.common(unfold_4x4, inputs)
def test_zero_dim_reductions(self):
    """Reductions over a zero-size dim: argmin/argmax/max/min must raise,
    while sum/prod/any/all have well-defined identities and must compile.
    """
    for kd in [True, False]:  # keepdim on/off
        # (2, 0) input reduced over the empty dim 1.
        inps0 = (torch.zeros(2, 0, device=self.device, dtype=torch.float16), 1, kd)
        failed_ops = [aten.argmin, aten.argmax, aten.max, aten.min]
        for op in failed_ops:
            with self.assertRaisesRegex(
                IndexError, "Expected reduction dim 1 to have non-zero size"
            ):
                mod = make_fx(op)(*inps0)
                _ = compile_fx_inner(mod, inps0)
        pass_ops = [
            lambda *x: fn(*x) for fn in [aten.sum, aten.prod, aten.any, aten.all]
        ]
        for op in pass_ops:
            compiled = torch.compile(op, backend="inductor")
            expected = op(*inps0)
            actual = compiled(*inps0)
            self.assertTrue(torch.allclose(actual, expected, atol=1e-3, rtol=1e-3))
def test_unfold_zero_dimension_tensor(self):
    """unfold_copy with size=0 on a tensor whose unfolded dim is empty."""

    def fn(t):
        return torch.unfold_copy(dimension=1, input=t, size=0, step=7)

    inp = torch.rand([1, 0], dtype=torch.float32)
    expected = fn(inp)
    actual = torch.compile(fn, fullgraph=True)(inp)
    self.assertEqual(expected, actual)
def test_zero_element_mutation(self):
    """An in-place activation applied to a zero-element tensor."""

    class ZeroElemModel(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            # inplace=True is the point: mutation on a 0-element input.
            self.layer1 = nn.LeakyReLU(negative_slope=5.2955089, inplace=True)

        def forward(self, inputs):
            return self.layer1(inputs)

    empty_input = torch.randn([0])
    self.common(ZeroElemModel(), (empty_input,))
def test_lerp(self):
    """torch.lerp with a non-contiguous and a contiguous `end` operand."""

    def lerp_noncontig(i0, i1):
        # transpose makes the interpolation endpoint non-contiguous
        transposed = i0.transpose(-2, -3)
        return torch.lerp(i1, transposed, 70000)

    def lerp_contig(i0, i1):
        return torch.lerp(i1, i0, 70000)

    self.common(lerp_noncontig, [torch.rand(10, 3, 10), torch.rand(3, 10, 10)])
    self.common(lerp_contig, [torch.rand(3, 10, 10), torch.rand(3, 10, 10)])
@parametrize(
    "dtype",
    test_dtypes,
)
def test_unspec_inputs(self, dtype):
    """Mixed-device inputs: a GPU tensor combined with a CPU scalar tensor
    (an "unspecialized" input), in both argument orders.
    """
    if self.device == "cpu":
        raise unittest.SkipTest("Testing mixed devices")
    if (
        is_halide_backend(self.device)
        and getattr(self.device, "type", self.device) == "cuda"
    ):
        # https://github.com/halide/Halide/issues/8318
        raise unittest.SkipTest("halide not supported")
    if not self.is_dtype_supported(dtype):
        raise unittest.SkipTest(
            f"dtype {dtype} not supported for device {self.device}"
        )

    def fn(x, y):
        return x + y, x * y, x / y

    opt = torch.compile(fn, backend="inductor")
    # GPU matrix + 0-d CPU tensor of the parametrized dtype.
    inputs = (
        rand_strided((2, 3), (3, 1), dtype=torch.float32, device=GPU_TYPE),
        rand_strided((), (), dtype=dtype, device="cpu"),
    )
    self.assertTrue(same(opt(*inputs), fn(*inputs)))
    # Same pair with the argument order flipped.
    inputs = (inputs[1], inputs[0])
    self.assertTrue(same(opt(*inputs), fn(*inputs)))
@dynamo_config.patch(automatic_dynamic_shapes=True)
def test_list_clearing(self):
    """Compiled wrappers must clear their input list so inputs can be freed
    before the final matmul runs (checked via weakrefs inside a dispatch mode).
    """
    if self.device == "cpu":
        contexts = [contextlib.nullcontext]
    else:
        # On GPU also exercise the cudagraphs path.
        contexts = [
            contextlib.nullcontext,
            lambda: config.patch({"triton.cudagraphs": True}),
        ]

    for context in contexts:
        with context():
            inps = [
                torch.rand([5, 5]).to(self.device),
                torch.rand([5, 5]).to(self.device),
            ]
            # weakrefs let us observe deallocation without keeping inputs alive
            inp_refs = [weakref.ref(inp) for inp in inps]

            def fn(x, y):
                a = x + y
                return (a @ a,)

            fn_fx = make_fx(fn)(inps[0], inps[1])
            fn_compiled = compile_fx_inner(fn_fx, inps)

            test_self = self
            matmul_seen = False

            class TestRefMode(TorchDispatchMode):
                def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                    kwargs = kwargs if kwargs else {}

                    nonlocal inps
                    nonlocal inp_refs
                    nonlocal test_self
                    nonlocal matmul_seen

                    # by matmul, inputs should be deallocated
                    # TODO: should not be necessary, ref-cycle ?
                    gc.collect()
                    if func is aten.mm.out:
                        matmul_seen = True
                        test_self.assertEqual(len(inps), 0)
                        test_self.assertIsNone(inp_refs[0]())
                        test_self.assertIsNone(inp_refs[1]())

                    return func(*args, **kwargs)

            with TestRefMode():
                fn_compiled(inps)

            # do an extra run to make sure we are deallocating on warmup and record
            if self.device == GPU_TYPE:
                inps.extend(
                    [
                        torch.rand([5, 5]).to(self.device),
                        torch.rand([5, 5]).to(self.device),
                    ]
                )
                inp_refs.extend([weakref.ref(inp) for inp in inps])
                matmul_seen = False

                with TestRefMode():
                    fn_compiled(inps)

            # for some reason, TorchDispatch doesn't capture the
            # cuda mm call (even without cudagraphs)
            if self.device == "cpu":
                self.assertTrue(matmul_seen)
            else:
                self.assertEqual(len(inps), 0)
def test_dtype_mismatch_issue(self):
    """Softmax over a padded last dim must not hit a dtype mismatch (repro)."""

    def padded_softmax(t):
        padded = torch.nn.functional.pad(t, [0, 1])
        return padded.softmax(dim=-1)

    sample = torch.rand(128, 32, 63)
    self.common(padded_softmax, (sample,))
def test_vectorized_ops_masked(self):
    """_unsafe_masked_index with a broadcast mask over the last dim."""

    def masked_index(t):
        idx = torch.arange(64, device=t.device)
        keep = idx.view(1, 1, 64) < 63
        return torch.ops.aten._unsafe_masked_index(t, keep, [None, None, idx], 7)

    sample = torch.rand(128, 32, 63)
    self.common(masked_index, (sample,))
@xfail_if_mps
def test_vectorized_ops_masked_var_novec(self):
    """_unsafe_masked_index where the index range exceeds the masked extent."""

    def masked_index(t):
        idx = torch.arange(10, device=t.device)
        keep = (idx < 5).view(1, 1, 1, 10)
        return torch.ops.aten._unsafe_masked_index(t, keep, [None, None, None, idx], 7)

    sample = torch.rand(1, 1, 8, 8)
    self.common(masked_index, (sample,))
def test_diagonal_copy(self):
    """torch.diagonal_copy on wide, square, and tall matrices."""

    def take_diag(t):
        return torch.diagonal_copy(t)

    for shape in ((2, 3), (2, 2), (3, 2)):
        self.common(take_diag, (torch.randn(*shape),))
def test_copy_with_scalar_src(self):
    """Tensor.copy_ with a Python scalar as the source."""

    def fill_and_add(t):
        scratch = torch.zeros_like(t)
        scratch.copy_(2)  # scalar source broadcast into the buffer
        return t + scratch

    sample = torch.randn(64, 64, dtype=torch.float32, device=self.device)
    self.common(fill_and_add, (sample,))
def test_kwargs(self):
    """An op invoked purely through keyword arguments (histogramdd)."""
    if self.device == GPU_TYPE:
        raise unittest.SkipTest("histogramdd only supports cpu")

    def hist2d(sample, weights):
        return torch.histogramdd(
            sample,
            bins=[3, 3],
            weight=weights,
        )

    self.common(hist2d, [torch.randn((4, 2)), torch.randn(4)])
# Shape padding causes the inputs to all get specialized, so the codegen
# test fails
@expectedFailureCodegenDynamic
@requires_gpu()
@torch._inductor.config.patch("shape_padding", True)
def test_shape_padding(self):
    """With shape_padding enabled, mm/bmm/addmm/baddbmm/matmul over
    deliberately odd (non-aligned) sizes must still be numerically correct.
    """
    dtypes = [
        torch.float16,
        torch.float32,
    ]

    # Odd sizes so padding actually kicks in.
    b, m, n, k = 7, 11, 13, 15

    def gen(*shape, dtype=torch.float32):
        # scale down to keep accumulation error small across dtypes
        return torch.randn(*shape, device=GPU_TYPE, dtype=dtype) / k + 1.0

    for dtype in dtypes:
        x = gen(m, k, dtype=dtype)
        y = gen(k, n, dtype=dtype)
        z = gen(n, dtype=dtype)
        self.common(lambda x, y: torch.mm(x, y), (x, y))
        self.common(lambda x, y: torch.matmul(x, y), (x, y))
        self.common(lambda x, y, z: torch.addmm(z, x, y), (x, y, z))

    for dtype in dtypes:
        x = gen(b, m, k, dtype=dtype)
        y = gen(b, k, n, dtype=dtype)
        z = gen(n, dtype=dtype)
        self.common(lambda x, y: torch.bmm(x, y), (x, y))
        self.common(lambda x, y: torch.matmul(x, y), (x, y))
        self.common(lambda x, y, z: torch.baddbmm(z, x, y), (x, y, z))
@requires_gpu()
@torch._inductor.config.patch("layout_optimization", True)
@tf32_on_and_off(0.005)
def test_inductor_layout_optimization_input_mutations(self):
    """Layout optimization (NHWC conversion) must preserve both the output
    strides and the visible effect of an input mutation (x.mul_).
    """
    # channel dim must be > 64 for inductor to do layout optimization and use NHWC
    mod = nn.Conv2d(3, 128, 1, stride=1, bias=False).to(self.device)

    def f(x):
        x.mul_(2)  # input mutation must survive compilation
        out = mod(x)
        return out

    f_compiled = torch.compile(f)
    x_ref = torch.rand(2, 3, 128, 128, device=self.device)
    x_test = x_ref.detach().clone()
    with torch.no_grad():
        out_ref = f(x_ref)
        out_test = f_compiled(x_test)
        self.assertEqual(out_ref, out_test)
        self.assertEqual(out_ref.shape, out_test.shape)
        # Importantly, since inductor._config.keep_output_stride is True,
        # the outputs should have matching strides here.
        self.assertEqual(out_ref.stride(), out_test.stride())
        # mutated inputs must end up identical too
        self.assertEqual(x_ref, x_test)
@requires_gpu()
@skip_if_not_triton
@unittest.skipIf(
    not IS_BIG_GPU, "Skipping triton backend only since not big GPU (not enough SM)"
)
def test_inductor_multiple_specializations(self):
    """mark_dynamic with `specialize_on` must produce the same result as the
    plain dynamic compilation for the matching size.
    """

    @torch.compile(
        options={
            "max_autotune": True,
            "max_autotune_gemm_backends": "TRITON",
        },
        dynamic=False,
    )
    def inductor_matmul(a, b):
        torch._check(a.shape[0] == b.shape[1])
        return (m, torch.mm(a, b))

    m = 16
    k = 1280
    dynamic_a = torch.randn(m, k, device=GPU_TYPE, dtype=torch.bfloat16)
    dynamic_specialized_a = dynamic_a.clone()
    b = torch.randn(k, m, device=GPU_TYPE, dtype=torch.bfloat16)
    # fully dynamic dim 0
    torch._dynamo.decorators.mark_dynamic(
        dynamic_a,
        0,
    )
    # dynamic dim 0 with an extra specialization for size 16
    torch._dynamo.decorators.mark_dynamic(
        dynamic_specialized_a,
        0,
        specialize_on=[lambda x0: x0 == 16],
    )
    torch._dynamo.decorators.mark_dynamic(
        b,
        1,
    )
    dynamic = inductor_matmul(dynamic_a, b)
    torch._dynamo.reset()
    dynamic_specialized = inductor_matmul(dynamic_specialized_a, b)
    self.assertEqual(dynamic, dynamic_specialized)
@requires_gpu()
@skip_if_not_triton
@unittest.skipIf(
    not IS_BIG_GPU, "Skipping triton backend only since not big GPU (not enough SM)"
)
@config.patch({"force_disable_caches": True})
def test_mark_dynamic_with_hint_override(self):
    """A hint_override on mark_dynamic should change the generated Triton
    code (different size hints) while leaving the numerics unchanged.
    """

    @torch.compile
    def no_override(x):
        return x.sum(dim=0)

    @torch.compile
    def override(x):
        return x.sum(dim=0)

    x_small = torch.randn(4096, 512, device=GPU_TYPE)
    torch._dynamo.decorators.mark_dynamic(x_small, 0)
    code1 = run_and_get_triton_code(no_override, x_small)

    torch._dynamo.reset_code_caches()

    # same input, but tuned as if dim 0 were 10x larger
    torch._dynamo.decorators.mark_dynamic(x_small, 0, hint_override=4096 * 10)
    code2 = run_and_get_triton_code(override, x_small)

    self.assertNotEqual(code1, code2)
    self.assertEqual(no_override(x_small), override(x_small))
@requires_gpu()
@skip_if_not_triton
@unittest.skipIf(
    not IS_BIG_GPU, "Skipping triton backend only since not big GPU (not enough SM)"
)
@config.patch({"force_disable_caches": True})
def test_mark_unbacked_with_hint_override(self):
    """hint_override on mark_unbacked changes codegen but not results, and an
    unbacked dim still cannot be guarded on in user code.
    """

    @torch.compile
    def no_override(x):
        return x.sum(dim=0)

    @torch.compile
    def override(x):
        return x.sum(dim=0)

    @torch.compile(fullgraph=True)
    def branching(x):
        # branching on an unbacked size must raise a data-dependent error
        if x.shape[0] > 4096:
            return 1
        return 2

    x_small = torch.randn(4096, 512, device=GPU_TYPE)
    torch._dynamo.decorators.mark_unbacked(x_small, 0)
    code1 = run_and_get_triton_code(no_override, x_small)

    torch._dynamo.reset_code_caches()

    torch._dynamo.decorators.mark_unbacked(x_small, 0, hint_override=4096 * 10)
    code2 = run_and_get_triton_code(override, x_small)

    self.assertNotEqual(code1, code2)
    self.assertEqual(no_override(x_small), override(x_small))

    with self.assertRaisesRegex(
        RuntimeError, "Could not guard on data-dependent expression"
    ):
        branching(x_small)
@requires_gpu()
def test_stride_preservation_with_stride_modifying_fx_pass(self):
    """A post-grad FX pass that rewrites the output's strides must be
    reflected in the compiled function's output strides.
    """

    def f(x):
        return x + 1

    def custom_pass(g: torch.fx.Graph) -> None:
        """
        Applies `lambda x: x.t().contiguous().t()` to the output.
        """
        output_node = g.find_nodes(op="output")[0]
        assert len(output_node.args) == 1
        output = output_node.args[0][0]
        with g.inserting_before(output_node):
            output = g.call_function(
                torch.ops.aten.permute.default, args=(output, [1, 0])
            )
            output = g.call_function(
                torch.ops.aten.clone.default,
                args=(output,),
                kwargs={"memory_format": torch.contiguous_format},
            )
            output = g.call_function(
                torch.ops.aten.permute.default, args=(output, [1, 0])
            )
            output_node.args = ((output,),)
        return g

    with config.patch(
        post_grad_custom_post_pass=custom_pass,
    ):
        f_compiled = torch.compile(f)

        x = torch.rand(4, 4, device=GPU_TYPE)
        y = f(x)
        y_compiled = f_compiled(x)

        self.assertEqual(y, y_compiled)
        # the pass transposes the layout; compiled output must match it
        self.assertEqual(y.stride(), y_compiled.stride())
def test_int_input_dynamic_shapes(self):
    """A large int argument under dynamic=True must stay a symbol."""

    @torch.compile(dynamic=True)
    def scale(t, factor):
        return t * factor

    # Constant must not get matched as constant
    self.common(scale, [torch.randn(3, 1, 1, 1, 1), 9132])
def test_float_repr_dynamic_shapes(self):
    """Linear interpolate with a tiny fractional scale under dynamic shapes."""

    @torch.compile(dynamic=True)
    def shrink(t):
        return F.interpolate(t, scale_factor=1 / 300, mode="linear")

    sample = torch.randn(1, 8, 396 * 300)
    self.common(shrink, [sample])
@torch._dynamo.config.patch("capture_scalar_outputs", True)
def test_pattern_matcher_unbacked(self):
    """topk with an unbacked (data-dependent) k flowing through the pattern
    matcher must compile under capture_scalar_outputs.
    """

    @torch.compile(fullgraph=True)
    def get_mask(W: torch.Tensor, percentage_nonzeros: torch.Tensor):
        total_elements = W.numel()
        # k is data-dependent: derived from a tensor value
        k = total_elements * percentage_nonzeros
        top_k_indices = torch.topk(torch.abs(W).flatten(), k.int())[1]
        mask = torch.zeros(total_elements, dtype=torch.bool, device=W.device)
        mask.scatter_(0, top_k_indices, True)
        mask = mask.view(W.shape)
        return mask

    x = torch.randn((128, 64), device=self.device)
    p = torch.tensor(0.50, device=self.device)
    get_mask(x, p)
def test_flexible_layout_immutable_free_symbols(self):
    """FlexibleLayout may be resized/re-strided with the same free symbols,
    but introducing a new symbol must be rejected.
    """
    import sympy

    x = sympy.Symbol("x")
    y = sympy.Symbol("y")
    z = sympy.Symbol("z")
    layout = torch._inductor.ir.FlexibleLayout(
        self.device, torch.float32, size=(x, y)
    )

    # pad_strides works since it does not add new symints
    layout.pad_strides()

    # same symints and different order should work
    layout.size = (y, x)

    # adding new symints should fail
    with self.assertRaisesRegex(
        AssertionError, "Expected free symbols unchanged, but got"
    ):
        layout.size = (z,)
def test_sqrt_dynamic_shapes(self):
    """Repro for int(num_patches**0.5) on a (possibly symbolic) size feeding
    torch.arange.
    """
    # TIMM convit_base model: https://github.com/pytorch/pytorch/issues/97877.
    # TODO: support cuda path.
    if self.device == GPU_TYPE:
        raise unittest.SkipTest("sqrt dynamic shapes only supports cpu")

    class Model(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()

        def forward(self, x):
            B, N, C = x.shape
            return self.get_rel_indices(N)

        def get_rel_indices(self, num_patches: int) -> torch.Tensor:
            # sqrt of a size expression — the operation under test
            img_size = int(num_patches**0.5)
            ind = torch.arange(img_size)
            return ind

    self.common(
        Model(),
        [
            torch.randn(8, 4, 4),
        ],
    )
def test_rsqrt_dynamic_shapes(self):
    """Division by 1/sqrt(size) under dynamic shapes."""
    # From HF hf_BigBird model.

    @torch.compile(dynamic=True)
    def scaled_bmm(a, b):
        inv_scale = 1 / math.sqrt(a.size(1))
        return torch.bmm(a, b) / inv_scale

    lhs = torch.randn(2, 4, 4)
    rhs = torch.randn(2, 4, 4)
    self.common(scaled_bmm, [lhs, rhs])
@xfail_if_triton_cpu
def test_index_dynamic_shapes(self):
    """Repro from vision_maskrcnn: bilinear-interpolation-style advanced
    indexing whose index extents are computed from symbolic sizes (ceil/floor
    of size * scale). The exact op sequence is the repro; keep verbatim.
    """

    def fn(arg0_1):
        unsqueeze = arg0_1.unsqueeze(0)
        sym_size = arg0_1.size(1)
        ceil = math.ceil(sym_size * 1.8735363483428955)
        iota = torch.ops.prims.iota.default(
            ceil,
            start=0,
            step=1,
            dtype=torch.int64,
            device=arg0_1.device,
            requires_grad=False,
        )
        convert_element_type_1 = iota.to(torch.float32)
        sym_size_1 = arg0_1.size(2)
        floor_1 = math.floor(sym_size_1 * 1.8735363483428955)
        ceil_1 = math.ceil(floor_1)
        iota_1 = torch.ops.prims.iota.default(
            ceil_1,
            start=0,
            step=1,
            dtype=torch.int64,
            device=arg0_1.device,
            requires_grad=False,
        )
        convert_element_type_3 = iota_1.to(torch.float32)
        # map destination pixel centers back to source coordinates
        sub_2 = (convert_element_type_1 + 0.5) * (sym_size / ceil) - 0.5
        clamp_min = sub_2.clamp_min(0.0)
        sub_3 = (convert_element_type_3 + 0.5) * (sym_size_1 / floor_1) - 0.5
        clamp_min_1 = sub_3.clamp_min(0.0)
        convert_element_type_4 = clamp_min.to(torch.int64)
        sub_4 = sym_size - 1
        clamp_max = clamp_min.ceil().clamp_max(sub_4)
        convert_element_type_5 = clamp_max.to(torch.int64)
        convert_element_type_6 = clamp_min_1.to(torch.int64)
        unsqueeze_2 = convert_element_type_4.unsqueeze(1)
        index = torch.ops.aten.index.Tensor(
            unsqueeze, [None, None, unsqueeze_2, convert_element_type_6]
        )
        index_1 = torch.ops.aten.index.Tensor(
            unsqueeze,
            [
                None,
                None,
                convert_element_type_5.unsqueeze(1),
                convert_element_type_6,
            ],
        )
        sub_6 = clamp_min.unsqueeze(1) - unsqueeze_2
        # blend lower/upper rows by the fractional offset
        mul_10 = (index * (1.0 - sub_6) + index_1 * (sub_6)) * (
            1.0 - (clamp_min_1 - convert_element_type_6)
        )
        select = torch.ops.aten.select.int(mul_10, 0, 0)
        return (select,)

    x = torch.randn(15, 20, 3)
    self.common(
        fn,
        [x],
    )
@skip_if_halide  # log2 not yet implemented
@skip_if_triton_cpu  # log2 implemented only in Dec 2024
def test_pow_by_natural_log2_dynamic_shapes(self):
    """2**floor(log2(size)+1) on a symbolic size under dynamic shapes."""

    @torch.compile(dynamic=True)
    def add_next_pow2(t):
        exponent = math.floor(math.log2(t.shape[0]) + 1)
        return t + 2**exponent

    self.common(add_next_pow2, [torch.randn(5)])
def test_setitem_with_int_parameter(self):
    """Indexed assignment a[n] = -1 with n as a plain int: positive indices
    share (at most) two compilations; a negative index forces a recompile.
    """
    x = torch.zeros(7, device=self.device)

    def fn(n, a):
        a[n] = -1
        return a

    cnts = CompileCounterWithBackend("inductor")
    opt_fn = torch.compile(fn, backend=cnts, fullgraph=True)

    for n in range(2, x.shape[0]):
        opt_fn(n, x)
        self.assertEqual(x[n], -1)

    # If assume_static_by_default is set, the calls above will trigger
    # 2 function compilations:
    # 1. assuming 'n' is static (equals 2)
    # 2. making 'n' dynamic, but with the guard 'end <= x.shape[0]'
    #    (from: torch._inductor.ir.SliceView.create)
    frame_count = 2 if torch._dynamo.config.assume_static_by_default else 1
    self.assertEqual(cnts.frame_count, frame_count)

    # Negative index triggers new compilation.
    opt_fn(-x.shape[0], x)
    self.assertEqual(x[0], -1)
    self.assertEqual(cnts.frame_count, frame_count + 1)
@config.patch({"triton.autotune_at_compile_time": False})
@config.patch(profiler_mark_wrapper_call=True)
def test_profiler_mark_wrapper_call(self):
    """With profiler_mark_wrapper_call enabled, the compiled wrapper must emit
    an 'inductor_wrapper_call' event visible to torch.profiler.
    """
    from torch.profiler import profile

    @torch.compile(backend="inductor", fullgraph=True)
    def fn(a, b):
        return a + b

    a = torch.rand((100,), device=self.device)
    b = torch.rand((100,), device=self.device)
    with profile() as prof:
        fn(a, b)
    assert any(
        "inductor_wrapper_call" in e.name for e in prof.profiler.function_events
    )
def test_insignificant_strides(self):
    """A view after a pointwise op must keep the eager output strides."""

    def bump_and_view(t):
        return (t + 1).view(-1, 1, 2)

    inp = torch.arange(8, device=self.device, dtype=torch.float32)
    expected = bump_and_view(inp)
    actual = torch.compile(bump_and_view)(inp)

    self.assertEqual(expected.stride(), actual.stride())
    self.assertEqual(expected, actual)
@unittest.skipIf(IS_X86 and not HAS_AVX2, "Requires AVX2")
def test_pixel_shuffle_channels_last(self):
    """pixel_shuffle + relu on a channels_last input."""

    def shuffle_relu(t):
        shuffled = torch.nn.functional.pixel_shuffle(t, 2)
        return torch.nn.functional.relu(shuffled)

    sample = torch.randn(1, 16, 64, 72).to(memory_format=torch.channels_last)
    self.common(shuffle_relu, (sample,))
def test_where_broadcast(self):
    """Two torch.where broadcasting repros: a plain broadcast (gh#93374) and a
    module with a lifted 0-d constant (gh#94725) run through make_fx.
    """
    # https://github.com/pytorch/pytorch/issues/93374
    def fn(x, p1, p0):
        o = torch.where(x, p1, p0)
        return o

    # https://github.com/pytorch/pytorch/issues/94725
    class Repro(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            # 0-d buffer that gets lifted as a fresh constant in the graph
            self._tensor_constant0 = nn.Buffer(torch.randn([], dtype=torch.float32))

        def forward(self, arg0_1, arg1_1):
            convert_element_type = torch.ops.prims.convert_element_type.default(
                arg1_1, torch.bool
            )
            bitwise_not = torch.ops.aten.bitwise_not.default(convert_element_type)
            _tensor_constant0 = self._tensor_constant0
            lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(
                _tensor_constant0
            )
            where = torch.ops.aten.where.self(bitwise_not, lift_fresh_copy, arg0_1)
            return (where, bitwise_not)

    self.common(
        fn,
        (torch.tensor([[True]]), torch.rand(13, 7, 3), torch.rand(1, 1)),
    )

    args = [
        torch.randn(1, 4, 64, 64),
        torch.zeros(1, 1, 64, 64, dtype=torch.uint8),
    ]
    args[1][:, :, :32, :32] = 1
    eager_args = [x.clone() for x in args]
    eager_mod = Repro()
    mod = make_fx(eager_mod, tracing_mode="real")(*args)
    compiled = compile_fx_inner(mod, args)
    inductor_out = compiled(args)
    eager_out = eager_mod(*eager_args)
    self.assertEqual(inductor_out, eager_out)
def test_require_stride_expanded(self):
    """Convolution on a channels_last input that was squeezed/re-unsqueezed:
    the expanded dim must not force an extra copy in require_stride_order.
    """

    def forward(arg6, arg7, arg16):
        convolution = torch.ops.aten.convolution(
            arg16.unsqueeze(0), arg7, arg6, [4, 4], [2, 2], [1, 1], False, [0, 0], 1
        )
        return (convolution,)

    self.common(
        forward,
        (
            None,
            rand_strided(
                (64, 3, 11, 11),
                (363, 121, 11, 1),
                torch.float32,
                device=self.device,
            ).to(memory_format=torch.channels_last),
            rand_strided(
                (1, 3, 224, 224),
                (150528, 50176, 224, 1),
                torch.float32,
                device=self.device,
            )
            .to(memory_format=torch.channels_last)
            .squeeze(0),
        ),
        atol=1e-3,
        rtol=0.001,
    )
    # expanded dim should not cause copy in require_stride_order
    assertGeneratedKernelCountEqual(self, 0)
@requires_gpu()
@parametrize("prefer_nd_tiling", (False, True))
@parametrize("use_block_ptr", (False, True))
@unittest.skipIf(
    not PLATFORM_SUPPORTS_FLASH_ATTENTION,
    "Does not support SDPA or pre-SM80 hardware",
)
def test_sdpa(self, use_block_ptr: bool, prefer_nd_tiling: bool):
    """scaled_dot_product_efficient_attention with a learned bias, checked for
    accuracy and for block-pointer codegen matching the use_block_ptr config.
    The aten graph below is a captured repro; keep it verbatim.
    """

    def foo(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1):
        # bias projection: (B*Q*K, C_bias) @ (C_bias, H) -> per-head bias
        view = torch.ops.aten.view.default(arg3_1, [23760, 128])
        arg3_1 = None
        mm = torch.ops.aten.mm.default(view, arg4_1)
        view = arg4_1 = None
        view_1 = torch.ops.aten.view.default(mm, [3, 99, 80, 8])
        mm = None
        view_2 = torch.ops.aten.view.default(view_1, [3, 99, 80, 8])
        view_1 = None
        permute = torch.ops.aten.permute.default(view_2, [0, 3, 1, 2])
        view_2 = None
        view_3 = torch.ops.aten.view.default(permute, [3, 8, 99, 80])
        permute = None

        clone = torch.ops.aten.clone.default(
            view_3, memory_format=torch.contiguous_format
        )
        view_3 = None

        expand = torch.ops.aten.expand.default(clone, [3, 8, 99, 80])
        clone = None
        _scaled_dot_product_efficient_attention = (
            torch.ops.aten._scaled_dot_product_efficient_attention.default(
                arg0_1, arg1_1, arg2_1, expand, False
            )
        )
        arg0_1 = arg1_1 = arg2_1 = expand = None
        getitem = _scaled_dot_product_efficient_attention[0]
        _scaled_dot_product_efficient_attention = None
        return (getitem,)

    if self.device == "cpu":
        raise unittest.SkipTest(f"requires {GPU_TYPE}")

    DEVICE = torch.device(f"{GPU_TYPE}:0")
    DTYPE = torch.float16
    B = 3
    H = 8
    Q = 99
    K = 80
    D = 32
    C_bias = 128

    # inputs
    query = torch.randn((B, H, Q, D), device=DEVICE, dtype=DTYPE)
    key = torch.randn((B, H, K, D), device=DEVICE, dtype=DTYPE)
    value = torch.randn((B, H, K, D), device=DEVICE, dtype=DTYPE)
    bias = torch.randn((B, Q, K, C_bias), device=DEVICE, dtype=DTYPE)
    weights = torch.randn((C_bias, H), device=DEVICE, dtype=DTYPE)
    inps = (query, key, value, bias, weights)

    with config.patch(
        {
            "triton.prefer_nd_tiling": prefer_nd_tiling,
            "triton.use_block_ptr": use_block_ptr,
            "triton.native_matmul": False,
        }
    ):
        # Check accuracy
        self.common(
            foo,
            inps,
            atol=0.02,
            rtol=1e4,
        )

        # Check code for block pointers
        foo_opt = torch.compile(foo, backend="inductor")
        code = run_and_get_triton_code(foo_opt, *inps)
        have_block_ptr = code.count("tl.make_block_ptr") > 0
        if not is_halide_backend(self.device):
            self.assertEqual(have_block_ptr, use_block_ptr)
@requires_gpu()
@unittest.skipIf(
    not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
    "Does not support mem_eff_attention",
)
def test_sdpa_unaligned_mask(self):
    """Efficient attention with a bias whose last dim (15) is unaligned:
    built by padding to 16 then slicing back, the pattern under test.
    """

    def foo(
        arg0_1: "f32[8, 8, 16, 16]",
        arg1_1: "f32[8, 8, 15, 16]",
        arg2_1: "f32[8, 8, 15, 16]",
        arg3_1: "f32[1, 1, 16, 15]",
    ):
        # pad to an aligned width, then slice back to the unaligned one
        constant_pad_nd: "f32[1, 1, 16, 16]" = (
            torch.ops.aten.constant_pad_nd.default(arg3_1, [0, 1], 0.0)
        )
        arg3_1 = None
        slice_1: "f32[1, 1, 16, 15]" = torch.ops.aten.slice.Tensor(
            constant_pad_nd, -1, 0, 15
        )
        constant_pad_nd = None
        expand: "f32[8, 8, 16, 15]" = torch.ops.aten.expand.default(
            slice_1, [8, 8, 16, 15]
        )
        slice_1 = None
        _scaled_dot_product_efficient_attention = (
            torch.ops.aten._scaled_dot_product_efficient_attention.default(
                arg0_1, arg1_1, arg2_1, expand, False
            )
        )
        arg0_1 = arg1_1 = arg2_1 = expand = None
        getitem: "f32[8, 8, 16, 16]" = _scaled_dot_product_efficient_attention[0]
        _scaled_dot_product_efficient_attention = None
        return (getitem,)

    query = torch.rand(8, 8, 16, 16, device=GPU_TYPE)
    key = torch.rand(8, 8, 15, 16, device=GPU_TYPE)
    value = torch.rand(8, 8, 15, 16, device=GPU_TYPE)
    bias = torch.rand(1, 1, 16, 15, device=GPU_TYPE)
    self.common(
        foo,
        (query, key, value, bias),
        atol=0.02,
        rtol=1e4,
    )
@requires_gpu()
@unittest.skipIf(
    not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION,
    "Does not support mem_eff_attention",
)
@config.patch(freezing=True)
def test_sdpa_unaligned_mask_freezing(self):
    """Same unaligned-bias attention pattern as test_sdpa_unaligned_mask, but
    with the bias held as module state under freezing=True.
    """

    class Mod(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            # unaligned bias becomes a frozen constant
            self.arg3_1 = torch.rand(1, 1, 16, 15, device=GPU_TYPE)

        def forward(
            self,
            arg0_1: "f32[8, 8, 16, 16]",
            arg1_1: "f32[8, 8, 15, 16]",
            arg2_1: "f32[8, 8, 15, 16]",
        ):
            arg3_1 = self.arg3_1
            constant_pad_nd: "f32[1, 1, 16, 16]" = (
                torch.ops.aten.constant_pad_nd.default(arg3_1, [0, 1], 0.0)
            )
            arg3_1 = None
            slice_1: "f32[1, 1, 16, 15]" = torch.ops.aten.slice.Tensor(
                constant_pad_nd, -1, 0, 15
            )
            constant_pad_nd = None
            expand: "f32[8, 8, 16, 15]" = torch.ops.aten.expand.default(
                slice_1, [8, 8, 16, 15]
            )
            slice_1 = None
            _scaled_dot_product_efficient_attention = (
                torch.ops.aten._scaled_dot_product_efficient_attention.default(
                    arg0_1, arg1_1, arg2_1, expand, False
                )
            )
            arg0_1 = arg1_1 = arg2_1 = expand = None
            getitem: "f32[8, 8, 16, 16]" = _scaled_dot_product_efficient_attention[
                0
            ]
            _scaled_dot_product_efficient_attention = None
            return (getitem,)

    query = torch.rand(8, 8, 16, 16, device=GPU_TYPE)
    key = torch.rand(8, 8, 15, 16, device=GPU_TYPE)
    value = torch.rand(8, 8, 15, 16, device=GPU_TYPE)

    mod = Mod()
    out_eager = mod(query, key, value)

    with torch.no_grad():
        out_compiled = torch.compile(mod)(query, key, value)
        self.assertEqual(out_eager, out_compiled, atol=0.02, rtol=1e4)
def test_where_with_logical_op(self):
    """torch.where conditioned on logical_and / logical_or of float inputs."""

    def fn_and(x, y):
        return torch.where(torch.logical_and(x, y), 1.0, 0.0)

    def fn_or(x, y):
        return torch.where(torch.logical_or(x, y), 1.0, 0.0)

    for fn in (fn_and, fn_or):
        self.common(fn, (torch.randn(32), torch.randn(32)))
@skipIfRocm
def test_conv_with_as_strided(self):
    """Conv output padded and then re-viewed twice with as_strided; the
    as_strided arguments are hard-coded to the conv output's size/stride.
    """

    class Model(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.kv = torch.nn.Conv2d(
                256, 384, kernel_size=(1, 1), stride=(1, 1), bias=False
            )

        def forward(self, x):
            convolution = self.kv(x)
            constant_pad_nd = torch.ops.aten.constant_pad_nd.default(
                convolution, [2, 2, 2, 2], 0.0
            )
            # as_strided inputs depend on the input's size and stride.
            as_strided = torch.ops.aten.as_strided.default(
                constant_pad_nd, [8, 384, 2, 20, 12], [153600, 400, 160, 1, 20]
            )
            as_strided_1 = torch.ops.aten.as_strided.default(
                as_strided, [8, 384, 2, 2, 12, 12], [153600, 400, 160, 8, 20, 1]
            )
            clone = torch.ops.aten.clone.default(
                as_strided_1, memory_format=torch.contiguous_format
            )
            return clone

    self.common(
        Model(),
        (torch.randn(8, 256, 16, 16),),
        check_lowp=not is_halide_backend(self.device),
    )
def test_inplace_where_pointwise(self):
    """An in-place indexed write followed by a broadcasting multiply."""
    # https://github.com/pytorch/pytorch/issues/96446

    def mutate_then_mul(u, v):
        u[0] = 2
        return u * v

    self.common(mutate_then_mul, (torch.rand(1), torch.rand(2)))
@xfail_if_triton_cpu
def test_view_on_aliased(self):
    """Ops whose results are discarded (the `>=` comparisons, noqa'd below)
    must not corrupt views of aliased buffers.
    """
    # https://github.com/pytorch/pytorch/issues/96728
    def fn1(a, b):
        a = a.max(0).values
        c = torch.cat((a, b))
        c = c.round()
        b >= a[0]  # noqa: B015
        return c

    some_const = torch.tensor(6324)

    def fn2():
        a = torch.tensor([[0.6324]])
        ret = torch.cat((a, a), dim=0)
        some_const >= a[0]  # noqa: B015
        return ret

    self.common(fn1, (torch.tensor([[4.0]]), torch.tensor([5.0])))
    self.common(fn2, ())
def test_argmax_to_float(self):
    """argmax result cast to float then reduced (repro)."""
    # https://github.com/pytorch/pytorch/issues/97127

    def fn():
        zeros = torch.zeros([2, 2])
        indices = zeros.argmax(0)
        return indices.float().mean()

    self.common(fn, ())
def test_const_int32_to_float(self):
    """A constant int32 tensor cast to float32 and scaled (repro)."""
    # https://github.com/pytorch/pytorch/issues/97124

    def fn():
        ints = torch.zeros([1, 2], dtype=torch.int32)
        ints = ints + ints
        floats = ints.to(dtype=torch.float32)
        return floats * 0.8

    self.common(fn, ())
def test_getitem(self):
    """List indexing where the index is computed from Python data.

    Bug fix: the original discarded the result of `same(...)`, so a mismatch
    between eager and compiled outputs could never fail the test; the result
    is now asserted.
    """
    out_features = ["p3", "p4", "p5", "p6", "p7"]
    in_feature = "p5"

    def fn(a):
        # index computed at trace time from non-tensor data
        return a[out_features.index(in_feature)]

    x = [
        torch.rand([1, 256, 100, 152], device=self.device),
        torch.rand([1, 256, 50, 76], device=self.device),
        torch.rand([1, 256, 25, 38], device=self.device),
    ]
    opt_fn = torch.compile(fn, backend="inductor")
    self.assertTrue(same(fn(x), opt_fn(x)))
def test_pad_view(self):
    """Pad the penultimate dim, then view with the last two dims swapped."""

    def pad_then_view(t):
        padded = torch.nn.functional.pad(t, (0, 0, 0, 1))
        *lead, h, w = padded.size()
        return padded.view(*lead, w, h)

    sample = torch.rand(48, 3, 512, 512)
    self.common(pad_then_view, (sample,))
def test_pad_single(self):
    """Symmetric padding of a single-element tensor."""

    def pad10(t):
        return torch.nn.functional.pad(t, (10, 10))

    self.common(pad10, (torch.rand(1, 1, 1),))
def test_pad_cast(self):
    """Int-to-float cast fused with padding, for both int32 and int64."""

    def cast_then_pad(t):
        return torch.nn.functional.pad(t.to(torch.float32), (0, 3, 0, 0))

    for int_dtype in (torch.int32, torch.int64):
        self.common(cast_then_pad, (torch.ones(1, 1, 13, dtype=int_dtype),))
@unittest.skipIf(not HAS_CPU, "requires C++ compiler")
@skip_if_triton  # No inductor data type propagation pass on scheduler nodes
@skip_if_halide  # bf16
def test_data_type_propogation(self):
    """Lower a bf16 max_pool2d graph by hand and verify DataTypePropagation
    assigns the expected dtype to every node kind in the scheduler body.

    NOTE(review): "propogation" in the name is a typo, but renaming would
    change test discovery, so it is kept.
    """
    from torch._dynamo.utils import detect_fake_mode
    from torch._inductor.codegen.common import boolean_ops
    from torch._inductor.compile_fx import shape_env_from_inputs
    from torch._inductor.debug import DebugContext
    from torch._inductor.decomposition import decompositions
    from torch._inductor.graph import GraphLowering
    from torch._inductor.virtualized import V
    from torch.fx.passes.fake_tensor_prop import FakeTensorProp

    def get_data_type(node: torch.fx.Node):
        # dtype recorded by the propagation pass, if any
        if OptimizationContext.key in node.meta:
            return node.meta[OptimizationContext.key].dtype
        else:
            return None

    def func(arg0_1):
        max_pool2d_with_indices = torch.ops.aten.max_pool2d_with_indices.default(
            arg0_1, [3, 3], [2, 2], [1, 1]
        )
        arg0_1 = None
        getitem = max_pool2d_with_indices[0]
        max_pool2d_with_indices = None
        return (getitem,)

    example_inputs = [
        torch.randn(10, 32, 20, 20, dtype=torch.bfloat16).to(
            memory_format=torch.channels_last
        )
    ]

    gm = make_fx(func, decomposition_table=decompositions, tracing_mode="fake")(
        *example_inputs
    )

    shape_env = shape_env_from_inputs(example_inputs)

    fake_mode = detect_fake_mode(example_inputs)
    if not fake_mode:
        fake_mode = torch._subclasses.FakeTensorMode(allow_non_fake_inputs=True)
        FakeTensorProp(gm, mode=fake_mode).propagate(*example_inputs)
    else:
        FakeTensorProp(gm, mode=fake_mode).propagate_dont_convert_inputs(
            *example_inputs
        )
    with V.set_fake_mode(fake_mode):
        graph = GraphLowering(
            gm,
            shape_env=shape_env,
        )
        with V.set_graph_handler(graph), V.set_debug_handler(DebugContext()):
            graph.run(*example_inputs)
            graph.compile_to_module()
            scheduler_node = graph.scheduler.nodes[0]
            DataTypePropagation.propagate_scheduler_node(scheduler_node)
            root_graph = scheduler_node._body.root_block.graph
            # Check the propagated dtype per node kind.
            for node in root_graph.nodes:
                if node.op == "placeholder":
                    self.assertEqual(get_data_type(node), None)
                elif node.target in boolean_ops():
                    self.assertEqual(get_data_type(node), torch.bool)
                elif node.target in (
                    "constant",
                    "to_dtype",
                    "index_expr",
                ):
                    self.assertEqual(get_data_type(node), node.args[-1])
                elif node.target in (
                    "get_index",
                    "index_expr",
                ):
                    self.assertEqual(get_data_type(node), torch.int64)
                elif node.target in (
                    "load",
                    "store",
                ):
                    self.assertEqual(
                        get_data_type(node), V.graph.get_dtype(node.args[1])
                    )
                elif node.target == "reduction":
                    _, _, dtype, _, _, _, _ = node.args
                    self.assertEqual(get_data_type(node), dtype)
                elif node.target.startswith("masked_subblock"):
                    """
                    masked_subblocks:
                    opcode      name       target    args                       kwargs
                    ----------- ---------  --------- -------------------------- --------
                    placeholder ops        ops       ()                         {}
                    call_module get_index  get_index ('index2',)                {}
                    call_method load       load      (ops, 'arg0_1', get_index) {}
                    call_method to_dtype   to_dtype  (ops, load, torch.float32) {}
                    output      output     output    (to_dtype,)                {}
                    """
                    self.assertEqual(get_data_type(node), torch.float)
                elif node.target == "and_":
                    """
                    and_'s input is boolean_ops:
                    ----------- ---------  --------- -------------------------- --------
                    call_method and__22    and_      (ops, ge_15, lt_15)
                    ----------- ---------  --------- -------------------------- --------
                    """
                    self.assertEqual(get_data_type(node), torch.bool)
                elif node.target == "maximum":
                    """
                    maximum's input is maximum or masked_subblock:
                    ----------- ---------  --------- -------------------------- --------
                    call_method maximum_6  maximum   (ops, masked_subblock8, maximum_5)
                    ----------- ---------  --------- -------------------------- --------
                    """
                    self.assertEqual(get_data_type(node), torch.float)
                elif node.target == "output":
                    self.assertEqual(get_data_type(node), torch.bfloat16)
# Calling div only torch.SymInt arguments is not yet supported.
# To support this behavior, we need to allow const-propping tensors that store symint data.
# For now, dynamo will explicitly graph break when it encounters user code with this behavior.
@expectedFailureCodegenDynamic
@xfailIfS390X
@skip_if_gpu_halide  # accuracy error
def test_AllenaiLongformerBase_repro(self):
    """Longformer sliding-window attention score layout: scatter into a
    chunked view, transpose, then mask the beginning with -inf in place.
    """

    def fn(query, scores, window_overlap):
        batch_size, seq_len, num_heads, _ = query.size()
        chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1
        diagonal_attention_scores = scores.new_zeros(
            (
                batch_size * num_heads,
                chunks_count + 1,
                window_overlap,
                window_overlap * 2 + 1,
            )
        )
        diagonal_attention_scores[:, :-1, :, window_overlap:] = scores[
            :, :, :window_overlap, : window_overlap + 1
        ]
        input_tensor = diagonal_attention_scores.view(
            batch_size, num_heads, seq_len, 2 * window_overlap + 1
        ).transpose(2, 1)
        beginning_input = input_tensor[:, :window_overlap, :, : window_overlap + 1]
        # in-place masked fill of the leading window with -inf
        input_tensor[:, :window_overlap, :, : window_overlap + 1] = torch.full_like(
            beginning_input, -float("inf")
        )
        return input_tensor

    args = [
        ((4, 1024, 12, 64), (768, 3072, 64, 1)),
        ((48, 3, 512, 513), (787968, 262656, 513, 1)),
    ]
    args = [rand_strided(sh, st) for (sh, st) in args]
    args.append(256)

    if is_cpp_backend(self.device):
        opt_fn = torch.compile(fn, backend="inductor")
        _, code = run_and_get_cpp_code(opt_fn, *args)
        num = (
            2
            if cpu_vec_isa.valid_vec_isa_list()
            and os.getenv("ATEN_CPU_CAPABILITY") != "default"
            else 1
        )
        FileCheck().check_count(
            "static_cast<int64_t>(256)",
            num,
            exactly=True,
        ).run(code)
    self.common(fn, args)
    def test_cumsum_pattern_matcher_issue(self):
        """Regression test: cumsum over a ones attention mask should compile
        correctly through the pattern matcher (exact graph shape matters, so
        the body mirrors the original HF-style code)."""

        def fn(input_ids) -> torch.Tensor:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size, seq_length = input_shape
            past_key_values_length = 0
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(
                batch_size, mask_seq_length, device=input_ids.device
            )
            attention_mask = attention_mask.long()
            return torch.cumsum(attention_mask, dim=1)

        x = torch.randn(2, 2)
        # Exact integer result, so no tolerance.
        self.common(fn, (x,), atol=0, rtol=0)
    @staticmethod
    def _check_resize_common(
        self, fn, x, size_or_y, memory_format, inplace, deterministic
    ):
        """Shared checker for resize/resize_as tests.

        Declared @staticmethod but takes the test instance explicitly so it can
        be invoked as ``CommonTemplate._check_resize_common(self, ...)``.
        Compares eager vs. compiled results; only the first min(old, new) numel
        elements are well-defined unless deterministic fill is enabled.
        """
        x = x.to(self.device)
        x_ref_arg = x.clone()
        x_opt_arg = x.clone()
        x_numel = x.numel()
        torch._dynamo.reset_code_caches()
        opt_fn = torch._dynamo.optimize_assert(compile_fx)(fn)
        correct = fn(x_ref_arg, size_or_y, memory_format)
        actual = opt_fn(x_opt_arg, size_or_y, memory_format)

        def get_numel(size_or_y):
            # Accepts either a target tensor (resize_as) or a shape (resize).
            if isinstance(size_or_y, torch.Tensor):
                return size_or_y.numel()
            else:
                # assume shape
                return functools.reduce(lambda x, y: x * y, size_or_y, 1)

        if deterministic:
            # Deterministic mode fills uninitialized memory, so every element
            # of the result is comparable.
            nele_check = correct.numel()
        else:
            nele_check = min(x_numel, get_numel(size_or_y))
        correct_values = correct.as_strided((nele_check,), (1,))
        actual_values = actual.as_strided((nele_check,), (1,))
        self.assertTrue(same(correct_values, actual_values, equal_nan=deterministic))
        correct_strides = correct.stride()
        actual_strides = actual.stride()
        self.assertEqual(correct_strides, actual_strides)
    @staticmethod
    def _cases_resize_common():
        """Yield (input, target_shape, memory_format) cases for resize tests,
        covering growing/shrinking sizes, channels-last formats for 4/5-D
        targets, and a non-contiguous input variant."""
        sizes = [
            ((2,), (1, 3, 2, 3)),
            ((100,), (1, 3, 2, 3)),
            ((1, 3, 2, 3), (1, 3, 2, 3)),
            ((2,), (1, 3, 2, 3, 1)),
            ((100,), (1, 3, 2, 3, 1)),
            ((1, 3, 2, 3, 1), (1, 3, 2, 3, 1)),
            ((2, 0, 1), (2, 2)),
        ]
        for x_size, y_size in sizes:
            memory_formats = [torch.contiguous_format]
            if len(y_size) == 4:
                memory_formats.append(torch.channels_last)
            if len(y_size) == 5:
                memory_formats.append(torch.channels_last_3d)
            for memory_format in memory_formats:
                x = torch.randn(*x_size)
                yield x, y_size, memory_format
                # check some non-contiguous tensors
                if x.numel() == 100:
                    x_strided = x[::2].reshape(25, 2).transpose(0, 1)
                    yield x_strided, y_size, memory_format
    def test_resize(self):
        """Functional aten.resize across all shared cases, with and without
        deterministic uninitialized-memory fill."""

        def fn(x, size, memory_format):
            # NOTE: Tensor.resize() =/= aten::resize()
            return torch.ops.aten.resize(x, size, memory_format=memory_format)

        for deterministic in [True, False]:
            with DeterministicGuard(
                deterministic, fill_uninitialized_memory=deterministic
            ):
                for x, y_size, memory_format in CommonTemplate._cases_resize_common():
                    CommonTemplate._check_resize_common(
                        self,
                        fn,
                        x,
                        y_size,
                        memory_format,
                        inplace=False,
                        deterministic=deterministic,
                    )
    @staticmethod
    def _cases_resize_as_common():
        """Yield (input, target_tensor, memory_format) cases for resize_as,
        expanding each base resize case three ways (see inline note)."""
        for x, y_size, memory_format in CommonTemplate._cases_resize_common():
            # each sizes /memory_format combination tested in 2 ways:
            # 1. y is contiguous fn gets memory_format kwargs
            # 2. y has memory_format contiguity and fn gets preserve kwarg
            # 3. y has some other strides (not contiguous or channels last) and fn gets preserve
            yield x, torch.randn(*y_size), memory_format
            yield (
                x,
                torch.randn(*y_size).contiguous(memory_format=memory_format),
                torch.preserve_format,
            )
            yield (
                x,
                torch.randn(*y_size).permute(tuple(reversed(range(len(y_size))))),
                torch.preserve_format,
            )
    def test_resize_as(self):
        """Functional aten.resize_as across all shared cases, with and without
        deterministic uninitialized-memory fill."""

        def fn(x, y, memory_format):
            return torch.ops.aten.resize_as(x, y, memory_format=memory_format)

        for deterministic in [True, False]:
            with DeterministicGuard(
                deterministic, fill_uninitialized_memory=deterministic
            ):
                for x, y, memory_format in CommonTemplate._cases_resize_as_common():
                    CommonTemplate._check_resize_common(
                        self,
                        fn,
                        x,
                        y,
                        memory_format,
                        inplace=False,
                        deterministic=deterministic,
                    )
def test_inplace_resize_as(self):
def fn(x, y):
x.resize_as_(y)
return x
x = torch.randn(2, 3)
y = torch.randn(200, 300)
x_clone = x.clone()
opt_fn = torch.compile(fn, backend="inductor")
same(fn(x, y), opt_fn(x_clone, y))
@xfail_if_triton_cpu
def test_erfc(self):
def fn(x):
return torch.erfc(x)
self.common(fn, (torch.randn(8, 8),))
    @skip_if_halide  # erfinv not implemented
    @xfail_if_triton_cpu
    def test_erfinv(self):
        """Lowering of torch.erfinv, sampled inside its valid domain."""

        def fn(x):
            return torch.erfinv(x)

        # domain for erfinv is (-1, 1)
        x = torch.empty(8, 8).uniform_(-1, 1)
        self.common(fn, (x,))
def test_uint(self):
def fn(z):
x = torch.tensor(5, device=z.device, dtype=torch.uint8)
y = torch.neg(x)
return x < y
self.common(fn, (torch.randn(26),))
    def test_scaled_dot_product_attention(self):
        """SDPA with transposed/contiguous inputs and an explicit scale."""
        if self.device == "cuda" and not PLATFORM_SUPPORTS_FLASH_ATTENTION:
            raise unittest.SkipTest("Can't run flash attention on this platform")

        def fn(q, k, v):
            # Only the first two outputs (attention, logsumexp) are compared.
            return torch.nn.functional.scaled_dot_product_attention(
                q.transpose(1, 2).contiguous(),
                k.transpose(1, 2),
                v.transpose(1, 2),
                scale=0.125,
            )[:2]

        self.common(
            fn,
            (
                torch.randn(4, 2, 4, 2),
                torch.randn(4, 2, 4, 2),
                torch.randn(4, 2, 4, 2),
            ),
            atol=2e-4,  # to pass lowp check on GPU
            rtol=1e-2,  # to pass lowp check on GPU
        )
    @xfail_if_mps_unimplemented
    @expectedFailureXPU
    @unittest.skipIf(
        not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Some archs don't support mem eff SDPA"
    )
    def test_scaled_dot_product_efficient_attention(self):
        """Memory-efficient SDPA variant (GPU only)."""
        if self.device == "cpu":
            raise unittest.SkipTest(f"requires {GPU_TYPE}")

        # The first two values should be the same, attention output
        # and logsumexp since dropout is not being set
        def fn(q, k, v, attn_bias, compute_log_sumexp):
            return aten._scaled_dot_product_efficient_attention(
                q, k, v, attn_bias, compute_log_sumexp
            )[:2]

        self.common(
            fn,
            (
                torch.randn(4, 4, 36, 36),
                torch.randn(4, 4, 36, 36),
                torch.randn(4, 4, 36, 36),
                torch.randn(4, 4, 36, 36),
                False,
            ),
            check_lowp=False,
        )
def test_fft_real_input(self):
def fn(x):
return torch.fft.fftn(x)
self.common(fn, (torch.randn((16, 16, 16)),), check_lowp=False)
def test_fft_real_input_real_output(self):
def fn(x):
return torch.fft.fftn(x).real
self.common(fn, (torch.randn((16, 16, 16)),), check_lowp=False)
    def test_searchsorted(self):
        """torch.searchsorted over scalar/1-D/N-D sequences, both sides,
        int32/int64 outputs, and sorted vs. sorter-index inputs."""

        def fn(sorted_sequence, values, out_int32, right, side, sorter):
            return torch.searchsorted(
                sorted_sequence,
                values,
                out_int32=out_int32,
                right=right,
                side=side,
                sorter=sorter,
            )

        shapes = (
            ((1,), (16, 16)),  # scalar sorted_sequence
            ((16,), ()),  # scalar values
            ((32,), (16, 16)),  # 1-D sorted_sequence
            ((16, 32), (16, 16)),  # N-D sorted_sequence
            ((3, 5), (3, 7)),  # prime dimensioned sequence, to flush out indexing bugs
        )
        booleans = (False, True)

        for (seq_shape, value_shape), out_int32, right in itertools.product(
            shapes, booleans, booleans
        ):
            unsorted_sequence = torch.rand(seq_shape)
            sorted_sequence, sorting_indices = torch.sort(unsorted_sequence)
            values = torch.rand(value_shape)

            # side must be consistent with right for the two calls to agree.
            side = "right" if right else "left"

            self.common(
                fn,
                (sorted_sequence, values, out_int32, right, side, None),
                check_lowp=False,
            )

            # Same lookup expressed via an unsorted sequence plus sorter indices.
            self.common(
                fn,
                (
                    unsorted_sequence,
                    values,
                    out_int32,
                    right,
                    side,
                    sorting_indices,
                ),
                check_lowp=False,
            )
    @requires_gpu()
    @skip_if_gpu_halide
    @skip_if_not_triton
    def test_searchsorted_broadcast(self):
        """searchsorted followed by a broadcast should stay in two kernels
        (the expensive bucketize must not be fused into the broadcast)."""

        def fn(sorted_sequence, values):
            return (
                torch.searchsorted(
                    sorted_sequence,
                    values,
                )
                .unsqueeze(-1)
                .expand(-1, 64)
                .contiguous()
            )

        unsorted_sequence = torch.rand((32,))
        sorted_sequence, sorting_indices = torch.sort(unsorted_sequence)
        values = torch.rand((64,))

        self.common(fn, (sorted_sequence, values), check_lowp=False)

        cfn = torch.compile(fn)
        _, code = run_and_get_code(
            cfn, sorted_sequence.to(GPU_TYPE), values.to(GPU_TYPE)
        )

        # make sure that we did not fuse the broadcast and the bucketize,
        # because bucketize is computationally expensive.
        FileCheck().check("def triton").check("def triton").run(code[0])
@parametrize("nd_tiling", (False, True))
def test_bucketize(self, nd_tiling: bool):
def fn(input, boundaries, out_int32, right):
return torch.bucketize(input, boundaries, out_int32=out_int32, right=right)
input = torch.rand((64, 64)) * 2 - 1
boundaries = torch.tensor([-0.9, -0.8, 0.1, 0.2, 0.5, 0.9])
for out_int32 in [True, False]:
for right in [True, False]:
out_int32 = True
right = False
with config.patch("triton.prefer_nd_tiling", nd_tiling):
self.common(
fn, (input, boundaries, out_int32, right), check_lowp=False
)
def test_bucketize_default_kwargs(self):
def fn(input, offsets):
return torch.bucketize(input, offsets)
input = torch.tensor(
[-1.0, -0.9, -0.8, -0.5, 0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 0.9, 0.91]
)
offsets = torch.tensor([-0.9, -0.8, 0.1, 0.2, 0.5, 0.9])
self.common(fn, (input, offsets), check_lowp=False)
    @parametrize(
        "dtype_input, dtype_boundaries",
        list(itertools.product(test_int_dtypes, test_int_dtypes)),
    )
    def test_bucketize_int(
        self, dtype_input: torch.dtype, dtype_boundaries: torch.dtype
    ):
        """bucketize over every pairing of integer input/boundary dtypes."""

        def fn(input, offsets, out_int32, right):
            return torch.bucketize(input, offsets, out_int32=out_int32, right=right)

        input = torch.randint(-(2**10), 2**10, (64, 64)).to(dtype_input)
        # Quadratic, offset boundary values spanning negative and positive.
        offsets = (torch.arange(10, dtype=torch.int32) ** 2 - 512).to(dtype_boundaries)

        for out_int32 in [True, False]:
            for right in [True, False]:
                self.common(fn, (input, offsets, out_int32, right), check_lowp=False)
    @patch.object(config.triton, "autotune_pointwise", True)
    def test_bucketize_add_autotune(self):
        """bucketize fused with an add under pointwise autotuning; expects a
        single generated kernel."""

        # Causes a @pointwise(size_hints) where size_hints is 2D
        def fn(input, offsets, add_value):
            return torch.bucketize(input, offsets) + add_value

        input = torch.rand((16, 16, 64, 64))
        boundaries = torch.tensor([-0.9, -0.8, 0.1, 0.2, 0.5, 0.9])
        # channels_last addend forces a non-trivial layout into the fusion.
        add_value = torch.randint(0, 1024, (16, 16, 64, 64)).to(
            memory_format=torch.channels_last
        )

        self.common(fn, (input, boundaries, add_value), check_lowp=False)

        assertGeneratedKernelCountEqual(self, 1)
def test_bucketize_computed_offsets(self):
def fn(inp, offsets):
return torch.bucketize(inp, offsets + 0.01)
inp = torch.tensor(
[-1.0, -0.9, -0.8, -0.5, 0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 0.9, 0.91]
)
offsets = torch.tensor([-0.9, -0.8, 0.1, 0.2, 0.5, 0.9]) - 0.01
self.common(fn, (inp, offsets), check_lowp=False)
    @requires_gpu()
    @skip_if_gpu_halide
    @skip_if_not_triton
    def test_bucketize_broadcast(self):
        """bucketize followed by a broadcast must remain two kernels (no
        fusion of the expensive bucketize into the broadcast)."""

        def fn(input, boundaries):
            return (
                torch.bucketize(input, boundaries)
                .unsqueeze(-1)
                .expand(-1, -1, 64)
                .contiguous()
            )

        inp = torch.rand((64, 64)) * 2 - 1
        boundaries = torch.tensor([-0.9, -0.8, 0.1, 0.2, 0.5, 0.9])

        self.common(fn, (inp, boundaries), check_lowp=False)

        cfn = torch.compile(fn)
        _, code = run_and_get_code(cfn, inp.to(GPU_TYPE), boundaries.to(GPU_TYPE))

        # make sure that we did not fuse the broadcast and the bucketize,
        # because bucketize is computationally expensive.
        FileCheck().check("def triton").check("def triton").run(code[0])
    @requires_gpu()
    @config.patch(assume_aligned_inputs=False)
    def test_config_option_dont_assume_alignment(self):
        """With assume_aligned_inputs=False, compiled code must be correct for
        inputs at arbitrary storage offsets (i.e. misaligned views)."""

        def fn(x: torch.Tensor) -> torch.Tensor:
            return x.sin() + x.cos()

        # Inductor specializes on the (unguarded) alignment of the initial input.
        # Make sure that for different configurations, nothing breaks.
        for offset in (0, 1, 2, 3, 4):
            base = torch.randn(64 * 64 + 64, dtype=torch.float32, device=self.device)
            inp = torch.as_strided(base, (64, 64), (64, 1), offset)
            torch._dynamo.reset()
            fn_c = torch.compile(fn)

            ref = fn(inp)
            res = fn_c(inp)
            self.assertEqual(ref, res)

            # Re-run the already-compiled fn on every other offset without
            # resetting, to catch stale alignment assumptions.
            for offset2 in (0, 1, 2, 3, 4):
                base2 = torch.randn(
                    64 * 64 + 64, dtype=torch.float32, device=self.device
                )
                inp2 = torch.as_strided(base2, (64, 64), (64, 1), offset2)
                ref2 = fn(inp2)
                res2 = fn_c(inp2)
                self.assertEqual(ref2, res2, atol=1e-5, rtol=1e-5)
    @requires_gpu()
    @config.patch(assume_aligned_inputs=False)
    def test_config_option_dont_assume_alignment_recompiles(self):
        """A storage-offset change alone must not trigger a recompile when
        alignment is not assumed."""
        # Inputs:
        # 1. (32, 32) shape
        # 2. (64, 64) shape -> causes a recompile
        # 3. (64, 64) shape with different storage offset -> should NOT cause a recompile
        failed_guards = []

        def fail(guard):
            nonlocal failed_guards
            failed_guards.append(guard)

        def fn(x: torch.Tensor) -> torch.Tensor:
            return x.sin() + x.cos()

        base = torch.randn(64 * 64 + 64, dtype=torch.float32, device=self.device)

        inp1 = torch.as_strided(base, (32, 32), (32, 1), 4)
        inp2 = torch.as_strided(base, (64, 64), (64, 1), 4)
        inp3 = torch.as_strided(base, (64, 64), (64, 1), 5)
        torch._dynamo.reset()
        fn_c = torch._dynamo.optimize("inductor", guard_fail_fn=fail)(fn)

        ref1 = fn(inp1)
        res1 = fn_c(inp1)
        self.assertEqual(ref1, res1)
        self.assertEqual(0, len(failed_guards))

        ref2 = fn(inp2)
        res2 = fn_c(inp2)
        self.assertEqual(ref2, res2)
        # if dynamic shapes isn't already turned on, we might have a guard failure as we turn
        # on dynamic shapes
        self.assertLessEqual(len(failed_guards), 1)
        failed_guard_count_iteration_2 = len(failed_guards)

        failed_guards = []
        ref3 = fn(inp3)
        res3 = fn_c(inp3)
        self.assertEqual(ref3, res3)
        # we might still have the dynamics shapes failure, but offset change shouldn't be guarded on
        # see Note: [Input Alignment handling in Inductor]
        self.assertLessEqual(len(failed_guards), failed_guard_count_iteration_2)
    @requires_gpu()
    @config.patch(assume_aligned_inputs=False)
    def test_config_option_dont_assume_alignment_cudagraphs(self):
        """Misaligned inputs under cudagraphs (reduce-overhead) and dynamic
        shapes: forward and backward must match eager, including grads flowing
        back to the strided base tensor."""

        def fn(x):
            return x.cos() * x.sin()

        fn_c = torch.compile(fn, mode="reduce-overhead", dynamic=True)

        for size, stride, offset in (
            ((32, 32), (32, 1), 4),
            ((48, 48), (48, 1), 4),
            ((64, 64), (64, 1), 5),
        ):
            # Same seed twice so eager and compiled runs see identical data.
            torch.manual_seed(42)
            base = torch.randn(64 * 64 + 64, dtype=torch.float32, device=self.device)
            torch.manual_seed(42)
            base_ref = torch.randn(
                64 * 64 + 64, dtype=torch.float32, device=self.device
            )

            inp = torch.as_strided(base, size, stride, offset)
            inp_ref = torch.as_strided(base_ref, size, stride, offset)

            inp.requires_grad_(True)
            inp_ref.requires_grad_(True)

            res = fn_c(inp)
            ref = fn(inp_ref)
            self.assertEqual(ref, res)

            res.sum().backward()
            ref.sum().backward()
            self.assertEqual(base.grad, base_ref.grad)
    @config.patch(implicit_fallbacks=True)
    def test_custom_op_1(self):
        """A simple custom op (single tensor in/out) falls back correctly
        inside a compiled region via implicit fallbacks."""
        import torch.library

        def foo(x):
            return 3 * x

        def foo_meta(x):
            return torch.empty_like(x)

        define_custom_op_for_test("foo", foo, foo_meta)

        def fn(x):
            a = torch.nn.functional.relu(x)
            b = torch.ops.test.foo(a)
            c = torch.cos(b)
            return c

        self.common(fn, (torch.randn((16, 32)),), check_lowp=False)
    @config.patch(implicit_fallbacks=True)
    def test_custom_op_2(self):
        """Custom op with a non-tensor (float) argument and multiple tensor
        outputs, exercised through the implicit-fallback path."""
        import torch.library

        def foo(x, scale: float):
            return scale * x, torch.cos(x)

        def foo_meta(x, scale: float):
            return torch.empty_like(x), torch.empty_like(x)

        define_custom_op_2_for_test("foo2", foo, foo_meta)

        def fn(x, scale: float):
            a = torch.nn.functional.relu(x)
            return torch.ops.test.foo2(a, scale)

        self.common(fn, (torch.randn((16, 32)), 2.0), check_lowp=False)
    @config.patch(implicit_fallbacks=True)
    def test_custom_op_3(self):
        """Custom op taking a list of tensors (reduces them by summation)."""

        def foo(x):
            result = torch.zeros_like(x[0])
            for t in x:
                result += t
            return result

        def foo_meta(x):
            return torch.empty_like(x[0])

        define_custom_op_3_for_test("foo3", foo, foo_meta)

        def fn(x):
            return torch.ops.test.foo3(x)

        self.common(
            fn,
            ([torch.randn((16, 32)), torch.randn((16, 32)), torch.randn((16, 32))],),
            check_lowp=False,
        )
    @requires_gpu()
    @skip_if_not_triton
    @skip_if_cpp_wrapper("skip cpp_wrapper tests")
    @config.patch(implicit_fallbacks=True)
    def test_generated_code_has_size_stride_assert(self):
        """Generated wrapper code around a fallback custom op should contain a
        well-formed assert_size_stride(...) call (shape, strides, op name)."""

        def foo(x):
            return 3 * x

        def foo_meta(x):
            return torch.empty_like(x)

        define_custom_op_for_test("foo", foo, foo_meta)

        def fn(x):
            a = torch.nn.functional.relu(x)
            b = torch.ops.test.foo(a)
            return b

        a = torch.randn((16, 32), device=self.device)

        _, code = run_and_get_code(
            torch.compile(fn),
            a,
        )
        if not is_dynamic_shape_enabled():
            # Best-effort: only validate the format when the assert is present.
            if code and len(code) > 0 and "assert_size_stride(" in code[0]:
                try:
                    FileCheck().check_regex(
                        r"assert_size_stride\s*\(\s*[^,]+,\s*\([^\)]*\),\s*\([^\)]*\),\s*'[^']+'\s*\)"
                    ).run(code[0])
                except Exception as e:
                    print(f"Failed regex match for assert_size_stride: {e}")
                    print(code[0])
                    raise e
            else:
                print("Skipping: No assert_size_stride found.")
    @requires_gpu()
    @skip_if_not_triton
    @skip_if_cpp_wrapper("skip cpp_wrapper tests")
    @config.patch(implicit_fallbacks=True)
    def test_generated_code_has_alignment_assert(self):
        """Generated wrapper code around a fallback custom op should contain a
        well-formed assert_alignment(...) call (tensor, alignment, op name)."""

        def foo(x):
            return 3 * x

        def foo_meta(x):
            return torch.empty_like(x)

        define_custom_op_for_test("foo", foo, foo_meta)

        def fn(x):
            a = torch.nn.functional.relu(x)
            b = torch.ops.test.foo(a)
            return b

        a = torch.randn((16, 32), device=self.device)

        _, code = run_and_get_code(
            torch.compile(fn),
            a,
        )
        if not is_dynamic_shape_enabled():
            # Best-effort: only validate the format when the assert is present.
            if code and len(code) > 0 and "assert_alignment(" in code[0]:
                try:
                    FileCheck().check_regex(
                        r"assert_alignment\s*\(\s*[^,]+,\s*[^,]+,\s*'[^']+'\s*\)"
                    ).run(code[0])
                except Exception as e:
                    print(f"Failed regex match for assert_alignment: {e}")
                    print(code[0])
                    raise e
            else:
                print("Skipping: No assert_alignment found.")
def test_assert_size_stride_op_name_pass(self):
tensor = torch.empty((16, 32))
assert_size_stride(tensor, (16, 32), (32, 1), "torch.ops.dummy.op_name")
def test_assert_size_stride_op_name_fail(self):
tensor = torch.empty((16, 32))
with self.assertRaisesRegex(AssertionError, "torch.ops.dummy.op_name"):
assert_size_stride(tensor, (32, 64), (32, 1), "torch.ops.dummy.op_name")
def test_assert_alignment_op_name_pass(self):
tensor = torch.empty((16, 32))
assert_alignment(tensor, 16, "torch.ops.dummy.op_name")
def test_assert_alignment_op_name_fail(self):
tensor = torch.empty((16, 32))
with self.assertRaisesRegex(AssertionError, "torch.ops.dummy.op_name"):
assert_alignment(tensor, 0, "torch.ops.dummy.op_name")
    @torch._dynamo.config.patch(capture_dynamic_output_shape_ops=True)
    @torch._inductor.config.patch(implicit_fallbacks=True)
    def test_custom_op_unbacked_symints(self):
        """A custom op whose fake impl returns fully unbacked output sizes,
        consumed by a second custom op, must compile without error."""

        @torch.library.custom_op("test_unbacked_symints::foo", mutates_args={})
        def foo(x: torch.Tensor) -> torch.Tensor:
            return x.clone()

        @foo.register_fake
        def _(x):
            # All three output dims are unbacked symints.
            u0 = torch.library.get_ctx().new_dynamic_size()
            u1 = torch.library.get_ctx().new_dynamic_size()
            u2 = torch.library.get_ctx().new_dynamic_size()
            return x.new_empty(u0, u1, u2)

        @torch.library.custom_op("test_unbacked_symints::bar", mutates_args={})
        def bar(x: torch.Tensor) -> torch.Tensor:
            return x.clone()

        @bar.register_fake
        def _(x):
            return torch.empty_like(x)

        x = torch.randn(2, 3, 4)

        @torch.compile(fullgraph=True)
        def f(x):
            y = foo(x)
            z = bar(y)
            return z

        # No error
        f(x)
    @requires_gpu()
    @torch._inductor.config.patch("layout_optimization", True)
    @torch._inductor.config.patch("keep_output_stride", False)
    @config.patch(implicit_fallbacks=True)
    @tf32_on_and_off(0.005)
    def test_custom_op_fixed_layout_sequential(self):
        """A custom op tagged needs_fixed_stride_order must receive its input
        with the eager-mode strides even when layout optimization would
        otherwise change them."""
        import torch.library

        mod = nn.Conv2d(3, 128, 1, stride=1, bias=False).to(device=self.device)
        inp = torch.rand(2, 3, 128, 128, device=self.device)
        expected_stride = mod(inp).stride()

        def bar(x):
            # Asserts inside the op: strides seen must match eager strides.
            self.assertEqual(x.stride(), expected_stride)
            return x.clone()

        def bar_meta(x):
            return torch.empty_like(x)

        define_custom_op_for_test(
            "bar",
            bar,
            bar_meta,
            tags=[torch._C.Tag.needs_fixed_stride_order],
        )

        def fn(x):
            z = mod(x)
            output = torch.ops.test.bar(z)
            return output

        with torch.no_grad():
            # With keep_output_stride False, inductor would normally have different layout from eager execution
            # But because our custom op needs fixed layout, the assertions in the custom op will pass
            self.common(fn, (inp,), check_lowp=False)
    @requires_gpu()
    @config.patch(implicit_fallbacks=True)
    @skip_if_cpp_wrapper(
        "Without major redesign, cpp_wrapper will not support custom ops that are "
        "defined in Python."
    )
    @tf32_on_and_off(0.005)
    def test_mutable_custom_op_fixed_layout2(self):
        """Mixed layout tags: a flexible_layout op may see inductor-chosen
        strides, while a needs_fixed_stride_order mutating op must see the
        eager strides."""
        with torch.library._scoped_library("mylib", "DEF") as lib:
            mod = nn.Conv2d(3, 128, 1, stride=1, bias=False).to(device=self.device)
            inp = torch.rand(2, 3, 128, 128, device=self.device)
            expected_stride = mod(inp).clone().stride()

            lib.define(
                "bar(Tensor x, bool is_compiling) -> Tensor",
                tags=torch.Tag.flexible_layout,
            )

            bar_strides = []

            @torch.library.impl(lib, "bar", "CompositeExplicitAutograd")
            def _(x, is_compiling):
                if is_compiling:
                    # Record the strides inductor passed during compilation.
                    bar_strides.append(x.stride())
                result = x.clone()
                assert x.stride() == result.stride()
                return result

            @torch.library.impl(lib, "bar", "Meta")
            def _(x, is_compiling):
                return x.clone()

            lib.define(
                "add_one(Tensor(a!) x) -> ()",
                tags=torch.Tag.needs_fixed_stride_order,
            )

            @torch.library.impl(lib, "add_one", "CompositeExplicitAutograd")
            def _(x):
                self.assertEqual(x.stride(), expected_stride)
                x.copy_(x + 1)

            def fn(x):
                # Inductor changes the conv to be channels-last
                z = mod(x)
                output = torch.ops.mylib.bar(z, torch._dynamo.is_compiling())
                torch.ops.mylib.add_one(output)
                return output**2

            with torch.no_grad():
                self.common(fn, (inp,), check_lowp=False)

            # Dynamic shapes and rocm invalidate this test case
            if torch._dynamo.config.assume_static_by_default and not TEST_WITH_ROCM:
                # For this test to be valid, Inductor must have changed the conv
                # to be channels-last. If this assertion ever fails then we need
                # a new test case.
                self.assertEqual(len(bar_strides), 1)
                self.assertNotEqual(bar_strides[0], expected_stride)
    @config.patch(implicit_fallbacks=True)
    @skip_if_cpp_wrapper(
        "Without major redesign, cpp_wrapper will not support custom ops that are "
        "defined in Python."
    )
    def test_mutable_custom_op_fixed_layout(self):
        """A mutating custom op tagged needs_fixed_stride_order: the mutation
        it performs must be visible to subsequent ops after compilation."""
        with torch.library._scoped_library("mylib", "DEF") as lib:
            lib.define(
                "copy_(Tensor(a!) dst, Tensor src) -> ()",
                tags=torch.Tag.needs_fixed_stride_order,
            )

            @torch.library.impl(lib, "copy_", "Meta")
            def _(dst, src):
                return None

            @torch.library.impl(lib, "copy_", "CompositeExplicitAutograd")
            def _(dst, src):
                dst.copy_(src)

            def f(x):
                full_default_3 = torch.full([3], 7.0, device="cpu")
                # Mutates full_default_3 in place; the mul below must observe it.
                chunk_cat_default_1 = torch.ops.mylib.copy_.default(full_default_3, x)
                mul_out = torch.mul(full_default_3, full_default_3)
                return mul_out

            x = torch.arange(3, dtype=torch.float, device="cpu")
            eager_out = f(x)

            compiled_inductor_f = torch.compile(f, backend="inductor", fullgraph=True)
            compiled_inductor_out = compiled_inductor_f(x)
            self.assertEqual(compiled_inductor_out, eager_out)
    @requires_gpu()
    @config.patch(implicit_fallbacks=True)
    def test_custom_op_fixed_layout_channels_last(self):
        """needs_fixed_stride_order with a channels-last model: the custom op
        must receive the same (channels-last) strides that eager produced."""

        class Block(nn.Module):
            def __init__(
                self,
            ):
                super().__init__()

                self.in_layers = nn.Sequential(
                    nn.Dropout(p=0.1),
                )

            def helper(self, x):
                out = F.gelu(x)
                out = self.in_layers(out)
                return out

            def forward(self, x):
                out = self.helper(x)
                out = torch.ops.test.baz(out)
                return out

        model = Block()
        model = model.to(GPU_TYPE).to(memory_format=torch.channels_last)
        input_t = torch.randn([1, 320, 128, 128], dtype=torch.float32, device=GPU_TYPE)
        input_t = input_t.to(memory_format=torch.channels_last)
        expected_strides = model.helper(input_t).stride()

        def baz(x):
            # Asserted inside the op so the check runs during compilation too.
            self.assertEqual(expected_strides, x.stride())
            return x.clone()

        def baz_meta(x):
            return torch.empty_like(x)

        define_custom_op_for_test(
            "baz",
            baz,
            baz_meta,
            tags=[torch._C.Tag.needs_fixed_stride_order],
        )

        with torch.no_grad():
            net = torch.compile(model)
            out = net(input_t)
    @skip_if_cpp_wrapper(
        "Without major redesign, cpp_wrapper will not support custom ops that are "
        "defined in Python."
    )
    @config.patch(implicit_fallbacks=True)
    def test_custom_op_default_layout_constraint(self):
        """The default layout constraint for custom ops must preserve the
        non-contiguous strides of the input (the impl branches on contiguity,
        so a wrongly contiguified input would change the result)."""
        with torch.library._scoped_library("mylib", "DEF") as lib:
            lib.define(
                "copy_(Tensor(a!) dst, Tensor src) -> ()",
                # No need to pass in an explicit tag since the default
                # behavior for custom op works.
                # tags=torch.Tag.needs_fixed_stride_order,
            )

            @torch.library.impl(lib, "copy_", "Meta")
            def _(dst, src):
                return None

            @torch.library.impl(lib, "copy_", "CompositeExplicitAutograd")
            def _(dst, src):
                # Contiguity-sensitive: a layout change flips which branch runs.
                if src.is_contiguous():
                    dst.copy_(src + 1)
                else:
                    dst.copy_(src)

            def f(x):
                full_default_3 = torch.full([3, 3], 7.0, device=self.device)
                chunk_cat_default_1 = torch.ops.mylib.copy_.default(full_default_3, x)
                mul_out = torch.mul(full_default_3, full_default_3)
                return mul_out

            # Construct a transposed (non-contiguous) view of contiguous data.
            x = (
                torch.arange(9, dtype=torch.float, device=self.device)
                .view(3, 3)
                .t()
                .contiguous()
                .t()
            )
            eager_out = f(x)

            compiled_inductor_f = torch.compile(f, backend="inductor", fullgraph=True)
            compiled_inductor_out = compiled_inductor_f(x)

            self.assertTrue(torch.allclose(compiled_inductor_out, eager_out))
    @skip_if_gpu_halide  # cuda error
    def test_buffer_use_after_remove(self):
        """Regression test for a buffer being used after removal during
        scheduling (forward + backward through a Rodrigues rotation)."""

        # https://github.com/pytorch/pytorch/issues/102857
        def rotvec_to_rotmat(rotvec) -> torch.Tensor:
            """Simplified rotvec to rotmat code from RoMa
            (https://github.com/naver/roma/blob/06e4b0cdc1c802a60a012bb19c581d6600c63358/roma/mappings.py#L371)
            """
            theta = torch.norm(rotvec, dim=-1)
            axis = rotvec / theta[..., None]
            kx, ky, kz = axis[:, 0], axis[:, 1], axis[:, 2]
            sin_theta = torch.sin(theta)
            cos_theta = torch.cos(theta)
            one_minus_cos_theta = 1 - cos_theta
            xs = kx * sin_theta
            ys = ky * sin_theta
            zs = kz * sin_theta
            xyc = kx * ky * one_minus_cos_theta
            xzc = kx * kz * one_minus_cos_theta
            yzc = ky * kz * one_minus_cos_theta
            xxc = kx**2 * one_minus_cos_theta
            yyc = ky**2 * one_minus_cos_theta
            zzc = kz**2 * one_minus_cos_theta
            R_rodrigues = torch.stack(
                [
                    1 - yyc - zzc,
                    xyc - zs,
                    xzc + ys,
                    xyc + zs,
                    1 - xxc - zzc,
                    -xs + yzc,
                    xzc - ys,
                    xs + yzc,
                    1 - xxc - yyc,
                ],
                dim=-1,
            ).reshape(-1, 3, 3)
            R = R_rodrigues
            return R

        def f(coord, rot, trans):
            rot_mat = rotvec_to_rotmat(rot)
            coord = torch.einsum("...ij,...bj->...bi", rot_mat, coord) + trans
            return coord.sum()

        foo_c = torch.compile(f, dynamic=True)

        def run(fn):
            coord = torch.ones((2, 3), device=self.device)
            rot = nn.Parameter(torch.ones((2, 3), device=self.device))
            trans = nn.Parameter(torch.ones((2, 3), device=self.device))

            U = fn(coord, rot, trans)
            U.backward()

            return U, rot, trans

        # Compare eager vs. compiled outputs and gradients.
        U_e, rot_e, trans_e = run(f)
        U, rot, trans = run(foo_c)

        self.assertEqual(U, U_e)
        self.assertEqual(rot.grad, rot_e.grad)
        self.assertEqual(trans.grad, trans_e.grad)
    # If we serve from the cache, the init hook isn't called
    @config.patch({"fx_graph_cache": False, "fx_graph_remote_cache": False})
    @skipIfWindows(msg="torch._dynamo.exc.Unsupported")
    def test_inner_fn_str_and_stride(self):
        """Inspects scheduler nodes via an init hook: buffers produced from a
        transposed input must report the transposed strides and index
        expression (regression for a wrongly-cached index expression)."""

        def f(x):
            x = x + 1
            x = test_operators.realize(x)
            x = x * 2
            x = test_operators.realize(x)
            return x

        # Transposed view so element strides are (1, 2), not (2, 1).
        x = torch.rand(3, 2, device=self.device).t()
        ref = f(x)
        called = False

        def hook_fn(scheduler, nodes):
            nonlocal called
            called = True

            if self.device != "cpu":
                self.assertEqual(len(nodes), 3)
                _, mul_buf, _ = nodes
                self.assertTrue(
                    all(
                        V.graph.sizevars.size_hints(buf.get_stride()) == (1, 2)
                        for buf in nodes
                    )
                )
                # before the fix, the wrong index expression
                # 'i1 + 3 * i0' is cached.
                self.assertTrue(
                    "i0 + 2 * i1" in mul_buf.data.inner_fn_str()
                    or "i0 + i1 * s64" in mul_buf.data.inner_fn_str()
                )

        with add_scheduler_init_hook(hook_fn):
            actual = torch.compile(f, fullgraph=True)(x)
        self.assertEqual(ref, actual)
        self.assertTrue(called)
    @skip_if_gpu_halide  # cuda error
    def test_mutations_loop_fusion(self):
        """index_add with alpha followed by a division; checks fusion around
        a mutation. Runs on CPU tensors regardless of the test device."""

        def fn(tensor, index, source):
            out = tensor.index_add(0, index, source, alpha=2.0) / 2
            return out

        device = "cpu"
        # MPS lacks float64 support, hence the dtype fallback.
        dtype = torch.double if self.device != "mps" else torch.float32
        tensor = torch.rand((1,), dtype=dtype, device=device)
        index = torch.tensor([0], dtype=torch.long, device=device)
        source = torch.rand((1,), dtype=dtype, device=device)
        self.common(
            fn,
            (
                tensor,
                index,
                source,
            ),
        )
    @config.patch(
        "triton.autotune_pointwise", True
    )  # needed to introduce config that exceed max shared memory usage
    @serialTest()
    @largeTensorTest("13GB", inductor=True)
    def test_large_block_sizes(self):
        """
        Inductor will try triton configs like x = 64 and y = 1024 which will
        result in out of shared memory if dtype is fp32.

        Currently inductor will skip such bad configs and pick the best one
        from the remaining configs.
        """

        @torch.compile
        def fn(x, y):
            return x.t() + y

        # Use shape (2**24, 65) rather than (2**24, 128) potentially avoid OOM in
        # CI while still keep the same up-rounded size-hints.
        a = torch.randn(2**24, 65, device=self.device)
        b = torch.randn(65, 2**24, device=self.device)
        # Passing is simply not raising (no OOM / no bad-config crash).
        fn(a, b)
    # Skipped on ROCm until https://github.com/ROCm/triton/issues/443 resolved
    @slowTest
    def test_fuse_large_params(self):
        """Compile an optimizer step over many (194) parameters, stressing
        foreach/fusion handling of large parameter lists."""

        def pt2_optimizer_step(optimizer):
            @torch.compile()
            def f():
                optimizer.step()

            f()

        params = [
            torch.rand(10, 10, dtype=torch.float32, device=self.device)
            for _ in range(194)
        ]
        for p in params:
            p.grad = torch.rand_like(p)

        o = torch.optim.AdamW(params)
        # Passing is simply not raising during the compiled step.
        pt2_optimizer_step(o)
    # Skipped on MPS because avgpool size is not divisible
    @xfail_if_mps
    @skip_if_gpu_halide
    def test_adaptive_avg_pool1d_argmax(self):
        """adaptive_avg_pool1d feeding argmax (regression for issue 113013)."""

        # https://github.com/pytorch/pytorch/issues/113013
        def fn(x):
            x = torch.adaptive_avg_pool1d(input=x, output_size=2)
            x = torch.argmax(input=x)
            return x

        x = torch.rand([4, 4, 3], dtype=torch.float64)
        self.common(fn, (x,))
@skipCUDAIf(not SM80OrLater, "uses bfloat16 which requires SM >= 80")
@parametrize(
"dtype_x, dtype_y",
list(itertools.product(test_dtypes, test_dtypes)),
)
def test_dtypeview(self, dtype_x, dtype_y):
if TEST_WITH_ASAN:
return
if is_triton_cpu_backend(self.device):
raise unittest.SkipTest("Compile time crash in Triton CPU CI")
# https://github.com/pytorch/pytorch/issues/126338
def fn(x, y, x_dtype, x2):
x = x.view(x_dtype)
y = y.view(x_dtype) + 1
x2 = x2.view(x_dtype) + 1
return x @ y, x2 @ x
# @ operation needs arguments to be the same dtype
for view_dtype in test_dtypes:
try:
x = rand_strided((2, 2), (2, 1), device=self.device, dtype=dtype_x)
y = rand_strided((2, 2), (2, 1), device=self.device, dtype=dtype_y)
x2 = x.clone()
fn(x, y, view_dtype, x2)
except Exception as e:
continue
self.common(
fn,
(x, y, view_dtype, x2),
reference_in_float=False,
check_lowp=False,
)
    def test_dtypeview_fusion(self):
        """A dtype view between two pointwise ops must not block fusion:
        exactly one kernel should be generated."""

        @torch.compile
        def fn(x):
            x = x + 1
            x = torch.ops.aten.view.dtype(x, torch.int16)
            x = x * 2
            return x

        torch._inductor.metrics.generated_kernel_count = 0
        x = torch.randn([1024], dtype=torch.float16, device=self.device)
        self.common(fn, (x,), reference_in_float=False)
        assertGeneratedKernelCountEqual(self, 1)
    @expectedFailureCodegenDynamic
    def test_reinterpret_dtypeview(self):
        """Shape-view + dtype-view compositions; the generated code should
        reinterpret the tensor rather than materialize a copy."""

        @torch.compile
        def fn(x, x2):
            return x.view([10, 10]).view(torch.int32), x2.view(torch.int32).view(
                [10, 10]
            )

        x = torch.randn([100, 1], device=self.device)
        x2 = x.clone()
        self.common(fn, (x, x2), reference_in_float=False, check_lowp=False)

        # The cpp_wrapper code is significantly more complex, so skip checking for exact
        # code lines.
        if not config.cpp_wrapper:
            x = torch.randn([100, 1], device=self.device)
            x2 = x.clone()
            _, code = run_and_get_code(fn, x, x2)
            FileCheck().check("aten.view.dtype(reinterpret_tensor").run(code[0])
    @xfail_if_triton_cpu
    @requires_gpu()
    def test_scalar_cpu_tensor_arg(self):
        """A GPU computation consuming the reduced value of a CPU tensor, for
        each supported floating dtype of the CPU side."""

        def fn(x, y):
            return x + y.sum()

        test_dtypes = [
            torch.float32,
            torch.float64,
            torch.float16,
            torch.bfloat16,
        ]
        for cpu_dtype in test_dtypes:
            if not self.is_dtype_supported(cpu_dtype):
                continue

            x = torch.rand([20], device=self.device)
            y = torch.rand([4], device="cpu", dtype=cpu_dtype)
            # copy_to_gpu=False keeps y on CPU to exercise the mixed-device path.
            self.common(
                fn,
                (x, y),
                check_lowp=False,
                copy_to_gpu=False,
                reference_in_float=False,
            )
def test_float16_to_int16(self):
def fn(x):
x_view = x.view(dtype=torch.int16)
return x_view.mul(2) + x_view.bitwise_and(2)
x = torch.ones(4, dtype=torch.float16, device=self.device)
ref = fn(x)
actual = torch.compile(fn)(x)
self.assertEqual(ref, actual)
@skipCUDAIf(not SM80OrLater, "uses bfloat16 which requires SM >= 80")
@skip_if_gpu_halide # https://github.com/halide/Halide/issues/8311
def test_bfloat16_to_int16(self):
def fn(a, b):
x = a + b
x_view = x.view(dtype=torch.int16)
return x_view.mul(2) + x_view.bitwise_and(2)
if not self.is_dtype_supported(torch.bfloat16):
raise unittest.SkipTest("bfloat16 is not supported on {self.device}")
a = torch.ones(4, dtype=torch.bfloat16, device=self.device)
b = torch.ones(4, dtype=torch.bfloat16, device=self.device)
ref = fn(a, b)
actual = torch.compile(fn)(a, b)
self.assertEqual(ref, actual)
def test_float32_to_int32(self):
def fn(a, b):
x = a + b
x_view = x.view(dtype=torch.int32)
return x_view.mul(2) + x_view.bitwise_and(2)
a = 0.5 * torch.ones(4, dtype=torch.float32, device=self.device)
b = 0.5 * torch.ones(4, dtype=torch.float32, device=self.device)
ref = fn(a, b)
actual = torch.compile(fn)(a, b)
self.assertEqual(ref, actual)
def test_randint_int64_mod(self):
# This used to not compile due to a wrong return type of randint64_cpu
# See https://github.com/pytorch/pytorch/issues/117435
def fn(n):
return (
torch.randint(
low=-5, high=5, size=(n,), dtype=torch.int64, device=self.device
)
% 10
)
res = torch.compile(fn)(20)
self.assertTrue(torch.all((res >= 0) & (res < 10)).item())
    @torch._inductor.config.patch(force_shape_pad=True)
    @skip_if_gpu_halide  # correctness issue
    def test_should_pad_bench_for_bmm(self):
        """With force_shape_pad, the pad-mm heuristic should decide to pad a
        bmm whose K dimension is one past a friendly size."""
        B = 2
        M = 1024
        N = 1024
        K = 1024 + 1  # a size that requires padding
        mat1 = torch.rand(B, M, K, device=self.device)
        mat2 = torch.rand(B, K, N, device=self.device)

        should_pad = pad_mm.should_pad_bench(None, mat1, mat2, torch.ops.aten.bmm)

        self.assertTrue(should_pad)
    @parametrize(
        "name, op",
        [
            subtest((name, getattr(torch.special, name)), name=name)
            for name in torch.special.__all__
            if name not in {"softmax", "log_softmax", "logsumexp"}
        ],
    )
    def test_pointwise(self, name, op):
        """Exercise every torch.special pointwise op through self.common.

        Parametrized over torch.special.__all__ (minus the reduction-like
        entries). Per-op argument shapes/kwargs are chosen below; ops known to
        lack half-precision or backend support are skipped or run fp32-only.
        """
        dtype = torch.float32
        check_lowp = True
        if self.device == GPU_TYPE and name in {
            "airy_ai",
            "bessel_i0",
            "bessel_i1",
            "bessel_j0",
            "bessel_j1",
            "bessel_y0",
            "bessel_y1",
            "erfcx",
            "gammainc",
            "gammaincc",
            "i1",
            "i1e",
            "modified_bessel_i0",
            "modified_bessel_i1",
            "modified_bessel_k0",
            "modified_bessel_k1",
            "ndtri",
            "scaled_modified_bessel_k0",
            "scaled_modified_bessel_k1",
            "spherical_bessel_j0",
            "zeta",
            "chebyshev_polynomial_t",
            "chebyshev_polynomial_v",
            "chebyshev_polynomial_u",
            "chebyshev_polynomial_w",
            "legendre_polynomial_p",
            "shifted_chebyshev_polynomial_t",
            "shifted_chebyshev_polynomial_u",
            "shifted_chebyshev_polynomial_v",
            "shifted_chebyshev_polynomial_w",
            "hermite_polynomial_h",
            "hermite_polynomial_he",
            "laguerre_polynomial_l",
        }:
            # <func>_cuda not implemented for Half
            check_lowp = False
        # NOTE(review): `and` binds tighter than `or`, so a Halide backend
        # skips for EVERY name while the name list only gates Triton CPU —
        # confirm this asymmetry is intended.
        if (
            is_halide_backend(self.device)
            or is_triton_cpu_backend(self.device)
            and name
            in (
                "erfinv",
                "airy_ai",
                "bessel_j0",
                "bessel_j1",
                "bessel_y0",
                "bessel_y1",
                "chebyshev_polynomial_t",
                "chebyshev_polynomial_u",
                "chebyshev_polynomial_v",
                "chebyshev_polynomial_w",
                "digamma",
                "gammainc",
                "gammaincc",
                "gammaln",
                "hermite_polynomial_h",
                "hermite_polynomial_he",
                "i0",
                "i0e",
                "i1",
                "i1e",
                "laguerre_polynomial_l",
                "legendre_polynomial_p",
                "modified_bessel_i0",
                "modified_bessel_i1",
                "modified_bessel_k0",
                "modified_bessel_k1",
                "multigammaln",
                "ndtri",
                "polygamma",
                "psi",
                "scaled_modified_bessel_k0",
                "scaled_modified_bessel_k1",
                "shifted_chebyshev_polynomial_t",
                "shifted_chebyshev_polynomial_u",
                "shifted_chebyshev_polynomial_v",
                "shifted_chebyshev_polynomial_w",
                "spherical_bessel_j0",
                "zeta",
            )
        ):
            raise unittest.SkipTest(f"Halide & Triton CPU do not support {name}")
        if is_triton_cpu_backend(self.device) and name in [
            "erfc",
            "erfcx",
            "round",
            "log_ndtr",
        ]:
            raise unittest.SkipTest(f"Triton CPU does not support {name}")
        # Build per-op positional args: some ops take two tensors, some a
        # tensor plus an integer order/degree, and polygamma takes (n, x).
        if name in {"gammainc", "gammaincc"}:
            args = (
                torch.randn(8, 8, dtype=dtype, device=self.device),
                torch.empty(8, 8, dtype=dtype, device=self.device).uniform_(1, 2),
            )

            def fn(x, y):
                return op(x, y)

        elif name in {"xlog1py", "xlogy", "zeta"}:
            args = (
                torch.randn(8, 8, dtype=dtype, device=self.device),
                torch.empty(8, 8, dtype=dtype, device=self.device).uniform_(1, 2),
            )

            def fn(x, y):
                return op(x, y)

        elif name == "multigammaln":
            args = (
                torch.empty(8, 8, dtype=dtype, device=self.device).uniform_(1, 2),
                2,
            )

            def fn(x, p):
                return op(x, p)

        elif name == "polygamma":
            args = (
                1,
                torch.empty(8, 8, dtype=dtype, device=self.device).uniform_(1, 10),
            )

            def fn(n, x):
                return op(n, x)

        elif "_polynomial_" in name:
            args = (
                torch.randn(8, 8, dtype=dtype, device=self.device),
                2,
            )

            def fn(x, n):
                return op(x, n)

        else:
            args = (torch.randn(8, 8, dtype=dtype, device=self.device),)

            def fn(x):
                return op(x)

        # On MPS a handful of ops are expected to raise NotImplementedError.
        ctx = (
            contextlib.nullcontext()
            if self.device != "mps"
            or name
            not in [
                "airy_ai",
                "erfcx",
                "laguerre_polynomial_l",
                "legendre_polynomial_p",
                "log_ndtr",
                "ndtri",
            ]
            else self.assertRaises(NotImplementedError)
        )
        with ctx:
            self.common(fn, args, check_lowp=check_lowp, atol=1e-4, rtol=1e-4)
    # codegen test fails with no dynamic for loop in dynamic shape tests
    @expectedFailureCodegenDynamic
    def test_view_uint8_through_differing_bitwidths(self):
        """Round-trip view uint8 -> wider int dtype -> uint8 must be a no-op.

        https://github.com/pytorch/pytorch/issues/120998
        """

        def fn(x, view_dtype):
            return x.view(view_dtype).view(torch.uint8)

        view_dtypes = [torch.int16, torch.int32, torch.int64]
        for dtype in view_dtypes:
            # values fit in 4 bits so reinterpreting is loss-free
            x = torch.randint(0, 2**4, [4096, 4096], dtype=torch.uint8)
            self.common(
                fn,
                (
                    x,
                    dtype,
                ),
            )
    @torch._dynamo.config.patch(capture_scalar_outputs=True)
    def test_split_with_sizes_with_unbacked_symints(self):
        """split_with_sizes where the split sizes are unbacked symints from .tolist()/.item()."""

        @torch.compile()
        def f(sz, x):
            # s0/s1 are unbacked: their values come from tensor data
            s0, s1 = sz.tolist()
            r0, r1 = torch.ops.aten.split_with_sizes.default(x, [s0, s1])
            return torch.ops.aten.sort.default(r1)

        N = 7312
        S0 = 420
        S1 = N - S0
        result = f(torch.tensor([S0, S1]), torch.randn(N))
        self.assertTrue(len(result) == 2)

        @torch.compile()
        def f2(x):
            # arange over a data-dependent length, then a static-size split
            y = torch.arange(x.item())
            return torch.ops.aten.split_with_sizes.default(y, [5, 5, 10])

        result = f2(torch.tensor([20]))
        self.assertTrue(len(result) == 3)
    @torch._dynamo.config.patch(capture_scalar_outputs=True)
    def test_split_with_unbacked_symints(self):
        """torch.split over a tensor whose length is an unbacked symint.

        https://github.com/pytorch/pytorch/issues/122937
        """

        @torch.compile()
        def f(x):
            y = torch.arange(x.item())
            return torch.split(y, [5, 5, 10])

        result = f(torch.tensor([20]))
        self.assertTrue(len(result) == 3)
def test_complex_memory_overlap(self):
t = rand_strided((8, 1500, 1), (1504, 1, 1), device=self.device)
self.assertFalse(complex_memory_overlap(t))
    @xfail_if_mps
    def test_generate_rand_fp8(self):
        """
        PyTorch can not generate fp8 tensors with a normal distribution because of
        missing needed kernels.

        We work around that in rand_strided by generating an fp16 tensor first and
        then do casting.
        """
        t = rand_strided((2, 3), (3, 1), device=self.device, dtype=torch.float8_e4m3fn)
        self.assertTrue(t.dtype is torch.float8_e4m3fn)
    @largeTensorTest("1GB", inductor=True)
    @parametrize(
        "use_block_ptr",
        [subtest(False), subtest(True, decorators=[skip_if_not_triton])],
    )
    def test_large_grid(self, use_block_ptr):
        """Permute+clone of a tensor large enough to need a huge launch grid.

        https://github.com/pytorch/pytorch/issues/123210
        """

        def fn(primals_5):
            view = torch.ops.aten.reshape.default(primals_5, [-1, 2, 4])
            primals_5 = None
            permute = torch.ops.aten.permute.default(view, [0, 2, 1])
            clone = torch.ops.aten.clone.default(
                permute, memory_format=torch.contiguous_format
            )
            return clone

        # > 2**24 rows to stress grid-size handling
        s0 = 16777472
        s1 = 8
        with config.patch({"triton.use_block_ptr": use_block_ptr}):
            compiled_fn = torch.compile(fn)
            actual = compiled_fn(torch.ones(s0, s1, device=self.device))
            self.assertTrue((actual == 1).all())
    @skip_if_gpu_halide
    def test_pattern_matcher_multi_user(self):
        """Cross-entropy whose logits feed a second consumer (logsumexp).

        Reproducer for https://github.com/pytorch/pytorch/issues/129685
        """

        def forward(float_1, view_1):
            logits = float_1 / 64.0
            loss = torch.nn.functional.cross_entropy(logits, view_1, ignore_index=5)
            # second use of `logits` — the pattern matcher must not fuse it away
            logsumexp = logits.logsumexp(dim=-1)
            return [loss, logsumexp]

        a = torch.randn(512, 4096, requires_grad=True)
        b = torch.randint(size=(512,), low=0, high=4095)
        self.common(forward, (a, b))
def test_isin_tensor_scalar(self):
for invert in [True, False]:
torch._dynamo.reset()
elements = 1
test_elements = torch.tensor([1, 2, 3, 4])
self.common(torch.isin, (elements, test_elements), {"invert": invert})
torch._dynamo.reset()
elements = torch.tensor([1, 2, 3, 4])
test_elements = 1
self.common(torch.isin, (elements, test_elements), {"invert": invert})
    def test_mul_index_expr(self):
        """Index-expression arithmetic (iota outer product, negate, divide).

        Minified repro from https://github.com/pytorch/pytorch/issues/111884
        """

        def forward():
            iota = torch.ops.prims.iota.default(
                16,
                start=0,
                step=1,
                dtype=torch.int64,
                device=self.device,
                requires_grad=False,
            )
            # outer product of [0..15] with itself via broadcasting
            unsqueeze = torch.ops.aten.unsqueeze.default(iota, -1)
            mul = torch.ops.aten.mul.Tensor(unsqueeze, iota)
            unsqueeze = iota = None
            neg = torch.ops.aten.neg.default(mul)
            mul = None
            div = torch.ops.aten.div.Tensor(neg, 16)
            neg = None
            return (div,)

        self.common(forward, ())
    def test_flip_cat(self):
        """cat -> view -> slice -> flip pipeline on gradient-tracking inputs."""

        def forward(unsqueeze, unsqueeze_1):
            cat_1 = torch.ops.aten.cat.default([unsqueeze, unsqueeze_1], 1)
            view = torch.ops.aten.view.default(cat_1, [4])
            slice_5 = torch.ops.aten.slice.Tensor(view, 0, 0, 3)
            rev_1 = torch.ops.aten.flip.default(slice_5, [0])
            return (rev_1,)

        a = torch.randn(2, 1, requires_grad=True)
        b = torch.randn(2, 1, requires_grad=True)
        self.common(forward, (a, b))
    @config.patch(implicit_fallbacks=True)
    def test_weight_norm_bwd(self):
        """
        Weight norm backward eager kernel does not support non-contiguous
        inputs. Eager kernel silently produces incorrect results when
        inputs are non-contiguous. Inductor implicitly fallback to eager
        for weight norm backward. Fix that by requiring contiguous inputs
        for any implicit fallback kernels.

        Check: https://github.com/pytorch/pytorch/issues/140452
        """

        class Repro(nn.Module):
            def __init__(self, in_features):
                super().__init__()
                self.weight_normed_linear = nn.utils.parametrizations.weight_norm(
                    nn.Linear(in_features, out_features=2)
                )
                self.linear = nn.Linear(in_features=2, out_features=1)

            def forward(self, x):
                return self.linear(self.weight_normed_linear(x))

        def f(m, x):
            # half-precision autocast exercises the fallback path
            with torch.amp.autocast(device_type=self.device, dtype=torch.half):
                loss = m(x).sum()
            loss.backward()
            return loss

        # odd number on purpose to trigger comprehensive padding
        in_features = 1025
        x = torch.randn(2, in_features, dtype=torch.half, requires_grad=True).to(
            device=self.device
        )
        m = Repro(in_features)
        m = m.to(self.device)

        # eager reference gradients
        f(m, x)
        ref_grad_list = [p.grad for p in m.parameters()]

        for p in m.parameters():
            p.grad = None

        opt_f = torch.compile(f)
        opt_f(m, x)
        act_grad_list = [p.grad for p in m.parameters()]
        self.assertTrue(
            same(ref_grad_list, act_grad_list, tol=1e-3),
            f"Ref:\n{ref_grad_list}\nAct:\n{act_grad_list}",
        )
    def test_chunk_recompiles(self):
        """Track how many recompiles x.chunk(4) triggers as the input length varies."""

        def f(x):
            return x.chunk(4)

        # Runs f and its torch.compile-d version with a fresh 1D tensor
        # of a specific size, and checks that the result is correct.
        def run(size):
            input = torch.randn(size)
            expected_out = f(input)
            actual_out = optf(input)
            self.assertEqual(expected_out, actual_out)

        cnts = CompileCounterWithBackend("inductor")
        optf = torch.compile(f, backend=cnts, fullgraph=True)

        # The first run should compile once with static shapes.
        run(4)
        self.assertEqual(cnts.frame_count, 1)

        # Varying the input size should trigger a recompilation.
        # Since the input size is a multiple of 4 (i.e. all runs shall
        # generate 4 output tensors), there should be no further
        # recompilation.
        for i in range(2, 12):
            run(4 * i)
        self.assertEqual(cnts.frame_count, 2)

        # Input size: 11
        # Not a multiple of 4, but still generates 4 output tensors,
        # where the last one has size > 1.
        run(11)
        self.assertEqual(cnts.frame_count, 2)

        # Input size: 10
        # Even though it still generates 4 output tensors, the last
        # one has size 1, falling into our 0/1 specialization. Thus,
        # this one also triggers recompilation.
        run(10)
        self.assertEqual(cnts.frame_count, 3)

        # Input size: 9
        # Yields one less output tensor, which should trigger a
        # recompilation.
        run(9)
        self.assertEqual(cnts.frame_count, 4)
    @dynamo_config.patch(error_on_recompile=True)
    def test_no_specization_over_symbolic_value(self):
        """torch.full with a symbolic fill value must not specialize (no recompile).

        NOTE(review): "specization" in the test name looks like a typo for
        "specialization"; renaming would change the test ID, so it is kept.
        """

        def fn(x):
            s0 = x.shape[0]
            # fill value depends on the symbolic dim — must stay dynamic
            y = torch.full((1,), s0)
            return x + y

        arg1 = torch.ones(10)
        arg2 = torch.ones(11)
        ref1 = fn(arg1)
        ref2 = fn(arg2)

        opt_fn = torch.compile(fn, fullgraph=True, dynamic=True, backend="inductor")
        # error_on_recompile makes a second compile raise, so both calls
        # succeeding proves no specialization happened
        res1 = opt_fn(arg1)
        res2 = opt_fn(arg2)

        self.assertEqual(res1, ref1)
        self.assertEqual(res2, ref2)
def test_conv_shape_check(self):
# https://github.com/pytorch/pytorch/issues/144013
class Model(torch.nn.Module):
def __init__(self, dim):
super().__init__()
conv_t_cls = eval(f"torch.nn.ConvTranspose{dim}d")
self.conv_t = conv_t_cls(
1, 1, kernel_size=(2,) * dim, padding=(1,) * dim
)
def forward(self, x):
x = self.conv_t(x)
x = torch.sigmoid(x) # trigger condition
return x
for dim in (1, 2, 3):
inputs = torch.randn((1,) * (dim + 2))
model = Model(dim)
with self.assertRaisesRegex(RuntimeError, "Output size is too small"):
_ = model(inputs)
with self.assertRaisesRegex(RuntimeError, "Output size is too small"):
_ = torch.compile(model)(inputs)
    @requires_gpu()
    @config.patch(fallback_random=True)
    @unittest.skipIf(
        config.cpp_wrapper,
        "cpp wrapper does not support sort properly: https://gist.github.com/shunting314/e58f637f9972f1ad1a033d73cee6e42a",
    )
    def test_mix_device_index(self):
        """
        A tiny repro for this meta internal issue: https://fb.workplace.com/groups/1075192433118967/posts/1567334737238065

        whose root cause is Inductor having wrong assumption of index.Tensor's output
        stride.
        """
        # channels-last storage viewed as 5-D; index.Tensor must respect its strides
        image_latent = (
            torch.randn((24, 16, 32, 32), device=GPU_TYPE)
            .to(memory_format=torch.channels_last)
            .view(2, 12, 16, 32, 32)
        )

        def f(image_latent):
            indices = torch.argsort(torch.rand(2, 12), dim=-1)
            tar_latent = image_latent[torch.arange(2).unsqueeze(-1), indices[:, :3]]
            # The original model uses einops. In this unit test, we use view op directly
            # to avoid importing einops
            # tar_latent_rearranged = einops.rearrange(
            #     tar_latent, "b n c h w -> (b n) c h w"
            # )
            tar_latent_rearranged = tar_latent.view(-1, *tar_latent.size()[2:])
            return tar_latent_rearranged

        reset_rng_state()
        ref = f(image_latent)

        opt_f = torch.compile(f)
        code = run_and_get_triton_code(opt_f, image_latent)

        reset_rng_state()
        act = opt_f(image_latent)

        torch.testing.assert_close(ref, act, atol=1e-3, rtol=1e-3)
        # verify the size/stride assert Inductor generates for the index output
        if is_dynamic_shape_enabled():
            size_assert_pattern = r"assert_size_stride.[a-z]+[0-9]+, .2, 3, s12, s80, s80., .3\*s12\*s80\*s80, s12\*s80\*s80, 1, s12\*s80, s1.."  # noqa: B950
        else:
            size_assert_pattern = r"assert_size_stride.[a-z]+[0-9]+, .2, 3, 16, 32, 32., .49152, 16384, 1, 512, 16.."
        FileCheck().check_regex(size_assert_pattern).run(code)
    def test_lite_mode_fallback(self):
        """mode="lite" keeps aten ops as fallbacks instead of generating kernels."""

        def f(x):
            z = x.sin()
            return z.cos()

        f = torch.compile(f, mode="lite")
        _, code = run_and_get_code(f, torch.randn(2, device=self.device))

        # Checks that aten ops are kept and run
        if config.cpp_wrapper:
            FileCheck().check("aoti_torch_call_dispatcher(").check("aten::sin").check(
                "aoti_torch_call_dispatcher("
            ).check("aten::cos").run(code[0])
        else:
            FileCheck().check("torch.ops.aten.sin.default(").check(
                "torch.ops.aten.cos.default("
            ).run(code[0])

        # Checks that no triton code run in the generated code
        self.assertFalse(".run(" in code[0])
    # skip cpu test since rms norm is always decomposed on cpu
    def test_lite_mode_not_decompose(self):
        """mode="lite" must keep _fused_rms_norm as a single op (no decomposition)."""
        if self.device != GPU_TYPE or self.device == "mps":
            raise unittest.SkipTest("requires GPU")

        def f(x, shape):
            y = x + 1
            z = torch.ops.aten._fused_rms_norm(y, shape, None, None)
            return z[0] + z[1]

        f = torch.compile(f, mode="lite")
        x = torch.randn(2, 3, device=self.device)
        _, code = run_and_get_code(f, x, [2, 3])
        if config.cpp_wrapper:
            FileCheck().check(
                "AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_cuda__fused_rms_norm("
            ).run(code[0])
        else:
            FileCheck().check("torch.ops.aten._fused_rms_norm.default(").run(code[0])

        if config.cpp_wrapper:
            # arg type List[int] is not yet supported by custom_op_wrapper
            pass
        else:
            x = torch.randn(2, 3, device=self.device, requires_grad=True)
            _, codes = run_fw_bw_and_get_code(lambda: f(x, [2, 3]))
            self.assertEqual(len(codes), 2)
            # NOTE(review): this FileCheck runs on `code[0]` (the inference
            # code captured above), not `codes[0]` from the fw/bw run —
            # confirm whether `codes[0]` was intended here.
            FileCheck().check("torch.ops.aten._fused_rms_norm.default(").run(code[0])
    def test_lite_regional_compile_flex_attention(self):
        """mode="lite" regional compile of an fx_traceback-annotated flex_attention region."""
        if self.device != GPU_TYPE or self.device == "mps":
            raise unittest.SkipTest("requires GPU")

        from torch.nn.attention.flex_attention import create_block_mask, flex_attention

        def _squared(score, b, h, m, n):
            return score * score

        def mask_mod(b, h, q, k):
            return q >= 0

        a = 12
        b = 64
        block_mask = create_block_mask(
            mask_mod, None, None, a * b, a * b, device=self.device
        )

        def fn(x):
            x = torch.sin(x)
            # only this annotated region should be compiled with inductor
            with fx_traceback.annotate({"compile_with_inductor": 0}):
                x = flex_attention(x, x, x, block_mask=block_mask, score_mod=_squared)
            return torch.cos(x)

        x = torch.randn(
            1,
            1,
            a * b,
            b,
            dtype=torch.bfloat16,
            device=self.device,
            requires_grad=True,
        )

        opt_fn = torch.compile(
            fn,
            mode="lite",
            fullgraph=True,
        )

        # Check that inductor compilation is called twice
        _, codes = run_fw_bw_and_get_code(lambda: opt_fn(x))
        self.assertEqual(len(codes), 2)
    @unittest.skipIf(
        config.cpp_wrapper,
        "codegen invoke_subgraph is not implemented for cpp wrapper",
    )
    def test_lite_regional_compile_invoke_subgraph(self):
        """Regional lite compile of a nested_compile_region subgraph.

        Checks that get_attr nodes custom metadata is propagated.
        """

        @torch.compiler.nested_compile_region
        def gn(x):
            return torch.sin(x)

        def fn(x):
            x = x + 1
            with fx_traceback.annotate({"compile_with_inductor": 0}):
                z = gn(x)
            return torch.sigmoid(z)

        opt_fn = torch.compile(fn, mode="lite", fullgraph=True)
        x = torch.randn(10, requires_grad=True)

        # one inductor compile each for forward and backward
        _, codes = run_fw_bw_and_get_code(lambda: opt_fn(x))
        self.assertEqual(len(codes), 2)
    @unittest.skipIf(
        config.cpp_wrapper,
        "codegen triton_kernel_wrapper_functional is not implemented for cpp wrapper",
    )
    def test_lite_triton_kernel_wrapper_functional(self):
        """mode="lite" with a user triton kernel routed through triton_kernel_wrapper_functional."""
        if self.device != GPU_TYPE or self.device == "mps":
            raise unittest.SkipTest("requires GPU")

        from torch._higher_order_ops.triton_kernel_wrap import (
            kernel_side_table,
            triton_kernel_wrapper_functional,
        )
        from torch.testing._internal.triton_utils import mul2_kernel

        kernel_side_table.reset_table()

        def f(x, output):
            out = triton_kernel_wrapper_functional(
                kernel_idx=kernel_side_table.add_kernel(mul2_kernel),
                constant_args_idx=kernel_side_table.add_constant_args(
                    {"n_elements": output.numel(), "BLOCK_SIZE": 16}
                ),
                grid=[(x.numel(),)],
                tma_descriptor_metadata={},
                kwargs={
                    "in_ptr0": x,
                    "out_ptr": output,
                },
                # functionalization clones these instead of mutating in place
                tensors_to_clone=["in_ptr0", "out_ptr"],
            )
            return out["out_ptr"]

        t1 = torch.rand(5, device=self.device)
        t2 = torch.rand(5, device=self.device)
        compiled_f = torch.compile(f, mode="lite")
        out = compiled_f(t1, t2)

        # Make sure t2 was not modified
        self.assertNotEqual(out, t2)
    def test_lite_regional_compile_repeated_blocks(self):
        """The same annotated region invoked twice must still compile regionally."""

        def fn(x, y):
            sin = torch.sin(x)
            with fx_traceback.annotate({"compile_with_inductor": 0}):
                mul = sin * y
                add = mul + 1
            return torch.sin(add)

        class Mod(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                # fn (and its annotated region) runs twice per forward
                a = fn(x, y)
                return fn(a, y)

        mod = Mod()

        opt_mod = torch.compile(
            mod,
            mode="lite",
            fullgraph=True,
        )
        x = torch.randn(10, requires_grad=True)
        y = torch.randn(10, requires_grad=True)

        _, codes = run_fw_bw_and_get_code(lambda: opt_mod(x, y))
        self.assertEqual(len(codes), 2)
    def test_lite_dynamic_shape_assertion(self):
        """mode="lite" keeps unbacked-symint runtime assertions in generated code."""

        class Model(torch.nn.Module):
            def forward(self, c):
                d = torch.concat([c, c], dim=0)
                with fx_traceback.annotate({"compile_with_inductor": "my_region"}):
                    d = d + 1
                return d

        model = Model()
        model = torch.compile(
            model,
            mode="lite",
            fullgraph=True,
        )
        c = torch.randn((64, 32), device=self.device)
        # mark dim 0 unbacked so codegen must emit a `u... >= 0` assertion
        torch._dynamo.decorators.mark_unbacked(c, 0)
        _, code = run_and_get_code(model, c)

        # Checks that unbacked symint assertions are kept
        if config.cpp_wrapper:
            FileCheck().check_regex(r"if \(!\(u.* >= 0L\)\)").check_regex(
                "Expected u.* >= 0 but receive"
            ).run(code[0])
        else:
            FileCheck().check_regex(r"if not \(u.* >= 0\):").check_regex(
                r"raise RuntimeError\('u.* >= 0'\)"
            ).run(code[0])
    @lowering.force_fallback(aten.sort.default)
    @unittest.skipIf(
        config.cpp_wrapper,
        "Inductor does not generate size/stride asserts for cpp_wrapper",
    )
    def test_size_asserts_for_multi_output_fallback(self):
        """A forced-fallback multi-output op (sort) gets a size/stride assert per output."""

        @torch.compile
        def f(x):
            return x.sort()

        x = torch.randn(16, 32, device=self.device)
        code = run_and_get_triton_code(f, x)

        if is_dynamic_shape_enabled():
            FileCheck().check("assert_size_stride(buf1, (s77, s27), (s27, 1)").check(
                "assert_size_stride(buf2, (s77, s27), (s27, 1)"
            ).run(code)
        else:
            FileCheck().check("assert_size_stride(buf1, (16, 32), (32, 1)").check(
                "assert_size_stride(buf2, (16, 32), (32, 1)"
            ).run(code)
    @requires_gpu_and_triton
    @config.patch(use_fast_math=True)
    def test_prepare_softmax_with_fast_math(self):
        """
        Measure on a A100, perf is 3.487ms v.s. 3.358ms without or with flushing to zero. A 4% speedup.
        """
        if DO_PERF_TEST:
            M = 32768
            N = 50304
        else:
            # Use small shapes if not doing perf test
            M = 128
            N = 128
        x = torch.randn(M, N, dtype=torch.bfloat16, device=GPU_TYPE)

        def f(x):
            """
            Not calling softmax directly to generate kernel just for
            computation of max & sum.

            If we call softmax directly, the computation of the final
            result will double the membw usage. In that case saving
            computation does not matter much.

            In reality during training, since max & sum need to be saved
            for bwd and the computation of softmax result is fused with
            other kernels, we do see such prepare_softmax kernel appear
            in real models.
            """
            x_max = x.amax(dim=-1, keepdim=True)
            x_sum = (x - x_max).exp().sum(dim=-1, keepdim=True).log()
            return x_max, x_sum

        opt_f = torch.compile(f)
        ref = f(x)
        act = opt_f(x)
        self.assertTrue(same(ref, act, tol=1e-2), f"Ref:\n{ref}\nAct:\n{act}")

        if DO_PERF_TEST:
            from triton.testing import do_bench

            ms = do_bench(lambda: opt_f(x))
            print(f"{ms=:.3f}")
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_no_inputs(self):
def foo():
torch.manual_seed(3)
return torch.randint(0, 5, (5,))
foo = torch.compile(foo)
foo()
    @torch._inductor.config.patch("graph_partition", True)
    def test_graph_partition_mutation_real_name(self):
        """Mutation through a diagonal view plus a device-copy-forced partition boundary."""

        def f(x, y, z, other):
            mul = x * y
            # mutates `mul` in place through the diagonal view
            diag = torch.diagonal(mul)
            diag.copy_(other)
            # force grah partition by device copy
            u = diag.cpu().to(self.device)
            return torch.mm(mul, z) + u + diag

        inps = (
            torch.randn(3, 3, device=self.device),
            torch.randn(3, 3, device=self.device),
            torch.randn(3, 3, device=self.device),
            torch.randn(3, device=self.device),
        )
        eager_out = f(*inps)
        compiled_f = torch.compile(f)
        compiled_out = compiled_f(*inps)
        torch.testing.assert_close(eager_out, compiled_out)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_arange1(self):
def fn(step, device):
return torch.arange(512, -512, step, device=device)
compiled_fn = torch.compile(fn)
for step in (-1, -1.0):
expect = fn(step, "cpu")
actual = compiled_fn(step, "cpu")
self.assertEqual(expect, actual)
self.assertEqual(expect, actual)
    @torch._inductor.config.patch("graph_partition", True)
    def test_graph_partition_arange2(self):
        """torch.arange with float bounds inheriting dtype/device from a sample tensor."""

        def fn(x):
            return torch.arange(0.1, 8.0001, 1, dtype=x.dtype, device=x.device)

        make_arg = functools.partial(
            make_tensor, device=self.device, requires_grad=False
        )
        compiled_fn = torch.compile(fn)

        # float dtype keeps fractional bounds; int dtype truncates
        x = make_arg(1, dtype=torch.float32)
        self.assertEqual(fn(x), compiled_fn(x))

        x = make_arg(1, dtype=torch.int64)
        self.assertEqual(fn(x), compiled_fn(x))
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_argmax(self):
def fn():
a = torch.zeros([2, 2])
b = a.argmax(0)
return b.float().mean()
compiled_fn = torch.compile(fn)
self.assertEqual(fn(), compiled_fn())
    @torch._inductor.config.patch("graph_partition", True)
    def test_graph_partition_both_scalars(self):
        """aten add/sub/mul where BOTH operands are Python scalars (int and float)."""

        def fn(a, b):
            return (
                aten.add(a, b),
                aten.add(b, a),
                aten.sub(a, b),
                aten.sub(b, a),
                aten.mul(a, b),
                aten.mul(b, a),
            )

        compiled_fn = torch.compile(fn)
        self.assertEqual(fn(4, 3.3), compiled_fn(4, 3.3))
    @torch._inductor.config.patch("graph_partition", True)
    @config.patch(assume_aligned_inputs=False)
    def test_graph_partition_misaligned_input(self):
        """fw+bw through strided views with deliberately misaligned storage offsets."""

        def fn(x):
            return x.cos() * x.sin()

        fn_c = torch.compile(fn, mode="reduce-overhead", dynamic=True)

        # (size, stride, storage_offset) triples; offsets 4/5 break 16-byte alignment
        for size, stride, offset in (
            ((32, 32), (32, 1), 4),
            ((48, 48), (48, 1), 4),
            ((64, 64), (64, 1), 5),
        ):
            # identical seeds so base and base_ref hold the same values
            torch.manual_seed(42)
            base = torch.randn(
                64 * 64 + 64,
                dtype=torch.float32,
                device=self.device,
                requires_grad=True,
            )
            torch.manual_seed(42)
            base_ref = torch.randn(
                64 * 64 + 64,
                dtype=torch.float32,
                device=self.device,
                requires_grad=True,
            )

            inp = torch.as_strided(base, size, stride, offset)
            inp_ref = torch.as_strided(base_ref, size, stride, offset)

            inp.requires_grad_(True)
            inp_ref.requires_grad_(True)

            res = fn_c(inp)
            ref = fn(inp_ref)
            self.assertEqual(ref, res)

            res.sum().backward()
            ref.sum().backward()
            self.assertEqual(base.grad, base_ref.grad)
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_constant_tensor1(self):
def fn():
a = torch.zeros([1, 2], dtype=torch.int32)
a = a + a
b = a.to(dtype=torch.float32)
return b * 0.8
compiled_fn = torch.compile(fn)
self.assertEqual(fn(), compiled_fn())
    @torch._inductor.config.patch("graph_partition", True)
    def test_graph_partition_constant_tensor2(self):
        """A tensor built from a Python list literal added to a runtime input."""

        def fn(x):
            return torch.tensor(list(range(2, 40, 2)), device=self.device) + x

        compiled_fn = torch.compile(fn)
        x = torch.randn(1, device=self.device)
        self.assertEqual(fn(x), compiled_fn(x))
@torch._inductor.config.patch("graph_partition", True)
def test_graph_partition_scalar_inputs(self):
def fn(a, b):
return (
aten.div(a, b, rounding_mode=None),
aten.div(a * 0.5, b, rounding_mode=None),
aten.div(a, b * 1.0, rounding_mode=None),
aten.div(a, b, rounding_mode="floor"),
aten.div(a, b, rounding_mode="trunc"),
a / b,
a // b,
)
compiled_fn = torch.compile(fn)
self.assertEqual(fn(1024, 100), compiled_fn(1024, 100))
    @torch._inductor.config.patch("graph_partition", True)
    def test_graph_partition_unbacked_symint_as_output(self):
        """repeat_interleave produces an unbacked output length feeding index_select."""

        def nested(x, repeats):
            rank = torch.arange(repeats.numel(), device=x.device)
            # output length = repeats.sum() — unbacked at trace time
            index = rank.repeat_interleave(repeats, dim=0)
            return torch.index_select(x, index=index, dim=0)

        example_inputs = (
            torch.randn((32, 64), device=self.device),
            repeats := torch.tensor([5, 10, 15], device=self.device),
        )
        torch._dynamo.mark_dynamic(repeats, 0)

        nested_opt = torch.compile(nested, backend="inductor")

        expect = nested(*example_inputs)
        actual = nested_opt(*example_inputs)
        self.assertEqual(expect, actual)
    @torch._inductor.config.patch("graph_partition", True)
    def test_graph_partition_refcount(self):
        """Inputs must be deallocated before the final matmul of the compiled graph.

        Uses a TorchDispatchMode to observe, at the moment aten.mm.out runs,
        that the input list has been emptied and the weakrefs are dead.
        """
        contexts = [
            contextlib.nullcontext,
            lambda: torch._inductor.config.patch({"triton.cudagraphs": True}),
        ]

        for context in contexts:
            with context():
                inps = [
                    torch.rand([5, 5]).to(self.device),
                    torch.rand([5, 5]).to(self.device),
                ]
                inp_refs = [weakref.ref(inp) for inp in inps]

                def fn(x, y):
                    a = x + y
                    return (a @ a,)

                fn_fx = make_fx(fn)(inps[0], inps[1])
                fn_compiled = compile_fx_inner(fn_fx, inps)

                matmul_seen = False

                class TestRefMode(TorchDispatchMode):
                    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                        kwargs = kwargs if kwargs else {}

                        nonlocal inps
                        nonlocal inp_refs
                        nonlocal matmul_seen

                        # by matmul, inputs should be deallocated
                        gc.collect()
                        if func is aten.mm.out:
                            matmul_seen = True
                            assert len(inps) == 0
                            assert inp_refs[0]() is None
                            assert inp_refs[1]() is None

                        return func(*args, **kwargs)

                with TestRefMode():
                    fn_compiled(inps)

                # do an extra run to make sure we are deallocating on warmup and record
                inps.extend(
                    [
                        torch.rand([5, 5]).to(self.device),
                        torch.rand([5, 5]).to(self.device),
                    ]
                )
                inp_refs.extend([weakref.ref(inp) for inp in inps])
                matmul_seen = False

                with TestRefMode():
                    fn_compiled(inps)

                assert len(inps) == 0
    @torch._inductor.config.patch("graph_partition", True)
    def test_graph_partition_pad_dynamic(self):
        """TF-style "same" padding computed from dynamic spatial sizes."""

        def get_same_padding(x: int, k: int, s: int, d: int):
            # total padding needed so the strided conv covers the whole input
            return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)

        def pad_same(x, k, s, d=(1, 1), value=0):
            ih, iw = x.size()[-2:]
            pad_h, pad_w = (
                get_same_padding(ih, k[0], s[0], d[0]),
                get_same_padding(iw, k[1], s[1], d[1]),
            )
            if pad_h > 0 or pad_w > 0:
                x = torch.nn.functional.pad(
                    x,
                    [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2],
                    value=value,
                )
            return x

        x = torch.randn(2, 24, 110, 110, device=self.device)
        opt = torch.compile(pad_same, dynamic=True)
        res = opt(x, (5, 5), (2, 2))
        ref = pad_same(x, (5, 5), (2, 2))
        self.assertEqual(res, ref, atol=0, rtol=0)
    @skip_if_halide  # only 32-bit indexing
    @largeTensorTest("16GB", inductor=True)
    def test_split_reduction_with_int64_size(self):
        """Mean over a tensor with > 2**31 elements, forcing 64-bit indexing in the split reduction."""
        if torch._inductor.config.cpu_backend == "triton":
            raise unittest.SkipTest(
                "Fail for triton cpu backend with error: https://gist.github.com/shunting314/a873fb32b6b7b5a437f44280ae86839f"
            )

        if self.device == "cpu":
            raise unittest.SkipTest(
                "The test fails some times on CI: "
                "https://github.com/pytorch/pytorch/actions/runs/15333913377/job/43153170162. "
                "Skip for now."
            )

        size = (30000, 100000)
        # rand rather than randn since the mean for the latter is close to 0
        # which happens to be close to the value generated by the bug.
        t = torch.rand(size, dtype=torch.float, device=self.device)
        op = torch.mean
        expected = op(t)
        actual = torch.compile(op)(t)
        # self.common takes more GPU memory. Do the check directly
        self.assertTrue(
            torch.allclose(expected, actual, atol=1e-2, rtol=1e-2),
            f"{expected=} {actual=}",
        )
    def test_remove_noop_view_default(self):
        """A reshape that matches the tensor's existing shape is removed from the post-grad graph."""

        def f(x):
            batch_size = x.shape[0]
            x = x.transpose(1, 2)  # (batch_size, 2, 3)
            x = x.reshape(batch_size, 2, 3)  # noop
            return x

        f = torch.compile(f)
        x = torch.randn((2, 3, 2), device=self.device)
        # only the permute should remain — the reshape is eliminated
        expected_graph1 = f"""\
def forward(self, arg0_1: "f32[2, 3, 2][6, 2, 1]{str(x.device)}"):
    permute: "f32[2, 2, 3][6, 1, 2]{str(x.device)}" = torch.ops.aten.permute.default(arg0_1, [0, 2, 1]);  arg0_1 = None
    return (permute,)"""  # noqa: B950
        post_grad_graph = get_post_grad_graph(f, (x,))
        self.assertExpectedInline(
            post_grad_graph,
            expected_graph1,
            ignore_comments=True,
            ignore_empty_lines=True,
        )

        # dynamic shape
        x = torch.randn((4, 3, 2), device=self.device)
        expected_graph2 = f"""\
def forward(self, arg0_1: "Sym(s77)", arg1_1: "f32[s77, 3, 2][6, 2, 1]{str(x.device)}"):
    permute: "f32[s77, 2, 3][6, 1, 2]{str(x.device)}" = torch.ops.aten.permute.default(arg1_1, [0, 2, 1]);  arg1_1 = None
    return (permute,)"""  # noqa: B950
        post_grad_graph = get_post_grad_graph(f, (x,))
        self.assertExpectedInline(
            post_grad_graph,
            expected_graph2,
            ignore_comments=True,
            ignore_empty_lines=True,
        )
    def test_remove_noop_view_dtype(self):
        """A dtype view to the tensor's own dtype (uint8 -> uint8) is removed from the graph."""

        def f(x):
            x = x.transpose(1, 2)  # (batch_size, 2, 3)
            x = x.view(torch.uint8)  # noop
            return x

        f = torch.compile(f)
        x = torch.ones((2, 3, 2), device=self.device, dtype=torch.uint8)
        # mark every dim dynamic so the check covers the symbolic-shape path
        torch._dynamo.mark_dynamic(x, 0)
        torch._dynamo.mark_dynamic(x, 1)
        torch._dynamo.mark_dynamic(x, 2)
        post_grad_graph = get_post_grad_graph(f, (x,))
        expected_graph = f"""\
def forward(self, arg0_1: "Sym(s77)", arg1_1: "Sym(s27)", arg2_1: "Sym(s53)", arg3_1: "u8[s77, s27, s53][s27*s53, s53, 1]{str(x.device)}"):
    permute: "u8[s77, s53, s27][s27*s53, 1, s53]{str(x.device)}" = torch.ops.aten.permute.default(arg3_1, [0, 2, 1]);  arg3_1 = None
    return (permute,)"""  # noqa: B950
        self.assertExpectedInline(
            post_grad_graph,
            expected_graph,
            ignore_comments=True,
            ignore_empty_lines=True,
        )
    @config.patch("min_num_split", 256)
    def test_split_reduction_dynamic_shape(self):
        """Outer-dim sum with a dynamic first dimension and a forced minimum split count."""
        from torch._dynamo.decorators import mark_dynamic

        def f(x):
            # outer reduction
            return x.sum(dim=0)

        N = 512
        x_small = torch.randn(4096, N, device=self.device)
        mark_dynamic(x_small, 0)

        expect = f(x_small)
        opt_f = torch.compile(f, dynamic=True)
        actual = opt_f(x_small)
        self.assertTrue(torch.allclose(expect, actual, atol=1e-3, rtol=1e-3))

        if DO_PERF_TEST:
            from triton.testing import do_bench

            # benchmark for a much larger input
            x_large = torch.randn(4096 * 1000, N, device=self.device)
            ms = do_bench(lambda: opt_f(x_large))
            print(f"{ms=:.3f}")
@expectedFailureCodegenDynamic
def test_special_polygamma(self):
fn = torch.special.polygamma
x = torch.tensor(2, dtype=torch.float32)
self.common(fn, (0, x))
self.common(fn, (1, x))
self.common(fn, (2, x))
    @skip_if_triton
    @skip_if_halide
    @config.patch({"freezing": True})
    def test_dont_constant_fold(self):
        """add_dont_constant_fold must keep the weight*scale mul out of constant folding."""
        from torch._inductor.constant_folding import (
            add_dont_constant_fold,
            clear_dont_constant_fold,
        )

        m = 5

        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.w = torch.randn(m)
                self.s = torch.randn(m)

            def forward(self, x):
                # w * s is constant-foldable unless mul is opted out
                return self.w * self.s + x

        x = torch.rand(m)
        mod = M()
        for dont_constant_fold in [True, False]:
            clear_dont_constant_fold()
            if dont_constant_fold:
                add_dont_constant_fold(torch.ops.aten.mul.Tensor)
            with torch.no_grad():
                refe_out = mod(x)
                mod = torch.compile(mod)
                test_out, (code,) = run_and_get_code(mod, x)
            # opted-out: mul survives in the kernel name; otherwise it was folded
            if dont_constant_fold:
                FileCheck().check("cpp_fused_add_mul").run(code)
            else:
                FileCheck().check("cpp_fused_add_0").run(code)
            self.assertEqual(refe_out, test_out)
    def test_triton_kernel_bool_param(self):
        """A user triton kernel taking a Python bool constexpr argument."""
        if self.device != GPU_TYPE or self.device == "mps":
            raise unittest.SkipTest("requires GPU")

        from torch.testing._internal.triton_utils import add_kernel_with_boolean_param

        class Model(torch.nn.Module):
            def forward(self, x):
                out = torch.zeros_like(x)
                add_kernel_with_boolean_param[1,](
                    in_ptr0=x,
                    in_ptr1=x,
                    out_ptr=out,
                    n_elements=x.numel(),
                    add_xy=True,
                    BLOCK_SIZE=1,
                )
                return out

        inputs = (torch.randn(4, device=self.device),)
        self.common(Model(), inputs)
    @skipIfXpu(
        msg="Profile not enabled on XPU CI, "
        "https://github.com/intel/torch-xpu-ops/issues/2334"
    )
    @requires_gpu_and_triton
    @parametrize("use_cat", [True, False])
    def test_copy_non_blocking_is_pinned(self, use_cat):
        """Compiled non-blocking GPU->CPU copies must go through pinned memory.

        Verified by profiling: no "Pageable" memcpy kernels may appear.
        """

        def f(a_list):
            a_cpu_list = []
            a_to_cpu_event_list = []
            for a in a_list:
                a_cpu = a.to(device="cpu", non_blocking=True)
                # record an event per copy so we can wait for all of them
                a_to_cpu_event = torch.Event()
                a_to_cpu_event.record()
                a_cpu_list.append(a_cpu)
                a_to_cpu_event_list.append(a_to_cpu_event)
            for e in a_to_cpu_event_list:
                e.synchronize()
            if use_cat:
                return torch.cat(a_cpu_list)
            else:
                return a_cpu_list

        f_compiled = torch.compile(f)
        inputs = [
            torch.rand(1000, dtype=torch.float16, device=GPU_TYPE) for _ in range(100)
        ]
        outputs = f(inputs)
        # warm up outside the profiled region so compilation is excluded
        warmup_compiled = f_compiled(inputs)
        with torch.profiler.profile(
            activities=[
                getattr(torch.profiler.ProfilerActivity, GPU_TYPE.upper()),
            ],
        ) as p:
            outputs_compiled = f_compiled(inputs)

        self.assertEqual(outputs, outputs_compiled)
        profile_output = str(p.key_averages())
        print(profile_output)
        # pageable (non-pinned) copies would show up as "Pageable" memcpys
        self.assertFalse("Pageable" in profile_output)
@unittest.skipIf(
config.cpp_wrapper,
"cpp_wrapper samples will lead to invalid indexing",
)
def test_inductor_triton_bucketize_respects_masking(self):
def fn(inp, repeats, output_size):
# return torch.repeat_interleave(inp, repeats, dim=0, output_size=output_size)
idx = torch.searchsorted(
repeats.cumsum(0),
torch.arange(0, output_size, device=repeats.device),
right=True,
)
return torch.index_select(inp, 0, idx)
inp = torch.arange(0, 4, device=self.device)
repeats = torch.tensor([1, 2, 3, 4], device=self.device)
output_size = repeats.sum().item()
args = (inp, repeats, output_size)
self.assertEqual(fn(*args), torch.compile(fn)(*args))
@parametrize("dtype", [torch.int32, torch.int64])
@parametrize("nd", [1, 2])
def test_repeat_interleave_Tensor_decomp(self, dtype, nd):
# https://github.com/pytorch/pytorch/issues/147160
def f(input, repeats):
return torch.repeat_interleave(input, repeats, dim=0, output_size=3) + 1
input = torch.tensor([[1, 2], [3, 4]], dtype=dtype, device=self.device)
input = torch.arange(1, 2**nd + 1, dtype=dtype, device=self.device).reshape(
[2] * nd
)
repeat = torch.tensor([1, 2], device=self.device)
f_compiled = torch.compile(f)
output, (code,) = run_and_get_code(f_compiled, input, repeat)
reference = f(input, repeat)
self.assertEqual(output, reference)
# we don't lower when the cpp_wrapper is used because it cannot generate
# proper examples during autotune
can_lower = (not config.cpp_wrapper) and (input.device.type != "mps")
has_lowered = not re.search(r"repeat_interleave.Tensor", code)
self.assertEqual(has_lowered, can_lower)
@staticmethod
def _is_triggering_buffer_reuse(fn, *inputs):
with config.patch(allow_buffer_reuse=True):
_, (code_allowed,) = run_and_get_code(fn, *inputs)
with config.patch(allow_buffer_reuse=False):
_, (code_disallowed,) = run_and_get_code(fn, *inputs)
code_allowed = re.sub(r"AOT ID: .*", "AOT ID: ['test']", code_allowed)
code_disallowed = re.sub(r"AOT ID: .*", "AOT ID: ['test']", code_disallowed)
return code_allowed != code_disallowed
    # If matmul is implemented by triton there is more reuse
    @config.patch(max_autotune_gemm_backends="ATEN")
    @unittest.skipIf(config.triton.native_matmul, "matmul is now generated")
    def test_allow_reuse_disable_if_exceed_peak(self):
        """Buffer reuse must be suppressed when it would raise peak memory.

        Per the inline accounting, `b @ b` puts live memory at the 3*N^2 peak,
        so reuse across that point is disallowed and codegen should be
        identical with and without allow_buffer_reuse.
        """

        @torch.compile
        def fn(inp):  # 1*N^2
            a = inp.mean(-1)  # 1*N^2 + N
            b = (inp - a) ** 2  # 2*N^2 + N
            c = b @ b  # 3*N^2 (!!) since this is the peak, can not reuse across
            d = c.mean(-1)  # 2*N^2 + N
            return d  # 1*N^2 + N

        inp = torch.randn(100, 100, device=self.device)
        self.assertFalse(CommonTemplate._is_triggering_buffer_reuse(fn, inp))
def test_allow_reuse_active_if_under_peak(self):
def g(inp):
return (inp - torch.logsumexp(inp, -1)) ** 2
@torch.compile
def fn(m, inp):
inp = m @ g(inp)
inp = m @ g(inp)
inp = m @ g(inp)
inp = m @ g(inp)
inp = m @ g(inp)
return inp
m = torch.randn(100, 100, device=self.device)
inp = torch.randn(100, 100, device=self.device)
self.assertTrue(CommonTemplate._is_triggering_buffer_reuse(fn, m, inp))
@requires_gpu_and_triton
def test_cpu_scalar_with_gpu_tensor(self):
def fn(a, b):
return a + b[0]
a = torch.rand(20, device=GPU_TYPE)
b = torch.rand(4, device="cpu")
torch._inductor.metrics.generated_kernel_count = 0
eager = fn(a, b)
compiled = torch.compile(fn, backend="inductor")(a, b)
self.assertEqual(eager, compiled)
self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
@requires_gpu_and_triton
@torch._inductor.config.patch(cpp_wrapper=True)
def test_cpu_scalar_with_gpu_tensor_cpp(self):
def fn(a, b):
return a + b[0]
a = torch.rand(20, device=GPU_TYPE)
b = torch.rand(4, device="cpu")
eager = fn(a, b)
compiled = torch.compile(fn, backend="inductor")(a, b)
self.assertEqual(eager, compiled)
@requires_gpu_and_triton
def test_cpu_scalar_with_gpu_tensor_dynamic(self):
def fn(a, b):
return a + b[0]
a = torch.rand(20, device=GPU_TYPE)
b = torch.rand(4, device="cpu")
eager = fn(a, b)
compiled = torch.compile(fn, backend="inductor", dynamic=True)(a, b)
self.assertEqual(eager, compiled)
def test_cpu_scalar_with_cpu_tensor(self):
def fn(a, b):
return a + b[0]
a = torch.rand(20, device="cpu")
b = torch.rand(4, device="cpu")
torch._inductor.metrics.generated_kernel_count = 0
eager = fn(a, b)
compiled = torch.compile(fn, backend="inductor")(a, b)
self.assertEqual(eager, compiled)
self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
@requires_gpu_and_triton
def test_gpu_scalar_with_gpu_tensor(self):
def fn(a, b):
return a + b[0]
a = torch.rand(20, device=GPU_TYPE)
b = torch.rand(4, device=GPU_TYPE)
torch._inductor.metrics.generated_kernel_count = 0
eager = fn(a, b)
compiled = torch.compile(fn, backend="inductor")(a, b)
self.assertEqual(eager, compiled)
self.assertEqual(torch._inductor.metrics.generated_kernel_count, 1)
@requires_gpu_and_triton
def test_cpu_tensor_with_gpu_tensor(self):
def fn(a, b):
return a + b
a = torch.rand(20, device=GPU_TYPE)
b = torch.rand(20, device="cpu")
with self.assertRaises(RuntimeError):
compiled = torch.compile(fn, backend="inductor")(a, b)
def test_cpu_tensor_with_cpu_tensor(self):
def fn(a, b):
return a + b
a = torch.rand(20, device="cpu")
b = torch.rand(20, device="cpu")
eager = fn(a, b)
compiled = torch.compile(fn, backend="inductor")(a, b)
self.assertEqual(eager, compiled)
def test_cpu_scalar_with_cpu_scalar(self):
def fn(a, b):
return a[0] + b[0]
a = torch.rand(20, device="cpu")
b = torch.rand(20, device="cpu")
eager = fn(a, b)
compiled = torch.compile(fn, backend="inductor")(a, b)
self.assertEqual(eager, compiled)
@requires_gpu_and_triton
def test_gpu_scalar_with_cpu_tensor(self):
def fn(a, b):
return a[0] + b
a = torch.rand(20, device=GPU_TYPE)
b = torch.rand(20, device="cpu")
with self.assertRaises(RuntimeError):
compiled = torch.compile(fn, backend="inductor")(a, b)
    @requires_gpu_and_triton
    @config.patch(emulate_precision_casts=True)
    def test_emulate_precision_triton_fp_fusion(self):
        """emulate_precision_casts must disable Triton fp fusion in the generated
        kernel so the compiled fp16 result matches eager bit-for-bit
        (atol=0, rtol=0).
        """

        def fn(a, b):
            return 2.001 * a + b

        # NOTE(review): constants presumably chosen so a fused multiply-add
        # would round differently from the separate mul + add — confirm.
        a = torch.full([256], 0.5001, device=GPU_TYPE, dtype=torch.float16)
        b = torch.full([256], -1, device=GPU_TYPE, dtype=torch.float16)
        compiled = torch.compile(fn)
        out, (code,) = run_and_get_code(compiled, a, b)
        # The kernel must be launched with fp fusion explicitly disabled.
        self.assertTrue("'enable_fp_fusion': False" in code)
        torch.testing.assert_close(out, fn(a, b), atol=0, rtol=0)
    @requires_gpu_and_triton
    @config.patch(runtime_triton_nan_asserts=True)
    def test_nan_assert_inside_triton_kernel(self):
        """runtime_triton_nan_asserts should inject a NaN/Inf device-side check
        into the generated Triton kernel without changing the computed result.
        """

        def fn(x):
            x = x - 1
            # Uncommenting the following line triggers the device-side
            # assertion (log of values <= 0 produces NaN/-inf).
            # x = torch.log(x)
            return torch.where(x.isnan(), 3.14, x)

        compiled = torch.compile(fn)
        x = torch.randn(4096, device=GPU_TYPE)
        out, (code,) = run_and_get_code(compiled, x)
        # The assert message must be present in the generated kernel source.
        self.assertTrue("'NaN or Inf found'" in code)
        torch.testing.assert_close(out, fn(x))
    @skip_if_cpp_wrapper("skip cpp wrapper")
    @requires_gpu_and_triton
    def test_repeat_interleave_decomposition_has_clamp(self):
        """The repeat_interleave decomposition must clamp the gathered indices so
        the subsequent gather stays in-bounds, and the clamp must be visible in
        the generated Triton code.
        """
        repeat = torch.ones(2560, dtype=torch.int64, device=GPU_TYPE)
        # NOTE(review): output_size deliberately exceeds repeat.sum() (2560),
        # presumably to exercise the out-of-bounds clamp — confirm.
        output_size = 505450
        data = torch.arange(2560, device=GPU_TYPE)

        if is_dynamic_shape_enabled():
            raise unittest.SkipTest(
                "repeat_interleave decomp doesn't support dynamic output size"
            )

        @torch.compile
        def fn(repeat, output_size, data):
            indices = torch.ops.aten.repeat_interleave.Tensor(
                repeat, output_size=output_size
            )
            return data[indices]

        result, code = run_and_get_code(fn, repeat, output_size, data)

        # Every gathered value must be a valid index into `data`.
        self.assertEqual(result.shape[0], output_size)
        self.assertTrue(torch.all(result >= 0).item())
        self.assertTrue(torch.all(result < 2560).item())

        code_str = "\n".join(code)
        # ROCm and CUDA spell the clamp helper differently.
        if torch.version.hip:
            triton_str = "tl.minimum"
        else:
            triton_str = "triton_helpers.minimum"
        self.assertIn(
            triton_str,
            code_str,
            "Generated Triton code should use triton_helpers.minimum for clamping",
        )
    @skipIfMPS  # Accuracy issue on MPS
    def test_weight_norm_conv2d(self):
        """
        Verify fix for https://github.com/pytorch/pytorch/issues/165749

        Forward output and parameter gradients of a weight-normed Conv2d must
        match between eager and torch.compile (within 1e-3).
        """
        from torch.nn.utils.parametrizations import weight_norm

        # NOTE(review): d=65 presumably taken from the issue repro — confirm.
        d = 65
        x = torch.randn((2, 2, 32, 32), device=self.device)
        conv = weight_norm(nn.Conv2d(2, d, 2)).to(device=self.device)

        ref = conv(x)
        grad_out = torch.randn_like(ref)
        ref_grad = torch.autograd.grad(ref, list(conv.parameters()), grad_out)

        compiled_conv = torch.compile(conv)
        act = compiled_conv(x)
        act_grad = torch.autograd.grad(act, list(compiled_conv.parameters()), grad_out)
        self.assertTrue(same((ref, ref_grad), (act, act_grad), tol=1e-3))
    @skip_if_halide
    @requires_cuda_and_triton
    @skip_if_cpp_wrapper("skip cpp wrapper")
    def test_triton_argmin_argmax_transpose_logical_index(self):
        """argmin/argmax over non-contiguous views (transposed, permuted, sliced,
        column-major) must report logical indices of the view, not physical
        storage offsets.
        """

        # In-place mutation followed by a transpose before the reduction.
        def fn(x):
            x.tan_()
            x = x.t()
            return x.argmin()

        self.common(fn, (torch.randn(6, 4, device=GPU_TYPE),))

        # Transposed argmin/argmax across several sizes and dtypes.
        def fn(x):
            return (x.t().argmin(), x.t().argmax())

        self.common(fn, (torch.randn(6, 4, device=GPU_TYPE),))
        self.common(fn, (torch.randn(128, 64, device=GPU_TYPE),))
        self.common(fn, (torch.randn(8, 6, device=GPU_TYPE, dtype=torch.float16),))

        def fn(x):
            # Permute: (A, B, C) -> (C, A, B)
            permuted = x.permute(2, 0, 1)
            return (permuted.argmin(), permuted.argmax())

        self.common(fn, (torch.randn(4, 6, 8, device=GPU_TYPE),))

        def fn(x):
            # sliced tensor with gaps in memory
            sliced = x[:, :10]
            return (sliced.argmin(), sliced.argmax())

        self.common(fn, (torch.randn(10, 20, device=GPU_TYPE),))

        # Test column major passed as input
        def fn(x):
            return (x.argmin(), x.argmax())

        self.common(fn, (torch.randn(6, 4, device=GPU_TYPE).t().contiguous().t(),))
@skip_if_halide
@requires_cuda_and_triton
def test_unbacked_float_item(self):
def fn(x, max_val):
return torch.clamp(x, 0, max_val.item())
self.common(
fn,
(
torch.randn(10, 20, 30, device=self.device),
torch.tensor(5.0, device=self.device),
),
)
# end of class CommonTemplate - add new tests here
@dataclasses.dataclass
| CommonTemplate |
python | tiangolo__fastapi | docs_src/body_nested_models/tutorial002_py310.py | {
"start": 78,
"end": 369
} | class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
tags: list[str] = []
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item):
results = {"item_id": item_id, "item": item}
return results
| Item |
python | has2k1__plotnine | plotnine/scales/scale_manual.py | {
"start": 1967,
"end": 2478
} | class ____(_scale_manual):
"""
Custom discrete shape scale
See Also
--------
[](`matplotlib.markers`)
"""
_aesthetics = ["shape"]
values: InitVar[Sequence[Any] | dict[Any, Any]]
"""
Shapes that make up the palette. See [](`matplotlib.markers`) for list
of all possible shapes. The values will be matched with the `limits`
of the scale or the `breaks` if provided. If it is a dict then it
should map data values to shapes.
"""
@dataclass
| scale_shape_manual |
python | pydantic__pydantic | tests/test_validate_call.py | {
"start": 36588,
"end": 37300
} | class ____[T]:
@validate_call(validate_return=True)
def f(self, a: T) -> T:
return str(a)
"""
)
A = module.A
a = A[int]()
# these two are undesired behavior, but it's what happens now
assert a.f(1) == '1'
assert a.f('1') == '1'
@pytest.mark.skipif(sys.version_info < (3, 12), reason='requires Python 3.12+ for PEP 695 syntax with generics')
def test_pep695_with_nested_scopes(create_module):
"""Nested scopes generally cannot be caught by `parent_frame_namespace`,
so currently this test is expected to fail.
"""
module = create_module(
"""
from __future__ import annotations
from pydantic import validate_call
| A |
python | ipython__ipython | tests/test_interactivshell.py | {
"start": 3311,
"end": 4870
} | class ____(object):
"""Machinery for tests of the main interact loop.
Used by the mock_input decorator.
"""
def __init__(self, testgen):
self.testgen = testgen
self.exception = None
self.ip = get_ipython()
def __enter__(self):
self.orig_prompt_for_code = self.ip.prompt_for_code
self.ip.prompt_for_code = self.fake_input
return self
def __exit__(self, etype, value, tb):
self.ip.prompt_for_code = self.orig_prompt_for_code
def fake_input(self):
try:
return next(self.testgen)
except StopIteration:
self.ip.keep_running = False
return ""
except:
self.exception = sys.exc_info()
self.ip.keep_running = False
return ""
def mock_input(testfunc):
"""Decorator for tests of the main interact loop.
Write the test as a generator, yield-ing the input strings, which IPython
will see as if they were typed in at the prompt.
"""
def test_method(self):
testgen = testfunc(self)
with mock_input_helper(testgen) as mih:
mih.ip.interact()
if mih.exception is not None:
# Re-raise captured exception
etype, value, tb = mih.exception
import traceback
traceback.print_tb(tb, file=sys.stdout)
del tb # Avoid reference loop
raise value
return test_method
# Test classes -----------------------------------------------------------------
| mock_input_helper |
python | pypa__virtualenv | src/virtualenv/run/session.py | {
"start": 2248,
"end": 2487
} | class ____:
"""lazily populate debug."""
def __init__(self, creator) -> None:
self.creator = creator
def __repr__(self) -> str:
return json.dumps(self.creator.debug, indent=2)
__all__ = [
"Session",
]
| _Debug |
python | pytorch__pytorch | torch/_inductor/codegen/memory_planning.py | {
"start": 6122,
"end": 6376
} | class ____(Protocol):
get_live_ranges: CachedMethod[[], LiveRanges]
get_size_hint: CachedMethod[[], int]
get_symbolic_size: CachedMethod[[], sympy.Expr]
def _allocate(self, block: Allocation, is_last: bool) -> bool: ...
| MemorySplitProtocol |
python | kubernetes-client__python | kubernetes/client/models/v1_stateful_set.py | {
"start": 383,
"end": 7226
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1StatefulSetSpec',
'status': 'V1StatefulSetStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1StatefulSet - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1StatefulSet. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1StatefulSet. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1StatefulSet.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1StatefulSet. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1StatefulSet. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1StatefulSet. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1StatefulSet.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1StatefulSet. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1StatefulSet. # noqa: E501
:return: The metadata of this V1StatefulSet. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1StatefulSet.
:param metadata: The metadata of this V1StatefulSet. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1StatefulSet. # noqa: E501
:return: The spec of this V1StatefulSet. # noqa: E501
:rtype: V1StatefulSetSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1StatefulSet.
:param spec: The spec of this V1StatefulSet. # noqa: E501
:type: V1StatefulSetSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1StatefulSet. # noqa: E501
:return: The status of this V1StatefulSet. # noqa: E501
:rtype: V1StatefulSetStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1StatefulSet.
:param status: The status of this V1StatefulSet. # noqa: E501
:type: V1StatefulSetStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StatefulSet):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1StatefulSet):
return True
return self.to_dict() != other.to_dict()
| V1StatefulSet |
python | pytorch__pytorch | test/inductor/test_cutlass_evt.py | {
"start": 2526,
"end": 2697
} | class ____(BaseSchedulerNode):
def __init__(self, node, last_usage=None):
self.node = node
self.last_usage = last_usage or OrderedSet()
| MockSchedulerNode |
python | pydata__xarray | xarray/tests/test_utils.py | {
"start": 1438,
"end": 1816
} | class ____:
def test_0d(self):
# verify our work around for pd.isnull not working for 0-dimensional
# object arrays
assert duck_array_ops.array_equiv(0, np.array(0, dtype=object))
assert duck_array_ops.array_equiv(np.nan, np.array(np.nan, dtype=object))
assert not duck_array_ops.array_equiv(0, np.array(1, dtype=object))
| TestArrayEquiv |
python | ansible__ansible | hacking/create-bulk-issues.py | {
"start": 3936,
"end": 4199
} | class ____(metaclass=abc.ABCMeta):
@staticmethod
@abc.abstractmethod
def parse(message: str) -> Deprecation:
pass
@abc.abstractmethod
def create_bug_report(self) -> BugReport:
pass
@dataclasses.dataclass(frozen=True)
| Deprecation |
python | langchain-ai__langchain | libs/core/tests/unit_tests/language_models/llms/test_cache.py | {
"start": 2083,
"end": 3834
} | class ____(BaseCache):
"""In-memory cache used for testing purposes."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
"""Look up based on `prompt` and `llm_string`."""
msg = "This code should not be triggered"
raise NotImplementedError(msg)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on `prompt` and `llm_string`."""
msg = "This code should not be triggered"
raise NotImplementedError(msg)
@override
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
def test_no_cache_generate_sync() -> None:
global_cache = InMemoryCacheBad()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=False, responses=["foo", "bar"])
output = llm.generate(["foo"])
assert output.generations[0][0].text == "foo"
output = llm.generate(["foo"])
assert output.generations[0][0].text == "bar"
assert global_cache._cache == {}
finally:
set_llm_cache(None)
async def test_no_cache_generate_async() -> None:
global_cache = InMemoryCacheBad()
try:
set_llm_cache(global_cache)
llm = FakeListLLM(cache=False, responses=["foo", "bar"])
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "foo"
output = await llm.agenerate(["foo"])
assert output.generations[0][0].text == "bar"
assert global_cache._cache == {}
finally:
set_llm_cache(None)
| InMemoryCacheBad |
python | chroma-core__chroma | chromadb/telemetry/product/posthog.py | {
"start": 340,
"end": 2176
} | class ____(ProductTelemetryClient):
def __init__(self, system: System):
if not system.settings.anonymized_telemetry or "pytest" in sys.modules:
posthog.disabled = True
else:
logger.info(
"Anonymized telemetry enabled. See \
https://docs.trychroma.com/telemetry for more information."
)
posthog.project_api_key = "phc_YeUxaojbKk5KPi8hNlx1bBKHzuZ4FDtl67kH1blv8Bh"
posthog_logger = logging.getLogger("posthog")
# Silence posthog's logging
posthog_logger.disabled = True
self.batched_events: Dict[str, ProductTelemetryEvent] = {}
self.seen_event_types: Set[Any] = set()
super().__init__(system)
@override
def capture(self, event: ProductTelemetryEvent) -> None:
if event.max_batch_size == 1 or event.batch_key not in self.seen_event_types:
self.seen_event_types.add(event.batch_key)
self._direct_capture(event)
return
batch_key = event.batch_key
if batch_key not in self.batched_events:
self.batched_events[batch_key] = event
return
batched_event = self.batched_events[batch_key].batch(event)
self.batched_events[batch_key] = batched_event
if batched_event.batch_size >= batched_event.max_batch_size:
self._direct_capture(batched_event)
del self.batched_events[batch_key]
def _direct_capture(self, event: ProductTelemetryEvent) -> None:
try:
posthog.capture(
self.user_id,
event.name,
{**event.properties, **POSTHOG_EVENT_SETTINGS, **self.context},
)
except Exception as e:
logger.error(f"Failed to send telemetry event {event.name}: {e}")
| Posthog |
python | ansible__ansible | lib/ansible/_internal/_templating/_datatag.py | {
"start": 527,
"end": 752
} | class ____(AnsibleSingletonTagBase):
# deprecated: description='embedded Jinja constant string template support' core_version='2.23'
pass
@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
| _JinjaConstTemplate |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/cholesky_op_test.py | {
"start": 11928,
"end": 15230
} | class ____(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 2, 2),
(513, 8, 8),
(513, 256, 256),
(4, 513, 2, 2),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return np.tile(matrix, batch_shape + (1, 1))
def benchmarkCholeskyOp(self):
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_cpu_{shape}".format(shape=shape))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(
l,),
min_iters=25,
name="cholesky_gpu_{shape}".format(shape=shape))
def benchmarkGradVariants(self):
def _BenchmarkGrad(grad_fn, name, device):
for shape in self.shapes:
matrix = self._GenerateMatrix(shape)
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device(device):
l = variables.Variable(np.linalg.cholesky(matrix))
grad_matrix = variables.Variable(
np.random.randn(*matrix.shape).astype(np.float32))
grad = grad_fn(l, grad_matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(
grad,),
min_iters=25,
name="{name}_{dev}_{shape}".format(
name=name, dev=grad.device, shape=shape))
if test.is_gpu_available(True):
_BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
"/device:GPU:0")
_BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
"/device:GPU:0")
_BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
"/device:GPU:0")
_BenchmarkGrad(MatrixInverseCompositeGrad, "composite_matrix_inverse",
"/cpu:0")
_BenchmarkGrad(TriAngInvCompositeGrad, "composite_tri_ang_inverse",
"/cpu:0")
_BenchmarkGrad(TriAngSolveCompositeGrad, "composite_triangular_solve",
"/cpu:0")
if __name__ == "__main__":
test.main()
| CholeskyBenchmark |
python | huggingface__transformers | tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py | {
"start": 1495,
"end": 14344
} | class ____:
def __init__(
self,
parent,
input_modality="speech",
batch_size=2,
seq_length=4,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
max_new_tokens=None,
num_labels=3,
num_choices=4,
scope=None,
vocab_size=20,
t2u_vocab_size=20,
hidden_size=6,
num_hidden_layers=2,
intermediate_size=6,
max_position_embeddings=256,
encoder_layers=2,
decoder_layers=2,
encoder_ffn_dim=6,
decoder_ffn_dim=6,
encoder_layerdrop=0.0,
speech_encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
t2u_encoder_layers=2,
t2u_decoder_layers=2,
t2u_encoder_ffn_dim=6,
t2u_decoder_ffn_dim=6,
num_heads=2,
vocoder_num_spkrs=5,
vocoder_num_langs=5,
upsample_initial_channel=32,
unit_embed_dim=25,
spkr_embed_dim=6,
lang_embed_dim=6,
num_conv_pos_embeddings=8,
unit_hifi_gan_vocab_size=20,
t2u_num_langs=0,
t2u_offset_tgt_lang=0,
vocoder_offset=0,
t2u_variance_predictor_hidden_dim=4,
char_vocab_size=4,
left_max_position_embeddings=2,
right_max_position_embeddings=1,
speech_encoder_chunk_size=2,
speech_encoder_left_chunk_num=1,
):
self.parent = parent
self.input_modality = input_modality
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.vocab_size = vocab_size
self.t2u_vocab_size = t2u_vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.max_position_embeddings = max_position_embeddings
self.encoder_layers = encoder_layers
self.decoder_layers = decoder_layers
self.encoder_ffn_dim = encoder_ffn_dim
self.decoder_ffn_dim = decoder_ffn_dim
self.t2u_encoder_layers = t2u_encoder_layers
self.t2u_decoder_layers = t2u_decoder_layers
self.t2u_encoder_ffn_dim = t2u_encoder_ffn_dim
self.t2u_decoder_ffn_dim = t2u_decoder_ffn_dim
self.num_heads = num_heads
self.num_attention_heads = num_heads
self.vocoder_num_spkrs = vocoder_num_spkrs
self.vocoder_num_langs = vocoder_num_langs
self.upsample_initial_channel = upsample_initial_channel
self.unit_embed_dim = unit_embed_dim
self.spkr_embed_dim = spkr_embed_dim
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.lang_embed_dim = lang_embed_dim
self.max_new_tokens = max_new_tokens
self.unit_hifi_gan_vocab_size = unit_hifi_gan_vocab_size
self.t2u_num_langs = t2u_num_langs
self.t2u_offset_tgt_lang = t2u_offset_tgt_lang
self.vocoder_offset = vocoder_offset
self.t2u_variance_predictor_hidden_dim = t2u_variance_predictor_hidden_dim
self.char_vocab_size = char_vocab_size
self.left_max_position_embeddings = left_max_position_embeddings
self.right_max_position_embeddings = right_max_position_embeddings
self.speech_encoder_chunk_size = speech_encoder_chunk_size
self.speech_encoder_left_chunk_num = speech_encoder_left_chunk_num
self.encoder_layerdrop = encoder_layerdrop
self.speech_encoder_layerdrop = speech_encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
def prepare_config_and_inputs(self):
if self.input_modality == "text":
inputs = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1)
else:
inputs = ids_tensor([self.batch_size, self.seq_length, 160], self.vocab_size - 1).float()
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size - 1)
lm_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return config, inputs, decoder_input_ids, input_mask, lm_labels
def get_config(self):
return SeamlessM4Tv2Config(
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
initializer_range=self.initializer_range,
vocab_size=self.vocab_size,
t2u_vocab_size=self.t2u_vocab_size,
hidden_size=self.hidden_size,
speech_encoder_layers=self.num_heads,
speech_encoder_intermediate_size=self.intermediate_size,
max_position_embeddings=self.max_position_embeddings,
encoder_layers=self.encoder_layers,
decoder_layers=self.decoder_layers,
encoder_ffn_dim=self.encoder_ffn_dim,
decoder_ffn_dim=self.decoder_ffn_dim,
t2u_encoder_layers=self.t2u_encoder_layers,
t2u_decoder_layers=self.t2u_decoder_layers,
t2u_encoder_ffn_dim=self.t2u_encoder_ffn_dim,
t2u_decoder_ffn_dim=self.t2u_decoder_ffn_dim,
num_attention_heads=self.num_heads,
encoder_attention_heads=self.num_heads,
decoder_attention_heads=self.num_heads,
t2u_encoder_attention_heads=self.num_heads,
t2u_decoder_attention_heads=self.num_heads,
speech_encoder_attention_heads=self.num_heads,
unit_hifigan_vocab_vise=self.t2u_vocab_size,
vocoder_num_spkrs=self.vocoder_num_spkrs,
vocoder_num_langs=self.vocoder_num_langs,
upsample_initial_channel=self.upsample_initial_channel,
unit_embed_dim=self.unit_embed_dim,
spkr_embed_dim=self.spkr_embed_dim,
num_conv_pos_embeddings=self.num_conv_pos_embeddings,
lang_embed_dim=self.lang_embed_dim,
max_new_tokens=self.max_new_tokens,
unit_hifi_gan_vocab_size=self.unit_hifi_gan_vocab_size,
t2u_num_langs=self.t2u_num_langs,
t2u_offset_tgt_lang=self.t2u_offset_tgt_lang,
vocoder_offset=self.vocoder_offset,
t2u_variance_predictor_embed_dim=self.hidden_size,
t2u_variance_predictor_hidden_dim=self.t2u_variance_predictor_hidden_dim,
char_vocab_size=self.char_vocab_size,
left_max_position_embeddings=self.left_max_position_embeddings,
right_max_position_embeddings=self.right_max_position_embeddings,
speech_encoder_chunk_size=self.speech_encoder_chunk_size,
speech_encoder_left_chunk_num=self.speech_encoder_left_chunk_num,
encoder_layerdrop=self.encoder_layerdrop,
speech_encoder_layerdrop=self.speech_encoder_layerdrop,
decoder_layerdrop=self.decoder_layerdrop,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
decoder_input_ids,
input_mask,
lm_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
decoder_input_ids,
input_mask,
lm_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(self, config, input_ids, decoder_input_ids, input_mask, labels):
model = SeamlessM4Tv2Model(config=config)
model.to(torch_device)
model.eval()
if self.input_modality == "text":
result = model(input_ids=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids)
result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
else:
result = model(input_features=input_ids, attention_mask=input_mask, decoder_input_ids=decoder_input_ids)
result = model(input_features=input_ids, decoder_input_ids=decoder_input_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
decoder_output = result.logits
decoder_past = result.past_key_values
encoder_output = result.encoder_last_hidden_state
if self.input_modality == "text":
seq_length = self.seq_length
else:
# if speech, expected length has been subsampled.
seq_length = model._compute_sub_sample_lengths_from_attention_mask(input_mask).max().item()
self.parent.assertEqual(encoder_output.size(), (self.batch_size, seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size(), (self.batch_size, decoder_input_ids.shape[1], self.vocab_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(decoder_past), config.decoder_layers)
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
decoder_input_ids,
input_mask,
lm_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
model = SeamlessM4Tv2Model(config=config)
model.to(torch_device)
model.eval()
# make sure no pad token in decoder_input_ids
decoder_input_ids = torch.clamp(decoder_input_ids, config.pad_token_id + 1)
# first forward pass
outputs = model(
input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=input_mask, use_cache=True
)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([decoder_input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
input_ids,
decoder_input_ids=next_input_ids,
decoder_attention_mask=next_attention_mask,
output_hidden_states=True,
)
output_from_no_past = output_from_no_past["decoder_hidden_states"][0]
output_from_past = model(
input_ids,
decoder_input_ids=next_tokens,
decoder_attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["decoder_hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
decoder_input_ids,
input_mask,
lm_labels,
) = config_and_inputs
input_name = "input_ids" if self.input_modality == "text" else "input_features"
inputs_dict = {
input_name: input_ids,
"attention_mask": input_mask,
"decoder_input_ids": decoder_input_ids,
"labels": lm_labels,
}
return config, inputs_dict
@require_torch
| SeamlessM4Tv2ModelTester |
python | getsentry__sentry | tests/sentry/tasks/test_commit_context.py | {
"start": 39352,
"end": 58554
} | class ____(IntegrationTestCase, TestCommitContextIntegration):
provider = GitHubIntegrationProvider
base_url = "https://api.github.com"
def setUp(self) -> None:
super().setUp()
self.pull_request = PullRequest.objects.create(
organization_id=self.commit.organization_id,
repository_id=self.repo.id,
key="99",
author=self.commit.author,
message="foo",
title="bar",
merge_commit_sha=self.commit.key,
date_added=before_now(days=1),
)
self.repo.provider = "integrations:github"
self.repo.save()
self.pull_request_comment = PullRequestComment.objects.create(
pull_request=self.pull_request,
external_id=1,
created_at=before_now(days=1),
updated_at=before_now(days=1),
group_ids=[],
)
self.blame = FileBlameInfo(
repo=self.repo,
path="sentry/models/release.py",
ref="master",
code_mapping=self.code_mapping,
lineno=39,
commit=CommitInfo(
commitId="asdfwreqr",
committedDate=(datetime.now(tz=datetime_timezone.utc) - timedelta(days=7)),
commitMessage="placeholder commit message",
commitAuthorName="",
commitAuthorEmail="admin@localhost",
),
)
OrganizationOption.objects.set_value(
organization=self.project.organization, key="sentry:github_pr_bot", value=True
)
def add_responses(self):
responses.add(
responses.GET,
self.base_url + f"/repos/example/commits/{self.commit.key}/pulls",
status=200,
json=[{"merge_commit_sha": self.pull_request.merge_commit_sha, "state": "closed"}],
)
def test_gh_comment_not_github(
self, mock_comment_workflow: MagicMock, mock_get_commit_context: MagicMock
) -> None:
"""Non github repos shouldn't be commented on"""
mock_get_commit_context.return_value = [self.blame]
self.repo.provider = "integrations:gitlab"
self.repo.save()
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert not mock_comment_workflow.called
def test_gh_comment_org_option(
self, mock_comment_workflow: MagicMock, mock_get_commit_context: MagicMock
) -> None:
"""No comments on org with organization option disabled"""
mock_get_commit_context.return_value = [self.blame]
OrganizationOption.objects.set_value(
organization=self.project.organization, key="sentry:github_pr_bot", value=False
)
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert not mock_comment_workflow.called
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_no_pr_from_api(
self, get_jwt, mock_comment_workflow, mock_get_commit_context
):
"""No comments on suspect commit with no pr returned from API response"""
mock_get_commit_context.return_value = [self.blame]
self.pull_request.delete()
responses.add(
responses.GET,
self.base_url + f"/repos/example/commits/{self.commit.key}/pulls",
status=200,
json={"message": "No commit found for SHA"},
)
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert not mock_comment_workflow.called
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@patch("sentry_sdk.capture_exception")
@responses.activate
def test_gh_comment_api_error(
self, mock_capture_exception, get_jwt, mock_comment_workflow, mock_get_commit_context
):
"""Captures exception if Github API call errors"""
mock_get_commit_context.return_value = [self.blame]
responses.add(
responses.GET,
self.base_url + f"/repos/example/commits/{self.commit.key}/pulls",
status=400,
json={"message": "error"},
)
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert mock_capture_exception.called
assert not mock_comment_workflow.called
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_commit_not_in_default_branch(
self, get_jwt, mock_comment_workflow, mock_get_commit_context
):
"""No comments on commit not in default branch"""
mock_get_commit_context.return_value = [self.blame]
responses.add(
responses.GET,
self.base_url + f"/repos/example/commits/{self.commit.key}/pulls",
status=200,
json=[{"merge_commit_sha": "abcd"}, {"merge_commit_sha": "efgh"}],
)
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert not mock_comment_workflow.called
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_no_pr_from_query(
self, get_jwt, mock_comment_workflow, mock_get_commit_context
):
"""No comments on suspect commit with no pr row in table"""
mock_get_commit_context.return_value = [self.blame]
self.pull_request.delete()
self.add_responses()
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert not mock_comment_workflow.called
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_pr_too_old(
self,
get_jwt: MagicMock,
mock_comment_workflow: MagicMock,
mock_get_commit_context: MagicMock,
) -> None:
"""No comment on pr that's older than PR_COMMENT_WINDOW"""
mock_get_commit_context.return_value = [self.blame]
self.pull_request.date_added = before_now(days=PR_COMMENT_WINDOW + 1)
self.pull_request.save()
self.add_responses()
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert not mock_comment_workflow.called
assert len(PullRequestCommit.objects.all()) == 0
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_pr_info_level_issue(
self, get_jwt, mock_comment_workflow, mock_get_commit_context
):
"""No comment on pr that's has info level issue"""
mock_get_commit_context.return_value = [self.blame]
self.pull_request.date_added = before_now(days=1)
self.pull_request.save()
self.add_responses()
self.event.group.update(level=logging.INFO)
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert not mock_comment_workflow.called
assert len(PullRequestCommit.objects.all()) == 0
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_repeat_issue(
self,
get_jwt: MagicMock,
mock_comment_workflow: MagicMock,
mock_get_commit_context: MagicMock,
) -> None:
"""No comment on a pr that has a comment with the issue in the same pr list"""
mock_get_commit_context.return_value = [self.blame]
self.pull_request_comment.group_ids.append(self.event.group_id)
self.pull_request_comment.save()
self.add_responses()
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert not mock_comment_workflow.called
assert len(PullRequestCommit.objects.all()) == 0
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_create_queued(
self, get_jwt, mock_comment_workflow, mock_get_commit_context
):
"""Task queued if no prior comment exists"""
mock_get_commit_context.return_value = [self.blame]
self.pull_request_comment.delete()
self.add_responses()
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert mock_comment_workflow.called
pr_commits = PullRequestCommit.objects.all()
assert len(pr_commits) == 1
assert pr_commits[0].commit == self.commit
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_create_queued_existing_pr_commit(
self, get_jwt, mock_comment_workflow, mock_get_commit_context
):
"""Task queued if no prior comment exists"""
mock_get_commit_context.return_value = [self.blame]
pr_commit = PullRequestCommit.objects.create(
commit=self.commit, pull_request=self.pull_request
)
self.pull_request_comment.delete()
self.add_responses()
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert mock_comment_workflow.called
pr_commits = PullRequestCommit.objects.all()
assert len(pr_commits) == 1
assert pr_commits[0] == pr_commit
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_update_queue(
self,
get_jwt: MagicMock,
mock_comment_workflow: MagicMock,
mock_get_commit_context: MagicMock,
) -> None:
"""Task queued if new issue for prior comment"""
mock_get_commit_context.return_value = [self.blame]
self.add_responses()
with self.tasks():
assert not GroupOwner.objects.filter(group=self.event.group).exists()
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert mock_comment_workflow.called
pr_commits = PullRequestCommit.objects.all()
assert len(pr_commits) == 1
assert pr_commits[0].commit == self.commit
def test_gh_comment_no_repo(
self, mock_comment_workflow: MagicMock, mock_get_commit_context: MagicMock
) -> None:
"""No comments on suspect commit if no repo row exists"""
mock_get_commit_context.return_value = [self.blame]
self.repo.delete()
with self.tasks():
event_frames = get_frame_paths(self.event)
process_commit_context(
event_id=self.event.event_id,
event_platform=self.event.platform,
event_frames=event_frames,
group_id=self.event.group_id,
project_id=self.event.project_id,
)
assert not mock_comment_workflow.called
assert len(PullRequestCommit.objects.all()) == 0
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_debounces(
self, get_jwt, mock_record, mock_comment_workflow, mock_get_commit_context
):
mock_get_commit_context.return_value = [self.blame]
self.add_responses()
assert not GroupOwner.objects.filter(group=self.event.group).exists()
groupowner = GroupOwner.objects.create(
group_id=self.event.group_id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=1,
project_id=self.event.project_id,
organization_id=self.project.organization_id,
context={
"commitId": self.commit.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
},
date_added=timezone.now(),
)
integration = integration_service.get_integration(
organization_id=self.code_mapping.organization_id
)
assert integration
install = integration.get_installation(organization_id=self.code_mapping.organization_id)
assert isinstance(install, CommitContextIntegration)
with self.tasks():
install.queue_pr_comment_task_if_needed(
project=self.project,
commit=self.commit,
group_owner=groupowner,
group_id=self.event.group_id,
)
install.queue_pr_comment_task_if_needed(
project=self.project,
commit=self.commit,
group_owner=groupowner,
group_id=self.event.group_id,
)
assert mock_comment_workflow.call_count == 1
start_1, success_1, start_2, halt_2 = mock_record.mock_calls
assert start_1.args[0] == EventLifecycleOutcome.STARTED
assert success_1.args[0] == EventLifecycleOutcome.SUCCESS
assert start_2.args[0] == EventLifecycleOutcome.STARTED
assert halt_2.args[0] == EventLifecycleOutcome.HALTED
assert_halt_metric(mock_record, CommitContextHaltReason.ALREADY_QUEUED)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@patch("sentry.integrations.github.client.get_jwt", return_value="jwt_token_1")
@responses.activate
def test_gh_comment_multiple_comments(
self, get_jwt, mock_record, mock_comment_workflow, mock_get_commit_context
):
self.add_responses()
assert not GroupOwner.objects.filter(group=self.event.group).exists()
groupowner = GroupOwner.objects.create(
group_id=self.event.group_id,
type=GroupOwnerType.SUSPECT_COMMIT.value,
user_id=1,
project_id=self.event.project_id,
organization_id=self.project.organization_id,
context={
"commitId": self.commit.id,
"suspectCommitStrategy": "scm_based",
},
date_added=timezone.now(),
)
integration = integration_service.get_integration(
organization_id=self.code_mapping.organization_id
)
assert integration
install = integration.get_installation(organization_id=self.code_mapping.organization_id)
assert isinstance(install, CommitContextIntegration)
# open PR comment
PullRequestComment.objects.create(
external_id=1,
pull_request=self.pull_request,
created_at=before_now(days=1),
updated_at=before_now(days=1),
group_ids=[],
comment_type=CommentType.OPEN_PR,
)
with self.tasks():
install.queue_pr_comment_task_if_needed(
project=self.project,
commit=self.commit,
group_owner=groupowner,
group_id=self.event.group_id,
)
install.queue_pr_comment_task_if_needed(
project=self.project,
commit=self.commit,
group_owner=groupowner,
group_id=self.event.group_id,
)
assert mock_comment_workflow.call_count == 1
start_1, success_1, start_2, halt_2 = mock_record.mock_calls
assert start_1.args[0] == EventLifecycleOutcome.STARTED
assert success_1.args[0] == EventLifecycleOutcome.SUCCESS
assert start_2.args[0] == EventLifecycleOutcome.STARTED
assert halt_2.args[0] == EventLifecycleOutcome.HALTED
assert_halt_metric(mock_record, CommitContextHaltReason.ALREADY_QUEUED)
| TestGHCommentQueuing |
python | doocs__leetcode | solution/0500-0599/0565.Array Nesting/Solution2.py | {
"start": 0,
"end": 331
} | class ____:
def arrayNesting(self, nums: List[int]) -> int:
ans, n = 0, len(nums)
for i in range(n):
cnt = 0
while nums[i] != n:
j = nums[i]
nums[i] = n
i = j
cnt += 1
ans = max(ans, cnt)
return ans
| Solution |
python | fluentpython__example-code | 21-class-metaprog/bulkfood/bulkfood_v8.py | {
"start": 1787,
"end": 2124
} | class ____(model.Entity):
description = model.NonBlank()
weight = model.Quantity()
price = model.Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
| LineItem |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 10612,
"end": 11869
} | class ____(BaseField):
"""32-bit integer field."""
def __init__(self, min_value=None, max_value=None, **kwargs):
"""
:param min_value: (optional) A min value that will be applied during validation
:param max_value: (optional) A max value that will be applied during validation
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
"""
self.min_value, self.max_value = min_value, max_value
super().__init__(**kwargs)
def to_python(self, value):
try:
value = int(value)
except (TypeError, ValueError):
pass
return value
def validate(self, value):
try:
value = int(value)
except (TypeError, ValueError):
self.error("%s could not be converted to int" % value)
if self.min_value is not None and value < self.min_value:
self.error("Integer value is too small")
if self.max_value is not None and value > self.max_value:
self.error("Integer value is too large")
def prepare_query_value(self, op, value):
if value is None:
return value
return super().prepare_query_value(op, int(value))
| IntField |
python | Textualize__textual | src/textual/binding.py | {
"start": 5594,
"end": 6053
} | class ____(NamedTuple):
"""Information about an active binding (returned from [active_bindings][textual.screen.Screen.active_bindings])."""
node: DOMNode
"""The node where the binding is defined."""
binding: Binding
"""The binding information."""
enabled: bool
"""Is the binding enabled? (enabled bindings are typically rendered dim)"""
tooltip: str = ""
"""Optional tooltip shown in Footer."""
@rich.repr.auto
| ActiveBinding |
python | PyCQA__pylint | doc/data/messages/a/arguments-differ/good/no_inheritance.py | {
"start": 517,
"end": 646
} | class ____:
def mix(self, fluid_one, fluid_two, alcoholic_fluid):
return fluid_one + fluid_two + alcoholic_fluid
| Cocktail |
python | jazzband__django-waffle | waffle/tests/test_admin.py | {
"start": 4517,
"end": 7197
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.site = AdminSite()
self.switch_admin = SwitchAdmin(Switch, self.site)
def test_enable_switches(self):
s1 = Switch.objects.create(name="switch1", active=False)
request = FakeRequest()
enable_switches(None, request, Switch.objects.all())
s1.refresh_from_db()
self.assertTrue(s1.active)
log_entry = LogEntry.objects.get(user=request.user)
self.assertEqual(log_entry.action_flag, CHANGE)
self.assertEqual(log_entry.object_repr, "switch1 on")
def test_disable_switches(self):
s1 = Switch.objects.create(name="switch1", active=True)
request = FakeRequest()
disable_switches(None, request, Switch.objects.all())
s1.refresh_from_db()
self.assertFalse(s1.active)
log_entry = LogEntry.objects.get(user=request.user)
self.assertEqual(log_entry.action_flag, CHANGE)
self.assertEqual(log_entry.object_repr, "switch1 off")
@skip_if_admin_permissions_not_available
def test_switch_no_actions_without_permissions(self):
request = FakeRequest()
actions = self.switch_admin.get_actions(request)
self.assertEqual(actions.keys(), set())
@skip_if_admin_permissions_not_available
def test_switch_action_change(self):
request = FakeRequest()
request.user.user_permissions.add(Permission.objects.get(codename="change_switch"))
actions = self.switch_admin.get_actions(request)
self.assertEqual(actions.keys(), {"enable_switches", "disable_switches"})
@skip_if_admin_permissions_not_available
def test_switch_action_delete(self):
request = FakeRequest()
request.user.user_permissions.add(Permission.objects.get(codename="delete_switch"))
actions = self.switch_admin.get_actions(request)
self.assertEqual(actions.keys(), {"delete_individually"})
def test_model_can_be_registered_by_default(self):
config = get_setting("ENABLE_ADMIN_PAGES")
_register_model_to_admin_site(admin_site=self.site, config_setting=config, model=Switch)
self.assertTrue(self.site.is_registered(Switch))
@override_settings(WAFFLE_ENABLE_ADMIN_PAGES=False)
def test_admin_page_can_be_disabled(self):
config = get_setting("ENABLE_ADMIN_PAGES")
_register_model_to_admin_site(admin_site=self.site, config_setting=config, model=Switch)
self.assertFalse(self.site.is_registered(Switch))
def _register_model_to_admin_site(admin_site, config_setting, model):
if config_setting:
admin_site.register(model)
| SwitchAdminTests |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels16.py | {
"start": 315,
"end": 1355
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels16.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "radar"})
chart.axis_ids = [45858816, 45860352]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {"value": 1, "position": "center"},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pytorch__pytorch | test/distributed/tensor/test_dtensor.py | {
"start": 25540,
"end": 41860
} | class ____(DTensorTestBase):
@property
def world_size(self):
return 8
def sub_mesh_assert_equal(self, mesh, exp_in_mesh, exp_out_of_mesh, tensor):
if self.rank in mesh:
self.assertEqual(tensor, exp_in_mesh)
else:
self.assertEqual(tensor, exp_out_of_mesh)
@with_comms
def test_dtensor_device_mesh_device_conversion(self):
# construct a gpu device mesh
mesh = self.build_device_mesh()
# construct from a cpu local tensor with gpu device mesh
# should automatically convert the dist tensor to gpu
placements = [Shard(0)]
local_tensor = torch.randn(3, 3)
dist_tensor = DTensor.from_local(local_tensor, mesh, placements)
self.assertEqual(dist_tensor.device.type, self.device_type)
self.assertEqual(dist_tensor.to_local().device.type, self.device_type)
@with_comms
def test_dtensor_api_device_mesh_context_manager(self):
with self.build_device_mesh() as mesh:
placements = [Shard(0)]
local_tensor = torch.randn(3, 3)
sharded_tensor = DTensor.from_local(
local_tensor, device_mesh=mesh, placements=placements
)
with self.build_device_mesh():
placements = [Shard(0)]
local_tensor = torch.randn(3, 3)
sharded_tensor = DTensor.from_local(local_tensor, placements=placements)
replica_spec = [Replicate()]
replica_tensor = sharded_tensor.redistribute(placements=replica_spec)
self.assertEqual(
replica_tensor.size(), torch.Size([3 * self.world_size, 3])
)
with self.build_device_mesh():
placements = [Shard(0)]
global_shape = torch.Size([3 * self.world_size, 3])
global_tensor = torch.randn(global_shape)
sharded_tensor = distribute_tensor(global_tensor, placements=placements)
self.assertEqual(sharded_tensor.to_local().shape, torch.Size([3, 3]))
mesh_2d = DeviceMesh(
self.device_type, torch.arange(self.world_size).reshape(2, 4)
)
with mesh_2d:
shard_2d_spec = [Shard(0), Replicate()]
tensor_2d = distribute_tensor(global_tensor, placements=shard_2d_spec)
self.assertEqual(tensor_2d.to_local().shape, torch.Size([3 * 4, 3]))
sharded_after_2d = distribute_tensor(global_tensor, placements=placements)
self.assertEqual(sharded_after_2d.to_local().shape, torch.Size([3, 3]))
@with_comms
def test_dtensor_2d_mesh(self):
mesh_tensor = torch.arange(self.world_size).reshape(2, 4)
# construct a gpu device mesh
mesh = DeviceMesh(self.device_type, mesh_tensor)
# construct a dist tensor on 2d device mesh and test if works
placements = [Shard(0), Shard(1)]
local_tensor = torch.randn(3, 3)
dist_tensor = DTensor.from_local(local_tensor, mesh, placements)
self.assertEqual(
dist_tensor.size(), torch.Size([3 * mesh.size(0), 3 * mesh.size(1)])
)
self.assertEqual(dist_tensor.device.type, self.device_type)
self.assertEqual(dist_tensor.to_local().device.type, self.device_type)
# if shard on the same tensor dimension
# we should correctly construct the global tensor size
shard_same_dim_spec = [Shard(0), Shard(0)]
local_tensor = torch.randn(3, 3)
dist_tensor = DTensor.from_local(local_tensor, mesh, shard_same_dim_spec)
self.assertEqual(dist_tensor.size(), torch.Size([3 * self.world_size, 3]))
@with_comms
def test_device_mesh_nd(self):
# construct a gpu device mesh
mesh_tensor = torch.arange(self.world_size).reshape(2, 2, 2)
mesh = DeviceMesh(self.device_type, mesh_tensor)
# construct a dist tensor on 3d device mesh and test if works
placements = [Shard(0), Shard(1), Shard(2)]
local_tensor = torch.randn(3, 3, 3)
dist_tensor = DTensor.from_local(local_tensor, mesh, placements)
self.assertEqual(dist_tensor.size(), torch.Size([6, 6, 6]))
self.assertEqual(dist_tensor.device.type, self.device_type)
self.assertEqual(dist_tensor.to_local().device.type, self.device_type)
# construct a dist tensor on 3d device mesh with some shards on same dim
placements = [Shard(0), Shard(0), Shard(2)]
local_tensor = torch.randn(3, 3, 3)
dist_tensor = DTensor.from_local(local_tensor, mesh, placements)
self.assertEqual(dist_tensor.size(), torch.Size([12, 3, 6]))
self.assertEqual(dist_tensor.device.type, self.device_type)
self.assertEqual(dist_tensor.to_local().device.type, self.device_type)
@with_comms
def test_dtensor_spec_local_shard_offset(self):
device_mesh = DeviceMesh(
self.device_type, torch.arange(self.world_size).reshape(2, 4)
)
tensor_shape = (3 * self.world_size, 3 * self.world_size)
# sharding specs and its corresponding local shard offsets
shard_spec_and_offsets = [
(
[Shard(0), Replicate()],
(3 * (self.world_size // 2) * (self.rank // 4), 0),
),
(
[Shard(1), Replicate()],
(0, 3 * (self.world_size // 2) * (self.rank // 4)),
),
(
[Replicate(), Shard(0)],
(3 * (self.world_size // 4) * (self.rank % 4), 0),
),
(
[Replicate(), Shard(1)],
(0, 3 * (self.world_size // 4) * (self.rank % 4)),
),
]
from torch.distributed.tensor._utils import (
compute_local_shape_and_global_offset,
)
# loop through all sharding specs and check local shard offsets
logical_tensor = torch.randn(tensor_shape)
for placements, expected_shard_offsets in shard_spec_and_offsets:
dtensor = distribute_tensor(logical_tensor, device_mesh, placements)
_, offset = compute_local_shape_and_global_offset(
dtensor.shape, device_mesh, dtensor.placements
)
self.assertEqual(expected_shard_offsets, offset)
@with_comms
def test_from_local_sub_mesh(self):
mesh = DeviceMesh(self.device_type, [0, 2])
local_tensor = torch.ones(3, 4)
dtensor = DTensor.from_local(local_tensor, mesh, [Shard(0)])
self.assertEqual(dtensor.size(), torch.Size([6, 4]))
self.sub_mesh_assert_equal(
mesh.mesh,
torch.ones(3, 4),
torch.tensor([]),
dtensor.to_local(),
)
# test dtensor created in submesh, the operation should only
# be applied to the local shard inside the mesh, not the whole
# world, so only 0/2 really run the computation
dtensor = dtensor + 2
self.sub_mesh_assert_equal(
mesh.mesh,
torch.ones(3, 4) + 2,
torch.tensor([]),
dtensor.to_local(),
)
@with_comms
def test_default_value_sub_mesh(self):
mesh = DeviceMesh(self.device_type, [0, 2])
# test scalar return value
local_tensor1 = torch.ones(4, 3)
local_tensor2 = torch.ones(4, 3)
dtensor1 = DTensor.from_local(local_tensor1, mesh, [Shard(0)])
dtensor2 = DTensor.from_local(local_tensor2, mesh, [Shard(0)])
local_res = dtensor1.equal(dtensor2) # equal returns local result
self.sub_mesh_assert_equal(
mesh.mesh,
True,
True,
local_res,
)
# test 0-d tensor return value
local_tensor = torch.ones(4, 3)
dtensor = DTensor.from_local(local_tensor, mesh, [Shard(0)]).sum()
self.sub_mesh_assert_equal(
mesh.mesh,
torch.tensor(12.0),
torch.tensor(0.0),
dtensor.to_local(),
)
# test List[torch.Tensor] return value
local_tensor = torch.ones(3, 4)
dtensor = DTensor.from_local(local_tensor, mesh, [Shard(0)])
dtensor_list = dtensor.split([2, 2], dim=1)
self.sub_mesh_assert_equal(
mesh.mesh,
[torch.ones(3, 2)] * 2,
[torch.tensor([])] * 2,
[dt.to_local() for dt in dtensor_list],
)
@with_comms
def test_redistribute_sub_mesh(self):
mesh = DeviceMesh(self.device_type, [0, 2])
# test redistribute on a submesh
local_tensor1 = torch.ones(4, 3)
sharded_dtensor = DTensor.from_local(local_tensor1, mesh, [Shard(0)])
replicated_dtensor = sharded_dtensor.redistribute(placements=[Replicate()])
self.sub_mesh_assert_equal(
mesh.mesh, torch.ones(8, 3), torch.tensor([]), replicated_dtensor.to_local()
)
sharded_again = replicated_dtensor.redistribute(placements=[Shard(0)])
self.sub_mesh_assert_equal(
mesh.mesh, torch.ones(4, 3), torch.tensor([]), sharded_again.to_local()
)
@with_comms
def test_implicit_replication(self):
mesh = self.build_device_mesh()
local_tensor1 = torch.ones(4, 3)
sharded_dtensor = DTensor.from_local(local_tensor1, mesh, [Shard(0)])
with implicit_replication():
# We put the scalar tensor as the left operand so we can test out
# when a non-dtensor is a the arg in the args list.
out_dt = torch.ones(3, device=self.device_type) + sharded_dtensor
self.assertEqual(out_dt.placements, [Shard(0)])
self.assertEqual(out_dt.shape, (4 * self.world_size, 3))
local_shard = out_dt.to_local()
self.assertEqual(local_shard.shape, (4, 3))
self.assertEqual(local_shard, torch.ones(4, 3) + torch.ones(3))
@with_comms
def test_vmap_embedding(self):
mesh = self.build_device_mesh()
batch_size, seq_len = 2, 6
output_dim = 32
indices = torch.zeros(*(batch_size, seq_len), dtype=torch.int64)
indices[0, 1] = 1
indices[1, 3] = 1
indices[1, 5] = 1
indices = DTensor.from_local(indices, mesh, [Shard(0)])
emb = torch.randn(
*(batch_size, 8, output_dim),
dtype=torch.float32,
)
emb = DTensor.from_local(emb, mesh, [Shard(0)])
result = torch.vmap(F.embedding)(indices, emb)
expected = [F.embedding(indices[i], emb[i]) for i in range(batch_size)]
expected = torch.stack(expected)
local_result = result.to_local()
local_expected = expected.to_local()
self.assertEqual(local_result, local_expected)
@unittest.expectedFailure
@with_comms
def test_inplace_on_local_tensor_view(self):
mesh = self.build_device_mesh()
seq = 8
vocab = 16
leaf = torch.randn((seq, vocab), device=self.device_type, requires_grad=True)
dtensor_leaf = DTensor.from_local(leaf, mesh, [Shard(1)])
dtensor_vocab_parallel_logits = dtensor_leaf * 2 # make this non-leaf
vocab_parallel_logits = dtensor_vocab_parallel_logits.to_local()
logits_max = torch.randn(seq, device=self.device_type)
vocab_parallel_logits -= logits_max.unsqueeze(dim=1)
@with_comms
def test_auto_implicit_replication(self):
mesh = self.build_device_mesh()
local_tensor = torch.ones(self.world_size, 3, device=self.device_type)
sharded_dtensor = DTensor.from_local(local_tensor, mesh, [Shard(0)])
# automatically turn tensor to DTensor replicate when ndim = 0 and numel = 1
ndim_0_tensor = torch.tensor(1, device=self.device_type)
def add_scalar_tensor_with_dtensor():
return ndim_0_tensor + sharded_dtensor
result = add_scalar_tensor_with_dtensor().to_local()
self.assertEqual(result, local_tensor + ndim_0_tensor)
self.assertNotWarn(
add_scalar_tensor_with_dtensor,
"Found a non-scalar tensor with numel=1 and ndim!=0",
)
# automatically turn tensor to DTensor replicate when ndim = 1 and numel = 1
numel_1_tensor = torch.tensor([1], device=self.device_type)
self.assertEqual(
(numel_1_tensor + sharded_dtensor).to_local(), numel_1_tensor + local_tensor
)
@unittest.expectedFailure
@with_comms
def test_dtensor_cond(self):
mesh = self.build_device_mesh()
def make_dtensor(*shape, dtype, device):
return distribute_tensor(
make_tensor(*shape, dtype=dtype, device=device),
device_mesh=mesh,
placements=None,
)
x = make_dtensor(1, 1, dtype=torch.bfloat16, device="cuda")
# Fails with AssertionError: P1972527564
torch.cond(
x > 0,
lambda: 1.0 / x,
lambda: torch.zeros_like(x),
)
@with_comms
def test_metadata_consistency_check(self):
device_mesh = self.build_device_mesh()
placements = [Shard(0)]
# Create a local tensor with specific metadata and check dtype change
local_tensor = torch.randn(3, 3, requires_grad=True, dtype=torch.float32)
if self.rank == 0:
local_tensor = local_tensor.to(dtype=torch.float64)
with self.assertRaises(ValueError):
DTensor.from_local(local_tensor, device_mesh, placements, run_check=True)
try:
DTensor.from_local(local_tensor, device_mesh, placements, run_check=False)
except ValueError:
self.fail("Unexpected ValueError raised with run_check=False")
# Create a local tensor with specific metadata and check requires_grad change
local_tensor = torch.randn(3, 3, requires_grad=True, dtype=torch.float32)
if self.rank == 0:
local_tensor.requires_grad = False
with self.assertRaises(ValueError):
DTensor.from_local(local_tensor, device_mesh, placements, run_check=True)
try:
DTensor.from_local(local_tensor, device_mesh, placements, run_check=False)
except ValueError:
self.fail("Unexpected ValueError raised with run_check=False")
# Create a local tensor with specific metadata and check stride change
local_tensor = torch.randn(3, 4, requires_grad=True, dtype=torch.float32)
if self.rank == 0:
local_tensor = local_tensor.t() # transpose changes the stride
with self.assertRaises(ValueError):
DTensor.from_local(local_tensor, device_mesh, placements, run_check=True)
try:
DTensor.from_local(local_tensor, device_mesh, placements, run_check=False)
except ValueError:
self.fail("Unexpected ValueError raised with run_check=False")
@with_comms
def test_as_strided_identity(self):
# Test calling as_strided with the same size/stride/offset as input tensor
# This should be a no-op but currently fails
device_mesh = self.build_device_mesh()
placements = [Shard(0)]
local_tensor = torch.randn(3, 4, device=self.device_type)
dtensor = DTensor.from_local(local_tensor, device_mesh, placements)
# Get the current size, stride, and storage_offset
size = dtensor.size()
stride = dtensor.stride()
storage_offset = dtensor.storage_offset()
# Call as_strided with the exact same parameters
result = dtensor.as_strided(size, stride, storage_offset)
# The result should be identical to the input
self.assertEqual(result.size(), dtensor.size())
self.assertEqual(result.stride(), dtensor.stride())
self.assertEqual(result.to_local(), dtensor.to_local())
DTensorMeshTestWithLocalTensor = create_local_tensor_test_class(
DTensorMeshTest,
skipped_tests=[
# Test asserts must be rewritten for local tensor
"test_from_local_sub_mesh",
"test_default_value_sub_mesh",
"test_redistribute_sub_mesh",
# Local tensor mode doesn't support tensors of different types on different ranks
"test_metadata_consistency_check",
],
)
| DTensorMeshTest |
python | kamyu104__LeetCode-Solutions | Python/minimum-garden-perimeter-to-collect-enough-apples.py | {
"start": 43,
"end": 1535
} | class ____(object):
def minimumPerimeter(self, neededApples):
"""
:type neededApples: int
:rtype: int
"""
# find min r, s.t. 4r^3+6r^2+2r-neededApples >= 0
# => by depressed cubic (https://en.wikipedia.org/wiki/Cubic_equation#Depressed_cubic)
# let x = r+(6/(3*4)), r = x-(1/2)
# 4(x-(1/2))^3+6(x-(1/2))^2+2(x-(1/2))-neededApples
# = 4(x^3-3/2x^2+3/4x-1/8)
# + 6(x^2-x+1/4)
# + 2(x-1/2)
# = 4x^3-x-neededApples >= 0
#
# find x, s.t. 4x^3-x-neededApples = 0 <=> x^3+(-1/4)x+(-neededApples/4) = 0
# => by Cardano's formula (https://en.wikipedia.org/wiki/Cubic_equation#Cardano's_formula)
# x^3 + px + q = 0, p = (-1/4), q = (-neededApples/4)
# since (q/2)^2+(p/3)^3 = neededApples^2/64-1/1728 > 0 => only one real root
# => x = (-q/2 + ((q/2)^2+(p/3)^3)^(1/2)) + (-q/2 - ((q/2)^2+(p/3)^3)^(1/2))
# r = x-(1/2)
# => min r = ceil(r)
a, b, c, d = 4.0, 6.0, 2.0, float(-neededApples)
p = (3*a*c-b**2)/(3*a**2) # -1/4.0
q = (2*b**3-9*a*b*c+27*a**2*d)/(27*a**3) # -neededApples/4.0
assert((q/2)**2+(p/3)**3 > 0) # case of only one real root
x = (-q/2 + ((q/2)**2+(p/3)**3)**0.5)**(1.0/3) + \
(-q/2 - ((q/2)**2+(p/3)**3)**0.5)**(1.0/3)
return 8*int(math.ceil(x - b/(3*a)))
# Time: O(1)
# Space: O(1)
| Solution |
python | huggingface__transformers | src/transformers/models/speech_to_text/modeling_speech_to_text.py | {
"start": 3707,
"end": 8218
} | class ____(nn.Module):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None):
super().__init__()
self.offset = 2
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.make_weights(num_positions + self.offset, embedding_dim, padding_idx)
def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx)
if hasattr(self, "weights"):
# in forward put the weights on the correct dtype and device of the param
emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)
self.register_buffer("weights", emb_weights, persistent=False)
@staticmethod
def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None):
"""
Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb.to(torch.get_default_dtype())
@torch.no_grad()
def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
bsz, seq_len = input_ids.size()
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to(
input_ids.device
)
# expand embeddings if needed
max_pos = self.padding_idx + 1 + seq_len
if max_pos > self.weights.size(0):
self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx)
return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach()
def create_position_ids_from_input_ids(
self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0
):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
x: torch.Tensor x:
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenAttention with Musicgen->Speech2Text
| Speech2TextSinusoidalPositionalEmbedding |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_math_ops_test.py | {
"start": 1321,
"end": 1920
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.parameters(
[dict(original=['a b'.split(), 'c d e'.split()], expected='a b c d e')])
@test_util.run_in_graph_and_eager_modes
def testStringReduceJoin(self, original, expected, separator=' ', axis=None):
original_rt = ragged_factory_ops.constant(original)
expected_rt = ragged_factory_ops.constant(expected)
actual = ragged_string_ops.reduce_join(original_rt, axis=axis,
separator=separator)
self.assertAllEqual(actual, expected_rt)
| RaggedReduceTest |
python | sympy__sympy | sympy/polys/domains/fractionfield.py | {
"start": 555,
"end": 6225
} | class ____(Field, CompositeDomain, Generic[Er]):
"""A class for representing multivariate rational function fields. """
is_FractionField = is_Frac = True
has_assoc_Ring = True
has_assoc_Field = True
def __init__(self, domain_or_field: FracField[Er] | Domain[Er], symbols=None, order=None):
from sympy.polys.fields import FracField
if isinstance(domain_or_field, FracField) and symbols is None and order is None:
field = domain_or_field
else:
field = FracField(symbols, domain_or_field, order) # type: ignore
self.field: FracField[Er] = field
self.dtype = field.dtype
self.gens = field.gens
self.ngens = field.ngens
self.symbols = field.symbols
self.domain = field.domain
# TODO: remove this
self.dom = self.domain
def new(self, element):
return self.field.field_new(element)
def of_type(self, element) -> TypeIs[FracElement[Er]]:
"""Check if ``a`` is of type ``dtype``. """
return self.field.is_element(element)
@property
def zero(self):
return self.field.zero
@property
def one(self):
return self.field.one
@property
def order(self):
return self.field.order
def __str__(self):
return str(self.domain) + '(' + ','.join(map(str, self.symbols)) + ')'
def __hash__(self):
return hash((self.__class__.__name__, self.field, self.domain, self.symbols))
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent. """
if not isinstance(other, FractionField):
return NotImplemented
return self.field == other.field
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return a.as_expr()
def from_sympy(self, a):
"""Convert SymPy's expression to ``dtype``. """
return self.field.from_expr(a)
def from_ZZ(K1, a, K0):
"""Convert a Python ``int`` object to ``dtype``. """
return K1(K1.domain.convert(a, K0))
def from_ZZ_python(K1, a, K0):
"""Convert a Python ``int`` object to ``dtype``. """
return K1(K1.domain.convert(a, K0))
def from_QQ(K1, a, K0):
"""Convert a Python ``Fraction`` object to ``dtype``. """
dom = K1.domain
conv = dom.convert_from
if dom.is_ZZ:
return K1(conv(K0.numer(a), K0)) / K1(conv(K0.denom(a), K0))
else:
return K1(conv(a, K0))
def from_QQ_python(K1, a, K0):
"""Convert a Python ``Fraction`` object to ``dtype``. """
return K1(K1.domain.convert(a, K0))
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpz`` object to ``dtype``. """
return K1(K1.domain.convert(a, K0))
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpq`` object to ``dtype``. """
return K1(K1.domain.convert(a, K0))
def from_GaussianRationalField(K1, a, K0):
"""Convert a ``GaussianRational`` object to ``dtype``. """
return K1(K1.domain.convert(a, K0))
def from_GaussianIntegerRing(K1, a, K0):
"""Convert a ``GaussianInteger`` object to ``dtype``. """
return K1(K1.domain.convert(a, K0))
def from_RealField(K1, a, K0):
"""Convert a mpmath ``mpf`` object to ``dtype``. """
return K1(K1.domain.convert(a, K0))
def from_ComplexField(K1, a, K0):
"""Convert a mpmath ``mpf`` object to ``dtype``. """
return K1(K1.domain.convert(a, K0))
def from_AlgebraicField(K1, a, K0):
"""Convert an algebraic number to ``dtype``. """
if K1.domain != K0:
a = K1.domain.convert_from(a, K0)
if a is not None:
return K1.new(a)
def from_PolynomialRing(K1, a, K0):
"""Convert a polynomial to ``dtype``. """
if a.is_ground:
return K1.convert_from(a.coeff(1), K0.domain)
try:
return K1.new(a.set_ring(K1.field.ring))
except (CoercionFailed, GeneratorsError):
# XXX: We get here if K1=ZZ(x,y) and K0=QQ[x,y]
# and the poly a in K0 has non-integer coefficients.
# It seems that K1.new can handle this but K1.new doesn't work
# when K0.domain is an algebraic field...
try:
return K1.new(a)
except (CoercionFailed, GeneratorsError):
return None
def from_FractionField(K1, a, K0):
"""Convert a rational function to ``dtype``. """
try:
return a.set_field(K1.field)
except (CoercionFailed, GeneratorsError):
return None
def get_ring(self):
"""Returns a field associated with ``self``. """
return self.field.to_ring().to_domain()
def is_positive(self, a):
"""Returns True if ``LC(a)`` is positive. """
return self.domain.is_positive(a.numer.LC)
def is_negative(self, a):
"""Returns True if ``LC(a)`` is negative. """
return self.domain.is_negative(a.numer.LC)
def is_nonpositive(self, a):
"""Returns True if ``LC(a)`` is non-positive. """
return self.domain.is_nonpositive(a.numer.LC)
def is_nonnegative(self, a):
"""Returns True if ``LC(a)`` is non-negative. """
return self.domain.is_nonnegative(a.numer.LC)
def numer(self, a):
"""Returns numerator of ``a``. """
return a.numer
def denom(self, a):
"""Returns denominator of ``a``. """
return a.denom
def factorial(self, a):
"""Returns factorial of ``a``. """
return self.dtype(self.domain.factorial(a))
| FractionField |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_cmath.py | {
"start": 2691,
"end": 23818
} | class ____(__TestCase):
# list of all functions in cmath
test_functions = [getattr(cmath, fname) for fname in [
'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh',
'cos', 'cosh', 'exp', 'log', 'log10', 'sin', 'sinh',
'sqrt', 'tan', 'tanh']]
# test first and second arguments independently for 2-argument log
test_functions.append(lambda x : cmath.log(x, 1729. + 0j))
test_functions.append(lambda x : cmath.log(14.-27j, x))
def setUp(self):
self.test_values = open(test_file, encoding="utf-8")
def tearDown(self):
self.test_values.close()
def assertFloatIdentical(self, x, y):
"""Fail unless floats x and y are identical, in the sense that:
(1) both x and y are nans, or
(2) both x and y are infinities, with the same sign, or
(3) both x and y are zeros, with the same sign, or
(4) x and y are both finite and nonzero, and x == y
"""
msg = 'floats {!r} and {!r} are not identical'
if math.isnan(x) or math.isnan(y):
if math.isnan(x) and math.isnan(y):
return
elif x == y:
if x != 0.0:
return
# both zero; check that signs match
elif math.copysign(1.0, x) == math.copysign(1.0, y):
return
else:
msg += ': zeros have different signs'
self.fail(msg.format(x, y))
def assertComplexesAreIdentical(self, x, y):
"""Fail unless complex numbers x and y have equal values and signs.
In particular, if x and y both have real (or imaginary) part
zero, but the zeros have different signs, this test will fail.
"""
self.assertFloatIdentical(x.real, y.real)
self.assertFloatIdentical(x.imag, y.imag)
def rAssertAlmostEqual(self, a, b, rel_err = 2e-15, abs_err = 5e-323,
msg=None):
"""Fail if the two floating-point numbers are not almost equal.
Determine whether floating-point values a and b are equal to within
a (small) rounding error. The default values for rel_err and
abs_err are chosen to be suitable for platforms where a float is
represented by an IEEE 754 double. They allow an error of between
9 and 19 ulps.
"""
# special values testing
if math.isnan(a):
if math.isnan(b):
return
self.fail(msg or '{!r} should be nan'.format(b))
if math.isinf(a):
if a == b:
return
self.fail(msg or 'finite result where infinity expected: '
'expected {!r}, got {!r}'.format(a, b))
# if both a and b are zero, check whether they have the same sign
# (in theory there are examples where it would be legitimate for a
# and b to have opposite signs; in practice these hardly ever
# occur).
if not a and not b:
if math.copysign(1., a) != math.copysign(1., b):
self.fail(msg or 'zero has wrong sign: expected {!r}, '
'got {!r}'.format(a, b))
# if a-b overflows, or b is infinite, return False. Again, in
# theory there are examples where a is within a few ulps of the
# max representable float, and then b could legitimately be
# infinite. In practice these examples are rare.
try:
absolute_error = abs(b-a)
except OverflowError:
pass
else:
# test passes if either the absolute error or the relative
# error is sufficiently small. The defaults amount to an
# error of between 9 ulps and 19 ulps on an IEEE-754 compliant
# machine.
if absolute_error <= max(abs_err, rel_err * abs(a)):
return
self.fail(msg or
'{!r} and {!r} are not sufficiently close'.format(a, b))
def test_constants(self):
e_expected = 2.71828182845904523536
pi_expected = 3.14159265358979323846
self.assertAlmostEqual(cmath.pi, pi_expected, places=9,
msg="cmath.pi is {}; should be {}".format(cmath.pi, pi_expected))
self.assertAlmostEqual(cmath.e, e_expected, places=9,
msg="cmath.e is {}; should be {}".format(cmath.e, e_expected))
def test_infinity_and_nan_constants(self):
self.assertEqual(cmath.inf.real, math.inf)
self.assertEqual(cmath.inf.imag, 0.0)
self.assertEqual(cmath.infj.real, 0.0)
self.assertEqual(cmath.infj.imag, math.inf)
self.assertTrue(math.isnan(cmath.nan.real))
self.assertEqual(cmath.nan.imag, 0.0)
self.assertEqual(cmath.nanj.real, 0.0)
self.assertTrue(math.isnan(cmath.nanj.imag))
# Also check that the sign of all of these is positive:
self.assertEqual(math.copysign(1., cmath.nan.real), 1.)
self.assertEqual(math.copysign(1., cmath.nan.imag), 1.)
self.assertEqual(math.copysign(1., cmath.nanj.real), 1.)
self.assertEqual(math.copysign(1., cmath.nanj.imag), 1.)
# Check consistency with reprs.
self.assertEqual(repr(cmath.inf), "inf")
self.assertEqual(repr(cmath.infj), "infj")
self.assertEqual(repr(cmath.nan), "nan")
self.assertEqual(repr(cmath.nanj), "nanj")
def test_user_object(self):
# Test automatic calling of __complex__ and __float__ by cmath
# functions
# some random values to use as test values; we avoid values
# for which any of the functions in cmath is undefined
# (i.e. 0., 1., -1., 1j, -1j) or would cause overflow
cx_arg = 4.419414439 + 1.497100113j
flt_arg = -6.131677725
# a variety of non-complex numbers, used to check that
# non-complex return values from __complex__ give an error
non_complexes = ["not complex", 1, 5, 2., None,
object(), NotImplemented]
# Now we introduce a variety of classes whose instances might
# end up being passed to the cmath functions
# usual case: new-style class implementing __complex__
with torch._dynamo.error_on_graph_break(False):
class MyComplex:
def __init__(self, value):
self.value = value
def __complex__(self):
return self.value
# classes for which __complex__ raises an exception
class SomeException(Exception):
pass
class MyComplexException:
def __complex__(self):
raise SomeException
# some classes not providing __float__ or __complex__
class NeitherComplexNorFloat(object):
pass
class Index:
def __int__(self): return 2
def __index__(self): return 2
class MyInt:
def __int__(self): return 2
# other possible combinations of __float__ and __complex__
# that should work
class FloatAndComplex:
def __float__(self):
return flt_arg
def __complex__(self):
return cx_arg
class JustFloat:
def __float__(self):
return flt_arg
for f in self.test_functions:
# usual usage
self.assertEqual(f(MyComplex(cx_arg)), f(cx_arg))
# other combinations of __float__ and __complex__
self.assertEqual(f(FloatAndComplex()), f(cx_arg))
self.assertEqual(f(JustFloat()), f(flt_arg))
self.assertEqual(f(Index()), f(int(Index())))
# TypeError should be raised for classes not providing
# either __complex__ or __float__, even if they provide
# __int__ or __index__:
self.assertRaises(TypeError, f, NeitherComplexNorFloat())
self.assertRaises(TypeError, f, MyInt())
# non-complex return value from __complex__ -> TypeError
for bad_complex in non_complexes:
self.assertRaises(TypeError, f, MyComplex(bad_complex))
# exceptions in __complex__ should be propagated correctly
self.assertRaises(SomeException, f, MyComplexException())
def test_input_type(self):
# ints should be acceptable inputs to all cmath
# functions, by virtue of providing a __float__ method
for f in self.test_functions:
for arg in [2, 2.]:
self.assertEqual(f(arg), f(arg.__float__()))
# but strings should give a TypeError
for f in self.test_functions:
for arg in ["a", "long_string", "0", "1j", ""]:
self.assertRaises(TypeError, f, arg)
def test_cmath_matches_math(self):
# check that corresponding cmath and math functions are equal
# for floats in the appropriate range
# test_values in (0, 1)
test_values = [0.01, 0.1, 0.2, 0.5, 0.9, 0.99]
# test_values for functions defined on [-1., 1.]
unit_interval = test_values + [-x for x in test_values] + \
[0., 1., -1.]
# test_values for log, log10, sqrt
positive = test_values + [1.] + [1./x for x in test_values]
nonnegative = [0.] + positive
# test_values for functions defined on the whole real line
real_line = [0.] + positive + [-x for x in positive]
test_functions = {
'acos' : unit_interval,
'asin' : unit_interval,
'atan' : real_line,
'cos' : real_line,
'cosh' : real_line,
'exp' : real_line,
'log' : positive,
'log10' : positive,
'sin' : real_line,
'sinh' : real_line,
'sqrt' : nonnegative,
'tan' : real_line,
'tanh' : real_line}
for fn, values in test_functions.items():
float_fn = getattr(math, fn)
complex_fn = getattr(cmath, fn)
for v in values:
z = complex_fn(v)
self.rAssertAlmostEqual(float_fn(v), z.real)
self.assertEqual(0., z.imag)
# test two-argument version of log with various bases
for base in [0.5, 2., 10.]:
for v in positive:
z = cmath.log(v, base)
self.rAssertAlmostEqual(math.log(v, base), z.real)
self.assertEqual(0., z.imag)
@requires_IEEE_754
def test_specific_values(self):
# Some tests need to be skipped on ancient OS X versions.
# See issue #27953.
SKIP_ON_TIGER = {'tan0064'}
osx_version = None
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
osx_version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
def rect_complex(z):
"""Wrapped version of rect that accepts a complex number instead of
two float arguments."""
return cmath.rect(z.real, z.imag)
def polar_complex(z):
"""Wrapped version of polar that returns a complex number instead of
two floats."""
return complex(*polar(z))
for id, fn, ar, ai, er, ei, flags in parse_testfile(test_file):
arg = complex(ar, ai)
expected = complex(er, ei)
# Skip certain tests on OS X 10.4.
if osx_version is not None and osx_version < (10, 5):
if id in SKIP_ON_TIGER:
continue
if fn == 'rect':
function = rect_complex
elif fn == 'polar':
function = polar_complex
else:
function = getattr(cmath, fn)
if 'divide-by-zero' in flags or 'invalid' in flags:
try:
actual = function(arg)
except ValueError:
continue
else:
self.fail('ValueError not raised in test '
'{}: {}(complex({!r}, {!r}))'.format(id, fn, ar, ai))
if 'overflow' in flags:
try:
actual = function(arg)
except OverflowError:
continue
else:
self.fail('OverflowError not raised in test '
'{}: {}(complex({!r}, {!r}))'.format(id, fn, ar, ai))
actual = function(arg)
if 'ignore-real-sign' in flags:
actual = complex(abs(actual.real), actual.imag)
expected = complex(abs(expected.real), expected.imag)
if 'ignore-imag-sign' in flags:
actual = complex(actual.real, abs(actual.imag))
expected = complex(expected.real, abs(expected.imag))
# for the real part of the log function, we allow an
# absolute error of up to 2e-15.
if fn in ('log', 'log10'):
real_abs_err = 2e-15
else:
real_abs_err = 5e-323
error_message = (
'{}: {}(complex({!r}, {!r}))\n'
'Expected: complex({!r}, {!r})\n'
'Received: complex({!r}, {!r})\n'
'Received value insufficiently close to expected value.'
).format(id, fn, ar, ai,
expected.real, expected.imag,
actual.real, actual.imag)
self.rAssertAlmostEqual(expected.real, actual.real,
abs_err=real_abs_err,
msg=error_message)
self.rAssertAlmostEqual(expected.imag, actual.imag,
msg=error_message)
def check_polar(self, func):
def check(arg, expected):
got = func(arg)
for e, g in zip(expected, got):
self.rAssertAlmostEqual(e, g)
check(0, (0., 0.))
check(1, (1., 0.))
check(-1, (1., pi))
check(1j, (1., pi / 2))
check(-3j, (3., -pi / 2))
inf = float('inf')
check(complex(inf, 0), (inf, 0.))
check(complex(-inf, 0), (inf, pi))
check(complex(3, inf), (inf, pi / 2))
check(complex(5, -inf), (inf, -pi / 2))
check(complex(inf, inf), (inf, pi / 4))
check(complex(inf, -inf), (inf, -pi / 4))
check(complex(-inf, inf), (inf, 3 * pi / 4))
check(complex(-inf, -inf), (inf, -3 * pi / 4))
nan = float('nan')
check(complex(nan, 0), (nan, nan))
check(complex(0, nan), (nan, nan))
check(complex(nan, nan), (nan, nan))
check(complex(inf, nan), (inf, nan))
check(complex(-inf, nan), (inf, nan))
check(complex(nan, inf), (inf, nan))
check(complex(nan, -inf), (inf, nan))
def test_polar(self):
self.check_polar(polar)
@cpython_only
def test_polar_errno(self):
# Issue #24489: check a previously set C errno doesn't disturb polar()
_testcapi = import_helper.import_module('_testcapi')
def polar_with_errno_set(z):
_testcapi.set_errno(11)
try:
return polar(z)
finally:
_testcapi.set_errno(0)
self.check_polar(polar_with_errno_set)
def test_phase(self):
self.assertAlmostEqual(phase(0), 0.)
self.assertAlmostEqual(phase(1.), 0.)
self.assertAlmostEqual(phase(-1.), pi)
self.assertAlmostEqual(phase(-1.+1E-300j), pi)
self.assertAlmostEqual(phase(-1.-1E-300j), -pi)
self.assertAlmostEqual(phase(1j), pi/2)
self.assertAlmostEqual(phase(-1j), -pi/2)
# zeros
self.assertEqual(phase(complex(0.0, 0.0)), 0.0)
self.assertEqual(phase(complex(0.0, -0.0)), -0.0)
self.assertEqual(phase(complex(-0.0, 0.0)), pi)
self.assertEqual(phase(complex(-0.0, -0.0)), -pi)
# infinities
self.assertAlmostEqual(phase(complex(-INF, -0.0)), -pi)
self.assertAlmostEqual(phase(complex(-INF, -2.3)), -pi)
self.assertAlmostEqual(phase(complex(-INF, -INF)), -0.75*pi)
self.assertAlmostEqual(phase(complex(-2.3, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(-0.0, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(0.0, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(2.3, -INF)), -pi/2)
self.assertAlmostEqual(phase(complex(INF, -INF)), -pi/4)
self.assertEqual(phase(complex(INF, -2.3)), -0.0)
self.assertEqual(phase(complex(INF, -0.0)), -0.0)
self.assertEqual(phase(complex(INF, 0.0)), 0.0)
self.assertEqual(phase(complex(INF, 2.3)), 0.0)
self.assertAlmostEqual(phase(complex(INF, INF)), pi/4)
self.assertAlmostEqual(phase(complex(2.3, INF)), pi/2)
self.assertAlmostEqual(phase(complex(0.0, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-0.0, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-2.3, INF)), pi/2)
self.assertAlmostEqual(phase(complex(-INF, INF)), 0.75*pi)
self.assertAlmostEqual(phase(complex(-INF, 2.3)), pi)
self.assertAlmostEqual(phase(complex(-INF, 0.0)), pi)
# real or imaginary part NaN
for z in complex_nans:
self.assertTrue(math.isnan(phase(z)))
def test_abs(self):
# zeros
for z in complex_zeros:
self.assertEqual(abs(z), 0.0)
# infinities
for z in complex_infinities:
self.assertEqual(abs(z), INF)
# real or imaginary part NaN
self.assertEqual(abs(complex(NAN, -INF)), INF)
self.assertTrue(math.isnan(abs(complex(NAN, -2.3))))
self.assertTrue(math.isnan(abs(complex(NAN, -0.0))))
self.assertTrue(math.isnan(abs(complex(NAN, 0.0))))
self.assertTrue(math.isnan(abs(complex(NAN, 2.3))))
self.assertEqual(abs(complex(NAN, INF)), INF)
self.assertEqual(abs(complex(-INF, NAN)), INF)
self.assertTrue(math.isnan(abs(complex(-2.3, NAN))))
self.assertTrue(math.isnan(abs(complex(-0.0, NAN))))
self.assertTrue(math.isnan(abs(complex(0.0, NAN))))
self.assertTrue(math.isnan(abs(complex(2.3, NAN))))
self.assertEqual(abs(complex(INF, NAN)), INF)
self.assertTrue(math.isnan(abs(complex(NAN, NAN))))
@requires_IEEE_754
def test_abs_overflows(self):
# result overflows
self.assertRaises(OverflowError, abs, complex(1.4e308, 1.4e308))
def assertCEqual(self, a, b):
eps = 1E-7
if abs(a.real - b[0]) > eps or abs(a.imag - b[1]) > eps:
self.fail((a ,b))
def test_rect(self):
self.assertCEqual(rect(0, 0), (0, 0))
self.assertCEqual(rect(1, 0), (1., 0))
self.assertCEqual(rect(1, -pi), (-1., 0))
self.assertCEqual(rect(1, pi/2), (0, 1.))
self.assertCEqual(rect(1, -pi/2), (0, -1.))
def test_isfinite(self):
real_vals = [float('-inf'), -2.3, -0.0,
0.0, 2.3, float('inf'), float('nan')]
for x in real_vals:
for y in real_vals:
z = complex(x, y)
self.assertEqual(cmath.isfinite(z),
math.isfinite(x) and math.isfinite(y))
def test_isnan(self):
self.assertFalse(cmath.isnan(1))
self.assertFalse(cmath.isnan(1j))
self.assertFalse(cmath.isnan(INF))
self.assertTrue(cmath.isnan(NAN))
self.assertTrue(cmath.isnan(complex(NAN, 0)))
self.assertTrue(cmath.isnan(complex(0, NAN)))
self.assertTrue(cmath.isnan(complex(NAN, NAN)))
self.assertTrue(cmath.isnan(complex(NAN, INF)))
self.assertTrue(cmath.isnan(complex(INF, NAN)))
def test_isinf(self):
self.assertFalse(cmath.isinf(1))
self.assertFalse(cmath.isinf(1j))
self.assertFalse(cmath.isinf(NAN))
self.assertTrue(cmath.isinf(INF))
self.assertTrue(cmath.isinf(complex(INF, 0)))
self.assertTrue(cmath.isinf(complex(0, INF)))
self.assertTrue(cmath.isinf(complex(INF, INF)))
self.assertTrue(cmath.isinf(complex(NAN, INF)))
self.assertTrue(cmath.isinf(complex(INF, NAN)))
@requires_IEEE_754
def testTanhSign(self):
for z in complex_zeros:
self.assertComplexesAreIdentical(cmath.tanh(z), z)
# The algorithm used for atan and atanh makes use of the system
# log1p function; If that system function doesn't respect the sign
# of zero, then atan and atanh will also have difficulties with
# the sign of complex zeros.
@requires_IEEE_754
def testAtanSign(self):
for z in complex_zeros:
self.assertComplexesAreIdentical(cmath.atan(z), z)
@requires_IEEE_754
def testAtanhSign(self):
for z in complex_zeros:
self.assertComplexesAreIdentical(cmath.atanh(z), z)
| CMathTests |
python | huggingface__transformers | tests/models/flaubert/test_tokenization_flaubert.py | {
"start": 942,
"end": 3375
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "flaubert/flaubert_base_cased"
tokenizer_class = FlaubertTokenizer
test_rust_tokenizer = False
# Copied from transformers.tests.models.xlm.test_tokenization_xlm.XLMTokenizationTest.test_full_tokenizer
def test_full_tokenizer(self):
"""Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "i</w>", "lo", "low", "ne", "new", "er</w>", "low</w>", "lowest</w>", "new</w>", "newer</w>", "wider</w>", "<unk>"] # fmt: skip
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["n e 300", "ne w 301", "e r</w> 302", ""]
with tempfile.TemporaryDirectory() as tmpdir:
vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"])
merges_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["merges_file"])
with open(vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
tokenizer = FlaubertTokenizer(vocab_file, merges_file)
text = "lower newer"
bpe_tokens = ["l", "o", "w", "er</w>", "new", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 18, 17, 18, 24]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
# Copied from transformers.tests.models.xlm.test_tokenization_xlm.XLMTokenizationTest.test_sequence_builders
def test_sequence_builders(self):
tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
print(encoded_sentence)
print(encoded_sentence)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_2 + [1]
| FlaubertTokenizationTest |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_custom_business_month.py | {
"start": 573,
"end": 1442
} | class ____:
@pytest.mark.parametrize("offset2", [CBMonthBegin(2), CBMonthEnd(2)])
def test_eq(self, offset2):
assert offset2 == offset2
@pytest.mark.parametrize("offset2", [CBMonthBegin(2), CBMonthEnd(2)])
def test_hash(self, offset2):
assert hash(offset2) == hash(offset2)
@pytest.mark.parametrize("_offset", [CBMonthBegin, CBMonthEnd])
def test_roundtrip_pickle(self, _offset):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(_offset())
_check_roundtrip(_offset(2))
_check_roundtrip(_offset() * 2)
@pytest.mark.parametrize("_offset", [CBMonthBegin, CBMonthEnd])
def test_copy(self, _offset):
# GH 17452
off = _offset(weekmask="Mon Wed Fri")
assert off == off.copy()
| TestCommonCBM |
python | scikit-learn__scikit-learn | sklearn/pipeline.py | {
"start": 3009,
"end": 59192
} | class ____(_BaseComposition):
"""
A sequence of data transformers with an optional final predictor.
`Pipeline` allows you to sequentially apply a list of transformers to
preprocess the data and, if desired, conclude the sequence with a final
:term:`predictor` for predictive modeling.
Intermediate steps of the pipeline must be transformers, that is, they
must implement `fit` and `transform` methods.
The final :term:`estimator` only needs to implement `fit`.
The transformers in the pipeline can be cached using ``memory`` argument.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters. For this, it
enables setting parameters of the various steps using their names and the
parameter name separated by a `'__'`, as in the example below. A step's
estimator may be replaced entirely by setting the parameter with its name
to another estimator, or a transformer removed by setting it to
`'passthrough'` or `None`.
For an example use case of `Pipeline` combined with
:class:`~sklearn.model_selection.GridSearchCV`, refer to
:ref:`sphx_glr_auto_examples_compose_plot_compare_reduction.py`. The
example :ref:`sphx_glr_auto_examples_compose_plot_digits_pipe.py` shows how
to grid search on a pipeline using `'__'` as a separator in the parameter names.
Read more in the :ref:`User Guide <pipeline>`.
.. versionadded:: 0.5
Parameters
----------
steps : list of tuples
List of (name of step, estimator) tuples that are to be chained in
sequential order. To be compatible with the scikit-learn API, all steps
must define `fit`. All non-last steps must also define `transform`. See
:ref:`Combining Estimators <combining_estimators>` for more details.
transform_input : list of str, default=None
The names of the :term:`metadata` parameters that should be transformed by the
pipeline before passing it to the step consuming it.
This enables transforming some input arguments to ``fit`` (other than ``X``)
to be transformed by the steps of the pipeline up to the step which requires
them. Requirement is defined via :ref:`metadata routing <metadata_routing>`.
For instance, this can be used to pass a validation set through the pipeline.
You can only set this if metadata routing is enabled, which you
can enable using ``sklearn.set_config(enable_metadata_routing=True)``.
.. versionadded:: 1.6
memory : str or object with the joblib.Memory interface, default=None
Used to cache the fitted transformers of the pipeline. The last step
will never be cached, even if it is a transformer. By default, no
caching is performed. If a string is given, it is the path to the
caching directory. Enabling caching triggers a clone of the transformers
before fitting. Therefore, the transformer instance given to the
pipeline cannot be inspected directly. Use the attribute ``named_steps``
or ``steps`` to inspect estimators within the pipeline. Caching the
transformers is advantageous when fitting is time consuming. See
:ref:`sphx_glr_auto_examples_neighbors_plot_caching_nearest_neighbors.py`
for an example on how to enable caching.
verbose : bool, default=False
If True, the time elapsed while fitting each step will be printed as it
is completed.
Attributes
----------
named_steps : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
classes_ : ndarray of shape (n_classes,)
The classes labels. Only exist if the last step of the pipeline is a
classifier.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying first estimator in `steps` exposes such an attribute
when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimator exposes such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
make_pipeline : Convenience function for simplified pipeline construction.
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.pipeline import Pipeline
>>> X, y = make_classification(random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC())])
>>> # The pipeline can be used as any other estimator
>>> # and avoids leaking the test set into the train set
>>> pipe.fit(X_train, y_train).score(X_test, y_test)
0.88
>>> # An estimator's parameter can be set using '__' syntax
>>> pipe.set_params(svc__C=10).fit(X_train, y_train).score(X_test, y_test)
0.76
"""
# BaseEstimator interface
_parameter_constraints: dict = {
"steps": [list, Hidden(tuple)],
"transform_input": [list, None],
"memory": [None, str, HasMethods(["cache"])],
"verbose": ["boolean"],
}
def __init__(self, steps, *, transform_input=None, memory=None, verbose=False):
self.steps = steps
self.transform_input = transform_input
self.memory = memory
self.verbose = verbose
def set_output(self, *, transform=None):
"""Set the output container when `"transform"` and `"fit_transform"` are called.
Calling `set_output` will set the output of all estimators in `steps`.
Parameters
----------
transform : {"default", "pandas", "polars"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `"polars"`: Polars output
- `None`: Transform configuration is unchanged
.. versionadded:: 1.4
`"polars"` option was added.
Returns
-------
self : estimator instance
Estimator instance.
"""
for _, _, step in self._iter():
_safe_set_output(step, transform=transform)
return self
def get_params(self, deep=True):
"""Get parameters for this estimator.
Returns the parameters given in the constructor as well as the
estimators contained within the `steps` of the `Pipeline`.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return self._get_params("steps", deep=deep)
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
Valid parameter keys can be listed with ``get_params()``. Note that
you can directly set the parameters of the estimators contained in
`steps`.
Parameters
----------
**kwargs : dict
Parameters of this estimator or parameters of estimators contained
in `steps`. Parameters of the steps may be set using its name and
the parameter name separated by a '__'.
Returns
-------
self : object
Pipeline class instance.
"""
self._set_params("steps", **kwargs)
return self
def _validate_steps(self):
if not self.steps:
raise ValueError("The pipeline is empty. Please add steps.")
names, estimators = zip(*self.steps)
# validate names
self._validate_names(names)
# validate estimators
transformers = estimators[:-1]
estimator = estimators[-1]
for t in transformers:
if t is None or t == "passthrough":
continue
if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
t, "transform"
):
raise TypeError(
"All intermediate steps should be "
"transformers and implement fit and transform "
"or be the string 'passthrough' "
"'%s' (type %s) doesn't" % (t, type(t))
)
# We allow last estimator to be None as an identity transformation
if (
estimator is not None
and estimator != "passthrough"
and not hasattr(estimator, "fit")
):
raise TypeError(
"Last step of Pipeline should implement fit "
"or be the string 'passthrough'. "
"'%s' (type %s) doesn't" % (estimator, type(estimator))
)
def _iter(self, with_final=True, filter_passthrough=True):
"""
Generate (idx, (name, trans)) tuples from self.steps
When filter_passthrough is True, 'passthrough' and None transformers
are filtered out.
"""
stop = len(self.steps)
if not with_final:
stop -= 1
for idx, (name, trans) in enumerate(islice(self.steps, 0, stop)):
if not filter_passthrough:
yield idx, name, trans
elif trans is not None and trans != "passthrough":
yield idx, name, trans
def __len__(self):
"""
Returns the length of the Pipeline
"""
return len(self.steps)
def __getitem__(self, ind):
"""Returns a sub-pipeline or a single estimator in the pipeline
Indexing with an integer will return an estimator; using a slice
returns another Pipeline instance which copies a slice of this
Pipeline. This copy is shallow: modifying (or fitting) estimators in
the sub-pipeline will affect the larger pipeline and vice-versa.
However, replacing a value in `step` will not affect a copy.
See
:ref:`sphx_glr_auto_examples_feature_selection_plot_feature_selection_pipeline.py`
for an example of how to use slicing to inspect part of a pipeline.
"""
if isinstance(ind, slice):
if ind.step not in (1, None):
raise ValueError("Pipeline slicing only supports a step of 1")
return self.__class__(
self.steps[ind], memory=self.memory, verbose=self.verbose
)
try:
name, est = self.steps[ind]
except TypeError:
# Not an int, try get step by name
return self.named_steps[ind]
return est
@property
def named_steps(self):
"""Access the steps by name.
Read-only attribute to access any step by given name.
Keys are steps names and values are the steps objects."""
# Use Bunch object to improve autocomplete
return Bunch(**dict(self.steps))
@property
def _final_estimator(self):
try:
estimator = self.steps[-1][1]
return "passthrough" if estimator is None else estimator
except (ValueError, AttributeError, TypeError):
# This condition happens when a call to a method is first calling
# `_available_if` and `fit` did not validate `steps` yet. We
# return `None` and an `InvalidParameterError` will be raised
# right after.
return None
def _log_message(self, step_idx):
if not self.verbose:
return None
name, _ = self.steps[step_idx]
return "(step %d of %d) Processing %s" % (step_idx + 1, len(self.steps), name)
def _check_method_params(self, method, props, **kwargs):
if _routing_enabled():
routed_params = process_routing(self, method, **props, **kwargs)
return routed_params
else:
fit_params_steps = Bunch(
**{
name: Bunch(**{method: {} for method in METHODS})
for name, step in self.steps
if step is not None
}
)
for pname, pval in props.items():
if "__" not in pname:
raise ValueError(
"Pipeline.fit does not accept the {} parameter. "
"You can pass parameters to specific steps of your "
"pipeline using the stepname__parameter format, e.g. "
"`Pipeline.fit(X, y, logisticregression__sample_weight"
"=sample_weight)`.".format(pname)
)
step, param = pname.split("__", 1)
fit_params_steps[step]["fit"][param] = pval
# without metadata routing, fit_transform and fit_predict
# get all the same params and pass it to the last fit.
fit_params_steps[step]["fit_transform"][param] = pval
fit_params_steps[step]["fit_predict"][param] = pval
return fit_params_steps
def _get_metadata_for_step(self, *, step_idx, step_params, all_params):
"""Get params (metadata) for step `name`.
This transforms the metadata up to this step if required, which is
indicated by the `transform_input` parameter.
If a param in `step_params` is included in the `transform_input` list,
it will be transformed.
Parameters
----------
step_idx : int
Index of the step in the pipeline.
step_params : dict
Parameters specific to the step. These are routed parameters, e.g.
`routed_params[name]`. If a parameter name here is included in the
`pipeline.transform_input`, then it will be transformed. Note that
these parameters are *after* routing, so the aliases are already
resolved.
all_params : dict
All parameters passed by the user. Here this is used to call
`transform` on the slice of the pipeline itself.
Returns
-------
dict
Parameters to be passed to the step. The ones which should be
transformed are transformed.
"""
if (
self.transform_input is None
or not all_params
or not step_params
or step_idx == 0
):
# we only need to process step_params if transform_input is set
# and metadata is given by the user.
return step_params
sub_pipeline = self[:step_idx]
sub_metadata_routing = get_routing_for_object(sub_pipeline)
# here we get the metadata required by sub_pipeline.transform
transform_params = {
key: value
for key, value in all_params.items()
if key
in sub_metadata_routing.consumes(
method="transform", params=all_params.keys()
)
}
transformed_params = dict() # this is to be returned
transformed_cache = dict() # used to transform each param once
# `step_params` is the output of `process_routing`, so it has a dict for each
# method (e.g. fit, transform, predict), which are the args to be passed to
# those methods. We need to transform the parameters which are in the
# `transform_input`, before returning these dicts.
for method, method_params in step_params.items():
transformed_params[method] = Bunch()
for param_name, param_value in method_params.items():
# An example of `(param_name, param_value)` is
# `('sample_weight', array([0.5, 0.5, ...]))`
if param_name in self.transform_input:
# This parameter now needs to be transformed by the sub_pipeline, to
# this step. We cache these computations to avoid repeating them.
transformed_params[method][param_name] = _cached_transform(
sub_pipeline,
cache=transformed_cache,
param_name=param_name,
param_value=param_value,
transform_params=transform_params,
)
else:
transformed_params[method][param_name] = param_value
return transformed_params
# Estimator interface
def _fit(self, X, y=None, routed_params=None, raw_params=None):
"""Fit the pipeline except the last step.
routed_params is the output of `process_routing`
raw_params is the parameters passed by the user, used when `transform_input`
is set by the user, to transform metadata using a sub-pipeline.
"""
# shallow copy of steps - this should really be steps_
self.steps = list(self.steps)
self._validate_steps()
# Setup the memory
memory = check_memory(self.memory)
fit_transform_one_cached = memory.cache(_fit_transform_one)
for step_idx, name, transformer in self._iter(
with_final=False, filter_passthrough=False
):
if transformer is None or transformer == "passthrough":
with _print_elapsed_time("Pipeline", self._log_message(step_idx)):
continue
if hasattr(memory, "location") and memory.location is None:
# we do not clone when caching is disabled to
# preserve backward compatibility
cloned_transformer = transformer
else:
cloned_transformer = clone(transformer)
# Fit or load from cache the current transformer
step_params = self._get_metadata_for_step(
step_idx=step_idx,
step_params=routed_params[name],
all_params=raw_params,
)
X, fitted_transformer = fit_transform_one_cached(
cloned_transformer,
X,
y,
weight=None,
message_clsname="Pipeline",
message=self._log_message(step_idx),
params=step_params,
)
# Replace the transformer of the step with the fitted
# transformer. This is necessary when loading the transformer
# from the cache.
self.steps[step_idx] = (name, fitted_transformer)
return X
@_fit_context(
# estimators in Pipeline.steps are not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None, **params):
"""Fit the model.
Fit all the transformers one after the other and sequentially transform the
data. Finally, fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default): Parameters passed to the
``fit`` method of each step, where each parameter name is prefixed such
that parameter ``p`` for step ``s`` has key ``s__p``.
- If `enable_metadata_routing=True`: Parameters requested and accepted by
steps. Each step must have requested certain metadata for these parameters
to be forwarded to them.
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True` is set via
:func:`~sklearn.set_config`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
self : object
Pipeline with fitted steps.
"""
if not _routing_enabled() and self.transform_input is not None:
raise ValueError(
"The `transform_input` parameter can only be set if metadata "
"routing is enabled. You can enable metadata routing using "
"`sklearn.set_config(enable_metadata_routing=True)`."
)
routed_params = self._check_method_params(method="fit", props=params)
Xt = self._fit(X, y, routed_params, raw_params=params)
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
if self._final_estimator != "passthrough":
last_step_params = self._get_metadata_for_step(
step_idx=len(self) - 1,
step_params=routed_params[self.steps[-1][0]],
all_params=params,
)
self._final_estimator.fit(Xt, y, **last_step_params["fit"])
return self
def _can_fit_transform(self):
return (
self._final_estimator == "passthrough"
or hasattr(self._final_estimator, "transform")
or hasattr(self._final_estimator, "fit_transform")
)
@available_if(_can_fit_transform)
@_fit_context(
# estimators in Pipeline.steps are not validated yet
prefer_skip_nested_validation=False
)
def fit_transform(self, X, y=None, **params):
"""Fit the model and transform with the final estimator.
Fit all the transformers one after the other and sequentially transform
the data. Only valid if the final estimator either implements
`fit_transform` or `fit` and `transform`.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default): Parameters passed to the
``fit`` method of each step, where each parameter name is prefixed such
that parameter ``p`` for step ``s`` has key ``s__p``.
- If `enable_metadata_routing=True`: Parameters requested and accepted by
steps. Each step must have requested certain metadata for these parameters
to be forwarded to them.
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
Xt : ndarray of shape (n_samples, n_transformed_features)
Transformed samples.
"""
routed_params = self._check_method_params(method="fit_transform", props=params)
Xt = self._fit(X, y, routed_params)
last_step = self._final_estimator
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
if last_step == "passthrough":
return Xt
last_step_params = self._get_metadata_for_step(
step_idx=len(self) - 1,
step_params=routed_params[self.steps[-1][0]],
all_params=params,
)
if hasattr(last_step, "fit_transform"):
return last_step.fit_transform(
Xt, y, **last_step_params["fit_transform"]
)
else:
return last_step.fit(Xt, y, **last_step_params["fit"]).transform(
Xt, **last_step_params["transform"]
)
@available_if(_final_estimator_has("predict"))
def predict(self, X, **params):
"""Transform the data, and apply `predict` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls `predict`
method. Only valid if the final estimator implements `predict`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default): Parameters to the
``predict`` called at the end of all transformations in the pipeline.
- If `enable_metadata_routing=True`: Parameters requested and accepted by
steps. Each step must have requested certain metadata for these parameters
to be forwarded to them.
.. versionadded:: 0.20
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True` is set via
:func:`~sklearn.set_config`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Note that while this may be used to return uncertainties from some
models with ``return_std`` or ``return_cov``, uncertainties that are
generated by the transformations in the pipeline are not propagated
to the final estimator.
Returns
-------
y_pred : ndarray
Result of calling `predict` on the final estimator.
"""
check_is_fitted(self)
Xt = X
if not _routing_enabled():
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return self.steps[-1][1].predict(Xt, **params)
# metadata routing enabled
routed_params = process_routing(self, "predict", **params)
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt, **routed_params[name].transform)
return self.steps[-1][1].predict(Xt, **routed_params[self.steps[-1][0]].predict)
@available_if(_final_estimator_has("fit_predict"))
@_fit_context(
# estimators in Pipeline.steps are not validated yet
prefer_skip_nested_validation=False
)
def fit_predict(self, X, y=None, **params):
"""Transform the data, and apply `fit_predict` with the final estimator.
Call `fit_transform` of each transformer in the pipeline. The
transformed data are finally passed to the final estimator that calls
`fit_predict` method. Only valid if the final estimator implements
`fit_predict`.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default): Parameters to the
``predict`` called at the end of all transformations in the pipeline.
- If `enable_metadata_routing=True`: Parameters requested and accepted by
steps. Each step must have requested certain metadata for these parameters
to be forwarded to them.
.. versionadded:: 0.20
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Note that while this may be used to return uncertainties from some
models with ``return_std`` or ``return_cov``, uncertainties that are
generated by the transformations in the pipeline are not propagated
to the final estimator.
Returns
-------
y_pred : ndarray
Result of calling `fit_predict` on the final estimator.
"""
routed_params = self._check_method_params(method="fit_predict", props=params)
Xt = self._fit(X, y, routed_params)
params_last_step = routed_params[self.steps[-1][0]]
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
y_pred = self.steps[-1][1].fit_predict(
Xt, y, **params_last_step.get("fit_predict", {})
)
return y_pred
@available_if(_final_estimator_has("predict_proba"))
def predict_proba(self, X, **params):
"""Transform the data, and apply `predict_proba` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`predict_proba` method. Only valid if the final estimator implements
`predict_proba`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**params : dict of str -> object
- If `enable_metadata_routing=False` (default): Parameters to the
`predict_proba` called at the end of all transformations in the pipeline.
- If `enable_metadata_routing=True`: Parameters requested and accepted by
steps. Each step must have requested certain metadata for these parameters
to be forwarded to them.
.. versionadded:: 0.20
.. versionchanged:: 1.4
Parameters are now passed to the ``transform`` method of the
intermediate steps as well, if requested, and if
`enable_metadata_routing=True`.
See :ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y_proba : ndarray of shape (n_samples, n_classes)
Result of calling `predict_proba` on the final estimator.
"""
check_is_fitted(self)
Xt = X
if not _routing_enabled():
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt)
return self.steps[-1][1].predict_proba(Xt, **params)
# metadata routing enabled
routed_params = process_routing(self, "predict_proba", **params)
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(Xt, **routed_params[name].transform)
return self.steps[-1][1].predict_proba(
Xt, **routed_params[self.steps[-1][0]].predict_proba
)
@available_if(_final_estimator_has("decision_function"))
def decision_function(self, X, **params):
"""Transform the data, and apply `decision_function` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`decision_function` method. Only valid if the final estimator
implements `decision_function`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
**params : dict of string -> object
Parameters requested and accepted by steps. Each step must have
requested certain metadata for these parameters to be forwarded to
them.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
y_score : ndarray of shape (n_samples, n_classes)
Result of calling `decision_function` on the final estimator.
"""
check_is_fitted(self)
_raise_for_params(params, self, "decision_function")
# not branching here since params is only available if
# enable_metadata_routing=True
routed_params = process_routing(self, "decision_function", **params)
Xt = X
for _, name, transform in self._iter(with_final=False):
Xt = transform.transform(
Xt, **routed_params.get(name, {}).get("transform", {})
)
return self.steps[-1][1].decision_function(
Xt,
**routed_params.get(self.steps[-1][0], {}).get("decision_function", {}),
)
@available_if(_final_estimator_has("score_samples"))
def score_samples(self, X):
"""Transform the data, and apply `score_samples` with the final estimator.
Call `transform` of each transformer in the pipeline. The transformed
data are finally passed to the final estimator that calls
`score_samples` method. Only valid if the final estimator implements
`score_samples`.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
Returns
-------
y_score : ndarray of shape (n_samples,)
Result of calling `score_samples` on the final estimator.
"""
check_is_fitted(self)
Xt = X
for _, _, transformer in self._iter(with_final=False):
Xt = transformer.transform(Xt)
return self.steps[-1][1].score_samples(Xt)
    @available_if(_final_estimator_has("predict_log_proba"))
    def predict_log_proba(self, X, **params):
        """Transform the data, and apply `predict_log_proba` with the final estimator.

        Call `transform` of each transformer in the pipeline. The transformed
        data are finally passed to the final estimator that calls
        `predict_log_proba` method. Only valid if the final estimator
        implements `predict_log_proba`.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step
            of the pipeline.

        **params : dict of str -> object
            - If `enable_metadata_routing=False` (default): Parameters to the
              `predict_log_proba` called at the end of all transformations in the
              pipeline.

            - If `enable_metadata_routing=True`: Parameters requested and accepted by
              steps. Each step must have requested certain metadata for these parameters
              to be forwarded to them.

            .. versionadded:: 0.20

            .. versionchanged:: 1.4
                Parameters are now passed to the ``transform`` method of the
                intermediate steps as well, if requested, and if
                `enable_metadata_routing=True`.

            See :ref:`Metadata Routing User Guide <metadata_routing>` for more
            details.

        Returns
        -------
        y_log_proba : ndarray of shape (n_samples, n_classes)
            Result of calling `predict_log_proba` on the final estimator.
        """
        check_is_fitted(self)
        Xt = X

        if not _routing_enabled():
            # Legacy (non-routing) path: all ``params`` go unchanged to the
            # final estimator's ``predict_log_proba``.
            for _, name, transform in self._iter(with_final=False):
                Xt = transform.transform(Xt)
            return self.steps[-1][1].predict_log_proba(Xt, **params)

        # metadata routing enabled: each step receives only the params it
        # requested for its ``transform`` / ``predict_log_proba``.
        routed_params = process_routing(self, "predict_log_proba", **params)
        for _, name, transform in self._iter(with_final=False):
            Xt = transform.transform(Xt, **routed_params[name].transform)
        return self.steps[-1][1].predict_log_proba(
            Xt, **routed_params[self.steps[-1][0]].predict_log_proba
        )
def _can_transform(self):
return self._final_estimator == "passthrough" or hasattr(
self._final_estimator, "transform"
)
    @available_if(_can_transform)
    def transform(self, X, **params):
        """Transform the data, and apply `transform` with the final estimator.

        Call `transform` of each transformer in the pipeline. The transformed
        data are finally passed to the final estimator that calls
        `transform` method. Only valid if the final estimator
        implements `transform`.

        This also works where final estimator is `None` in which case all prior
        transformations are applied.

        Parameters
        ----------
        X : iterable
            Data to transform. Must fulfill input requirements of first step
            of the pipeline.

        **params : dict of str -> object
            Parameters requested and accepted by steps. Each step must have
            requested certain metadata for these parameters to be forwarded to
            them.

            .. versionadded:: 1.4

                Only available if `enable_metadata_routing=True`. See
                :ref:`Metadata Routing User Guide <metadata_routing>` for more
                details.

        Returns
        -------
        Xt : ndarray of shape (n_samples, n_transformed_features)
            Transformed data.
        """
        check_is_fitted(self)
        _raise_for_params(params, self, "transform")

        # not branching here since params is only available if
        # enable_metadata_routing=True
        routed_params = process_routing(self, "transform", **params)
        Xt = X
        # Unlike the predict-style methods, ``_iter()`` here includes the
        # final step, so its ``transform`` is applied too.
        for _, name, transform in self._iter():
            Xt = transform.transform(Xt, **routed_params[name].transform)
        return Xt
def _can_inverse_transform(self):
return all(hasattr(t, "inverse_transform") for _, _, t in self._iter())
    @available_if(_can_inverse_transform)
    def inverse_transform(self, X, **params):
        """Apply `inverse_transform` for each step in a reverse order.

        All estimators in the pipeline must support `inverse_transform`.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_transformed_features)
            Data samples, where ``n_samples`` is the number of samples and
            ``n_features`` is the number of features. Must fulfill
            input requirements of last step of pipeline's
            ``inverse_transform`` method.

        **params : dict of str -> object
            Parameters requested and accepted by steps. Each step must have
            requested certain metadata for these parameters to be forwarded to
            them.

            .. versionadded:: 1.4

                Only available if `enable_metadata_routing=True`. See
                :ref:`Metadata Routing User Guide <metadata_routing>` for more
                details.

        Returns
        -------
        X_original : ndarray of shape (n_samples, n_features)
            Inverse transformed data, that is, data in the original feature
            space.
        """
        check_is_fitted(self)
        _raise_for_params(params, self, "inverse_transform")

        # we don't have to branch here, since params is only non-empty if
        # enable_metadata_routing=True.
        routed_params = process_routing(self, "inverse_transform", **params)
        # Undo each step in reverse order: final step first, first step last.
        reverse_iter = reversed(list(self._iter()))
        for _, name, transform in reverse_iter:
            X = transform.inverse_transform(X, **routed_params[name].inverse_transform)
        return X
    @available_if(_final_estimator_has("score"))
    def score(self, X, y=None, sample_weight=None, **params):
        """Transform the data, and apply `score` with the final estimator.

        Call `transform` of each transformer in the pipeline. The transformed
        data are finally passed to the final estimator that calls
        `score` method. Only valid if the final estimator implements `score`.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step
            of the pipeline.

        y : iterable, default=None
            Targets used for scoring. Must fulfill label requirements for all
            steps of the pipeline.

        sample_weight : array-like, default=None
            If not None, this argument is passed as ``sample_weight`` keyword
            argument to the ``score`` method of the final estimator.

        **params : dict of str -> object
            Parameters requested and accepted by steps. Each step must have
            requested certain metadata for these parameters to be forwarded to
            them.

            .. versionadded:: 1.4

                Only available if `enable_metadata_routing=True`. See
                :ref:`Metadata Routing User Guide <metadata_routing>` for more
                details.

        Returns
        -------
        score : float
            Result of calling `score` on the final estimator.
        """
        check_is_fitted(self)
        Xt = X
        if not _routing_enabled():
            # Legacy (non-routing) path: only ``sample_weight`` is forwarded,
            # and only to the final estimator's ``score``.
            for _, name, transform in self._iter(with_final=False):
                Xt = transform.transform(Xt)
            score_params = {}
            if sample_weight is not None:
                score_params["sample_weight"] = sample_weight
            return self.steps[-1][1].score(Xt, y, **score_params)

        # metadata routing is enabled.
        routed_params = process_routing(
            self, "score", sample_weight=sample_weight, **params
        )

        Xt = X
        for _, name, transform in self._iter(with_final=False):
            Xt = transform.transform(Xt, **routed_params[name].transform)
        return self.steps[-1][1].score(Xt, y, **routed_params[self.steps[-1][0]].score)
@property
def classes_(self):
"""The classes labels. Only exist if the last step is a classifier."""
return self.steps[-1][1].classes_
    def __sklearn_tags__(self):
        """Combine estimator tags from the pipeline's first and last steps."""
        tags = super().__sklearn_tags__()

        if not self.steps:
            return tags

        try:
            # Input-related tags come from the first step, which is the one
            # that sees the raw input data.
            if self.steps[0][1] is not None and self.steps[0][1] != "passthrough":
                tags.input_tags.pairwise = get_tags(
                    self.steps[0][1]
                ).input_tags.pairwise
            # WARNING: the sparse tag can be incorrect.
            # Some Pipelines accepting sparse data are wrongly tagged sparse=False.
            # For example Pipeline([PCA(), estimator]) accepts sparse data
            # even if the estimator doesn't as PCA outputs a dense array.
            tags.input_tags.sparse = all(
                get_tags(step).input_tags.sparse
                for name, step in self.steps
                if step is not None and step != "passthrough"
            )
        except (ValueError, AttributeError, TypeError):
            # This happens when the `steps` is not a list of (name, estimator)
            # tuples and `fit` is not called yet to validate the steps.
            pass

        try:
            # Estimator-type and output-related tags come from the last step.
            if self.steps[-1][1] is not None and self.steps[-1][1] != "passthrough":
                last_step_tags = get_tags(self.steps[-1][1])
                tags.estimator_type = last_step_tags.estimator_type
                tags.target_tags.multi_output = last_step_tags.target_tags.multi_output
                tags.classifier_tags = deepcopy(last_step_tags.classifier_tags)
                tags.regressor_tags = deepcopy(last_step_tags.regressor_tags)
                tags.transformer_tags = deepcopy(last_step_tags.transformer_tags)
        except (ValueError, AttributeError, TypeError):
            # This happens when the `steps` is not a list of (name, estimator)
            # tuples and `fit` is not called yet to validate the steps.
            pass

        return tags
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Transform input features using the pipeline.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
feature_names_out = input_features
for _, name, transform in self._iter():
if not hasattr(transform, "get_feature_names_out"):
raise AttributeError(
"Estimator {} does not provide get_feature_names_out. "
"Did you mean to call pipeline[:-1].get_feature_names_out"
"()?".format(name)
)
feature_names_out = transform.get_feature_names_out(feature_names_out)
return feature_names_out
@property
def n_features_in_(self):
"""Number of features seen during first step `fit` method."""
# delegate to first step (which will call check_is_fitted)
return self.steps[0][1].n_features_in_
@property
def feature_names_in_(self):
"""Names of features seen during first step `fit` method."""
# delegate to first step (which will call check_is_fitted)
return self.steps[0][1].feature_names_in_
def __sklearn_is_fitted__(self):
"""Indicate whether pipeline has been fit.
This is done by checking whether the last non-`passthrough` step of the
pipeline is fitted.
An empty pipeline is considered fitted.
"""
# First find the last step that is not 'passthrough'
last_step = None
for _, estimator in reversed(self.steps):
if estimator != "passthrough":
last_step = estimator
break
if last_step is None:
# All steps are 'passthrough', so the pipeline is considered fitted
return True
try:
# check if the last step of the pipeline is fitted
# we only check the last step since if the last step is fit, it
# means the previous steps should also be fit. This is faster than
# checking if every step of the pipeline is fit.
check_is_fitted(last_step)
return True
except NotFittedError:
return False
def _sk_visual_block_(self):
def _get_name(name, est):
if est is None or est == "passthrough":
return f"{name}: passthrough"
# Is an estimator
return f"{name}: {est.__class__.__name__}"
names, estimators = zip(
*[(_get_name(name, est), est) for name, est in self.steps]
)
name_details = [str(est) for est in estimators]
return _VisualBlock(
"serial",
estimators,
names=names,
name_details=name_details,
dash_wrapped=False,
)
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self)
# first we add all steps except the last one
for _, name, trans in self._iter(with_final=False, filter_passthrough=True):
method_mapping = MethodMapping()
# fit, fit_predict, and fit_transform call fit_transform if it
# exists, or else fit and transform
if hasattr(trans, "fit_transform"):
(
method_mapping.add(caller="fit", callee="fit_transform")
.add(caller="fit_transform", callee="fit_transform")
.add(caller="fit_predict", callee="fit_transform")
)
else:
(
method_mapping.add(caller="fit", callee="fit")
.add(caller="fit", callee="transform")
.add(caller="fit_transform", callee="fit")
.add(caller="fit_transform", callee="transform")
.add(caller="fit_predict", callee="fit")
.add(caller="fit_predict", callee="transform")
)
(
method_mapping.add(caller="predict", callee="transform")
.add(caller="predict", callee="transform")
.add(caller="predict_proba", callee="transform")
.add(caller="decision_function", callee="transform")
.add(caller="predict_log_proba", callee="transform")
.add(caller="transform", callee="transform")
.add(caller="inverse_transform", callee="inverse_transform")
.add(caller="score", callee="transform")
)
router.add(method_mapping=method_mapping, **{name: trans})
final_name, final_est = self.steps[-1]
if final_est is None or final_est == "passthrough":
return router
# then we add the last step
method_mapping = MethodMapping()
if hasattr(final_est, "fit_transform"):
method_mapping.add(caller="fit_transform", callee="fit_transform")
else:
method_mapping.add(caller="fit", callee="fit").add(
caller="fit", callee="transform"
)
(
method_mapping.add(caller="fit", callee="fit")
.add(caller="predict", callee="predict")
.add(caller="fit_predict", callee="fit_predict")
.add(caller="predict_proba", callee="predict_proba")
.add(caller="decision_function", callee="decision_function")
.add(caller="predict_log_proba", callee="predict_log_proba")
.add(caller="transform", callee="transform")
.add(caller="inverse_transform", callee="inverse_transform")
.add(caller="score", callee="score")
)
router.add(method_mapping=method_mapping, **{final_name: final_est})
return router
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [
estimator if isinstance(estimator, str) else type(estimator).__name__.lower()
for estimator in estimators
]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(namecount.items()):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps, memory=None, transform_input=None, verbose=False):
    """Construct a :class:`Pipeline` from the given estimators.

    Shorthand for the :class:`Pipeline` constructor that does not require
    (nor allow) naming the estimators: each step is automatically named
    after the lowercase of its type.

    Parameters
    ----------
    *steps : list of Estimator objects
        List of the scikit-learn estimators that are chained together.

    memory : str or object with the joblib.Memory interface, default=None
        Used to cache the fitted transformers of the pipeline. The last step
        will never be cached, even if it is a transformer. By default, no
        caching is performed. If a string is given, it is the path to the
        caching directory. Enabling caching triggers a clone of the
        transformers before fitting, so the transformer instances given to
        the pipeline cannot be inspected directly; use the attribute
        ``named_steps`` or ``steps`` to inspect estimators within the
        pipeline. Caching the transformers is advantageous when fitting is
        time consuming.

    transform_input : list of str, default=None
        Names of ``fit`` arguments (other than ``X``) to be transformed by
        the steps of the pipeline up to the step which requires them, with
        the requirement defined via :ref:`metadata routing
        <metadata_routing>`. This can be used to pass a validation set
        through the pipeline for instance. You can only set this if metadata
        routing is enabled, which you can enable using
        ``sklearn.set_config(enable_metadata_routing=True)``.

        .. versionadded:: 1.6

    verbose : bool, default=False
        If True, the time elapsed while fitting each step will be printed as
        it is completed.

    Returns
    -------
    p : Pipeline
        Returns a scikit-learn :class:`Pipeline` object.

    See Also
    --------
    Pipeline : Class for creating a pipeline of transforms with a final
        estimator.

    Examples
    --------
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.preprocessing import StandardScaler
    >>> from sklearn.pipeline import make_pipeline
    >>> make_pipeline(StandardScaler(), GaussianNB(priors=None))
    Pipeline(steps=[('standardscaler', StandardScaler()),
                    ('gaussiannb', GaussianNB())])
    """
    named_steps = _name_estimators(steps)
    return Pipeline(
        named_steps,
        memory=memory,
        transform_input=transform_input,
        verbose=verbose,
    )
def _transform_one(transformer, X, y, weight, params):
"""Call transform and apply weight to output.
Parameters
----------
transformer : estimator
Estimator to be used for transformation.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data to be transformed.
y : ndarray of shape (n_samples,)
Ignored.
weight : float
Weight to be applied to the output of the transformation.
params : dict
Parameters to be passed to the transformer's ``transform`` method.
This should be of the form ``process_routing()["step_name"]``.
"""
res = transformer.transform(X, **params.transform)
# if we have a weight for this transformer, multiply output
if weight is None:
return res
return res * weight
def _fit_transform_one(
    transformer, X, y, weight, message_clsname="", message=None, params=None
):
    """
    Fit ``transformer`` on ``X`` and ``y`` and return the transformed result
    together with the fitted transformer. When ``weight`` is not ``None``,
    the transformed result is multiplied by it.

    ``params`` needs to be of the form ``process_routing()["step_name"]``.
    """
    if not params:
        params = {}
    with _print_elapsed_time(message_clsname, message):
        if hasattr(transformer, "fit_transform"):
            res = transformer.fit_transform(X, y, **params.get("fit_transform", {}))
        else:
            fitted = transformer.fit(X, y, **params.get("fit", {}))
            res = fitted.transform(X, **params.get("transform", {}))

    if weight is None:
        return res, transformer
    return res * weight, transformer
def _fit_one(transformer, X, y, weight, message_clsname="", message=None, params=None):
    """
    Fits ``transformer`` to ``X`` and ``y``.

    ``weight`` is accepted only for signature compatibility with
    :func:`_fit_transform_one`; nothing is transformed here, so it is never
    applied. ``params`` needs to be of the form
    ``process_routing()["step_name"]``.
    """
    # Default to an empty routing entry so a missing ``params`` does not
    # crash, mirroring the ``params = params or {}`` guard in
    # ``_fit_transform_one``.
    params = params or {"fit": {}}
    with _print_elapsed_time(message_clsname, message):
        return transformer.fit(X, y, **params["fit"])
| Pipeline |
python | pallets__flask | tests/test_async.py | {
"start": 289,
"end": 444
class ____(View):
    # Flask class-based view used to exercise the async dispatch path.
    # Accept both GET and POST so the echoed method can differ per request.
    methods = ["GET", "POST"]

    async def dispatch_request(self):
        # Yield control once so the coroutine machinery is actually exercised.
        await asyncio.sleep(0)
        # Echo the HTTP method of the current request as the response body.
        return request.method
| AsyncView |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli_tests/cli_tests/test_environment_validation.py | {
"start": 1077,
"end": 8291
class ____:
    # Lightweight description of a CLI invocation: the subcommand path plus
    # any extra positional arguments.
    def __init__(self, command: tuple[str, ...], *args: str):
        self.command = command  # e.g. ("scaffold", "defs", "dagster.asset")
        self.args = args

    def to_cli_args(self) -> tuple[str, ...]:
        # Flatten into the argv tuple handed to the CLI test runner.
        return (*self.command, *self.args)
# Component type used by scaffold tests that need a concrete component.
DEFAULT_COMPONENT_TYPE = "dagster_test.components.SimpleAssetComponent"

# Commands runnable anywhere: no project/workspace context required.
NO_REQUIRED_CONTEXT_COMMANDS = [
    CommandSpec(("scaffold", "defs", "dagster.asset"), "foo"),
    CommandSpec(("scaffold", "defs", "dagster.asset_check"), "foo"),
    CommandSpec(("scaffold", "defs", "dagster.schedule"), "foo"),
    CommandSpec(("scaffold", "defs", "dagster.sensor"), "foo"),
    CommandSpec(("plus", "login")),
]

# Commands that must run inside a component library directory.
COMPONENT_LIBRARY_CONTEXT_COMMANDS = [
    CommandSpec(("scaffold", "component"), "foo"),
]

# Commands that need the component registry to be resolvable.
REGISTRY_CONTEXT_COMMANDS = [
    CommandSpec(("docs", "serve")),
    CommandSpec(("list", "component")),
    CommandSpec(("list", "registry-modules")),
    CommandSpec(("utils", "inspect-component"), DEFAULT_COMPONENT_TYPE),
]

# Commands that must run inside a Dagster project directory.
PROJECT_CONTEXT_COMMANDS = [
    CommandSpec(("launch",), "--assets", "foo"),
    CommandSpec(("utils", "configure-editor"), "vscode"),
    CommandSpec(("utils", "generate-component-schema")),
    CommandSpec(("check", "yaml")),
    CommandSpec(("list", "defs")),
    CommandSpec(("list", "env")),
    CommandSpec(("scaffold", "defs", DEFAULT_COMPONENT_TYPE, "foot")),
]

# Commands that must run inside a Dagster workspace directory.
WORKSPACE_CONTEXT_COMMANDS = [
    CommandSpec(("list", "project")),
]

# Commands accepted in either a workspace or a project directory.
WORKSPACE_OR_PROJECT_CONTEXT_COMMANDS = [
    CommandSpec(("dev",)),
    CommandSpec(("check", "defs")),
]
# ########################
# ##### TESTS
# ########################
@pytest.mark.skip("temp")
def test_all_commands_represented_in_env_check_tests() -> None:
    # Guard: every crawled CLI command must appear in one of the context
    # lists above, so new commands cannot ship without an env-check test.
    commands = crawl_cli_commands()
    all_listed_commands = [
        spec.command
        for spec in [
            *NO_REQUIRED_CONTEXT_COMMANDS,
            *COMPONENT_LIBRARY_CONTEXT_COMMANDS,
            *PROJECT_CONTEXT_COMMANDS,
            *WORKSPACE_CONTEXT_COMMANDS,
            *WORKSPACE_OR_PROJECT_CONTEXT_COMMANDS,
            *REGISTRY_CONTEXT_COMMANDS,
        ]
    ]
    # key[1:] strips the leading element of each command path — presumably
    # the root command; verify against crawl_cli_commands.
    crawled_commands = [tuple(key[1:]) for key in commands.keys() if len(key) > 1]
    unlisted_commands = set(crawled_commands) - set(all_listed_commands)
    assert not unlisted_commands, f"Unlisted commands have no env tests: {unlisted_commands}"
@pytest.mark.parametrize(
    "spec",
    [
        *PROJECT_CONTEXT_COMMANDS,
    ],
    ids=lambda spec: "-".join(spec.command),
)
def test_no_local_venv_failure(spec: CommandSpec) -> None:
    # Project-context commands must fail (non-zero exit) when run in a bare
    # directory with no local virtual environment.
    if spec.command == ("docs", "serve"):
        pytest.skip("docs serve command hangs on this test")
    with ProxyRunner.test() as runner, runner.isolated_filesystem():
        result = runner.invoke(*spec.to_cli_args())
        assert_runner_result(result, exit_0=False)
@pytest.mark.parametrize(
    "spec",
    [
        *PROJECT_CONTEXT_COMMANDS,
    ],
    ids=lambda spec: "-".join(spec.command),
)
def test_no_local_dagster_components_failure(spec: CommandSpec) -> None:
    # After uninstalling `dagster` from the local venv, project-context
    # commands must fail rather than partially succeed.
    with (
        ProxyRunner.test(use_fixed_test_components=True) as runner,
        isolated_components_venv(runner),
    ):
        _uninstall_dagster_from_local_venv(Path.cwd())
        result = runner.invoke(*spec.to_cli_args())
        assert_runner_result(result, exit_0=False)
@pytest.mark.parametrize("spec", PROJECT_CONTEXT_COMMANDS, ids=lambda spec: "-".join(spec.command))
def test_no_project_failure(spec: CommandSpec) -> None:
    with (
        ProxyRunner.test(use_fixed_test_components=True) as runner,
        isolated_components_venv(runner),
    ):
        # Outside any project: plain failure, no redirection hint.
        result = runner.invoke(*spec.to_cli_args())
        assert_runner_result(result, exit_0=False)
        assert "must be run inside a Dagster project directory" in result.output

        # With a sibling project present, the error should suggest it.
        runner.invoke_create_dagster("project", "foo")
        result = runner.invoke(*spec.to_cli_args())
        assert_runner_result(result, exit_0=False)
        assert "must be run inside a Dagster project directory" in result.output
        assert "You may have wanted to" in result.output
        assert "/foo" in result.output
@pytest.mark.parametrize(
    "spec", COMPONENT_LIBRARY_CONTEXT_COMMANDS, ids=lambda spec: "-".join(spec.command)
)
def test_no_component_library_failure(spec: CommandSpec) -> None:
    # Component-library commands must fail with a targeted message when run
    # outside a component library directory.
    with (
        ProxyRunner.test(use_fixed_test_components=True) as runner,
        isolated_components_venv(runner),
    ):
        result = runner.invoke(*spec.to_cli_args())
        assert_runner_result(result, exit_0=False)
        assert "must be run inside a Dagster component library directory" in result.output
@pytest.mark.parametrize(
    "spec", WORKSPACE_CONTEXT_COMMANDS, ids=lambda spec: "-".join(spec.command)
)
def test_no_workspace_failure(spec: CommandSpec) -> None:
    with (
        ProxyRunner.test(use_fixed_test_components=True) as runner,
        isolated_components_venv(runner),
    ):
        # Outside any workspace: plain failure, no redirection hint.
        result = runner.invoke(*spec.to_cli_args())
        assert_runner_result(result, exit_0=False)
        assert "must be run inside a Dagster workspace directory" in result.output
        assert "You may have wanted to" not in result.output

        # With a sibling workspace present, the error should suggest it.
        runner.invoke_create_dagster("workspace", "foo")
        result = runner.invoke(*spec.to_cli_args())
        assert_runner_result(result, exit_0=False)
        assert "must be run inside a Dagster workspace directory" in result.output
        assert "You may have wanted to" in result.output
        assert "/foo" in result.output
@pytest.mark.parametrize(
    "spec", WORKSPACE_OR_PROJECT_CONTEXT_COMMANDS, ids=lambda spec: "-".join(spec.command)
)
def test_no_workspace_or_project_failure(spec: CommandSpec) -> None:
    with (
        ProxyRunner.test(use_fixed_test_components=True) as runner,
        isolated_components_venv(runner),
    ):
        # Outside any context: plain failure, no redirection hint.
        result = runner.invoke(*spec.to_cli_args())
        assert_runner_result(result, exit_0=False)
        assert "must be run inside a Dagster workspace or project directory" in result.output
        assert "You may have wanted to" not in result.output

        # With a sibling project present, the error should suggest it.
        runner.invoke_create_dagster("project", "foo")
        result = runner.invoke(*spec.to_cli_args())
        assert_runner_result(result, exit_0=False)
        assert "must be run inside a Dagster workspace or project directory" in result.output
        assert "You may have wanted to" in result.output
        assert "/foo" in result.output
# ########################
# ##### HELPERS
# ########################
# `dg scaffold` is special because global options have to be inserted before the
# subcommand name, instead of just at the end.
def _add_global_cli_options(cli_args: tuple[str, ...], *global_opts: str) -> list[str]:
if cli_args[0] == "scaffold":
return [cli_args[0], *global_opts, *cli_args[1:]]
else:
return [*cli_args, *global_opts]
def _uninstall_dagster_from_local_venv(path: Path) -> None:
    """Uninstall `dagster` from the venv resolvable from ``path``.

    Used by tests to simulate a project environment with dagster missing.
    """
    # Resolve from the given ``path``. Previously this resolved from
    # Path.cwd() while the assertion message referenced ``path``; the sole
    # caller passes Path.cwd(), so behavior is unchanged for it.
    local_venv = resolve_local_venv(path)
    assert local_venv, f"No local venv resolvable from {path}"
    subprocess.check_output(
        [
            "uv",
            "pip",
            "uninstall",
            "--python",
            str(get_venv_executable(local_venv)),
            "dagster",
        ],
    )
| CommandSpec |
python | dagster-io__dagster | python_modules/dagster-test/dagster_test/test_project/test_jobs/pending_repo.py | {
"start": 304,
"end": 1488
class ____(CacheableAssetsDefinition):
    # Fixed cacheable payload: a single output mapped to the asset key "bar".
    _cacheable_data = AssetsDefinitionCacheableData(keys_by_output_name={"result": AssetKey("bar")})

    def compute_cacheable_data(self):
        # make sure this never gets called in the normal course of a run
        assert os.getenv("IN_EXTERNAL_PROCESS") == "yes"
        return [self._cacheable_data]

    def build_definitions(self, data):
        # Rehydrate asset definitions from cached data; expects exactly the
        # payload produced by compute_cacheable_data above.
        assert len(data) == 1
        assert data == [self._cacheable_data]

        @op
        def _op(foo):
            return foo + 1

        return [
            AssetsDefinition.from_op(_op, keys_by_output_name=cd.keys_by_output_name) for cd in data
        ]
@asset
def foo():
    # Constant source asset; presumably feeds the `foo` input of the op built
    # in build_definitions above — verify against dagster's name-based wiring.
    return 1
def define_demo_execution_repo():
    # Imports deferred into the function so dagster_aws/dagster_docker are
    # only required when this repository is actually constructed.
    from dagster_aws.s3 import s3_pickle_io_manager, s3_resource
    from dagster_docker import docker_executor

    @repository
    def demo_execution_repo():
        return [
            with_resources([foo], {"s3": s3_resource, "io_manager": s3_pickle_io_manager}),
            MyCacheableAssetsDefinition("xyz"),
            define_asset_job("demo_job_docker", executor_def=docker_executor),
        ]

    return demo_execution_repo
| MyCacheableAssetsDefinition |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 5953,
"end": 7092
class ____(DashboardComponent):
    """How many tasks are on each worker"""

    @log_errors
    def __init__(self, scheduler, **kwargs):
        self.last = 0
        self.scheduler = scheduler
        # Placeholder histogram data; replaced on the first update() call.
        self.source = ColumnDataSource(
            {"left": [1, 2], "right": [10, 10], "top": [0, 0]}
        )

        self.root = figure(
            title="Tasks Processing (count)",
            name="processing",
            y_axis_label="frequency",
            tools="",
            **kwargs,
        )

        # Histograms read better without minor ticks, gridlines, or toolbar.
        self.root.xaxis.minor_tick_line_alpha = 0
        self.root.ygrid.visible = False

        self.root.toolbar_location = None

        # One quad per histogram bin, driven by the "left"/"right"/"top"
        # columns of self.source.
        self.root.quad(
            source=self.source,
            left="left",
            right="right",
            bottom=0,
            top="top",
            color="deepskyblue",
            fill_alpha=0.5,
        )

    @without_property_validation
    def update(self):
        # Histogram of per-worker processing-task counts, bucketed into 40
        # bins; bin edges feed "left"/"right", counts feed "top".
        L = [len(ws.processing) for ws in self.scheduler.workers.values()]
        counts, x = np.histogram(L, bins=40)
        self.source.data.update({"left": x[:-1], "right": x[1:], "top": counts})
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 134642,
"end": 142115
class ____(Layout):
    """
    A Tensor layout that we are allowed to change

    Assumption: layout change should NOT add or remove free symbols
    """

    allow_indexing = False

    # WARNING! This doesn't handle zero size tensors correctly
    @staticmethod
    def contiguous_strides(sizes: Sequence[int]) -> list[Expr]:
        # Row-major (C-contiguous) strides: innermost dimension has stride 1.
        if len(sizes) == 0:
            return []
        reversed_strides = [sympy.S.One]
        for size in reversed(sizes[1:]):
            reversed_strides.append(size * reversed_strides[-1])
        return list(reversed(reversed_strides))

    @staticmethod
    def fill_ordered(sizes: Sequence[int], order: Sequence[int]) -> list[Expr]:
        """
        Create a stride based on the order the dimensions should be filled in.

        In this format, channels last would be:
            [1, 3, 2, 0]
        """
        # `order` must be a permutation of the dimension indices.
        assert OrderedSet(range(len(sizes))) == OrderedSet(order), (sizes, order)
        next_stride = sympy.S.One
        strides = [None] * len(order)

        for i in order:
            strides[i] = next_stride
            next_stride = next_stride * sizes[i]
        return strides

    @staticmethod
    def stride_ordered(sizes: Sequence[int], order: Sequence[int]) -> Sequence[Expr]:
        """
        Create a stride based on the sorted order of a permuted range.

        In this format, channels last would be:
            [3, 0, 2, 1]
        """
        assert OrderedSet(range(len(sizes))) == OrderedSet(order)
        # Convert the stride order into a fill order, then reuse fill_ordered.
        fill_order = stride_order2fill_order(order)
        return FlexibleLayout.fill_ordered(sizes, fill_order)

    @staticmethod
    def stride_ordered_for_memory_format(
        sizes: Sequence[int], memory_format: torch.memory_format
    ) -> Sequence[Expr]:
        """
        Create a stride based on a memory format.

        Memory format is translasted into a stride order,
        so channels_last is the same as:
            FlexibleLayout.stride_ordered(sizes, [3, 0, 2, 1])

        This interface does not support memory_format `torch.preserve_format`
        which should be used to deduce a format from another source
        """
        if memory_format == torch.channels_last:
            return FlexibleLayout.stride_ordered(sizes, NHWC_STRIDE_ORDER)
        elif memory_format == torch.channels_last_3d:
            return FlexibleLayout.stride_ordered(sizes, NHWDC_STRIDE_ORDER)
        elif memory_format == torch.contiguous_format:
            return FlexibleLayout.contiguous_strides(sizes)
        else:
            log.debug(
                "stride_ordered_for_memory_format, unsuppored memory_format: %s",
                memory_format,
            )
            raise NotImplementedError

    @staticmethod
    def same_ordered(
        sizes: Sequence[int], stride: Sequence[_IntLike]
    ) -> Sequence[Expr]:
        """
        Create a stride that has the same stride order as given stride

        For example, if given stride is [1000, 1, 100, 10],
        the fill order should be [1, 3, 2, 0]
        """
        assert len(sizes) == len(stride)
        # Requires concrete (hintable) strides so they can be sorted.
        stride = [V.graph.sizevars.size_hint_or_throw(x) for x in stride]
        fill_order = sorted(range(len(stride)), key=stride.__getitem__)
        return FlexibleLayout.fill_ordered(sizes, fill_order)

    @property
    def size(self) -> Sequence[Expr]:
        return self._size

    @size.setter
    def size(self, value: Sequence[Expr]) -> None:
        # Setters enforce the class invariant documented above: mutating the
        # layout must not introduce or drop free symbols.
        self.assert_free_symbol_uses_unchanged("size", value)
        self._size = value

    @property
    def stride(self) -> Sequence[Expr]:
        return self._stride

    @stride.setter
    def stride(self, value: Sequence[Expr]) -> None:
        self.assert_free_symbol_uses_unchanged("stride", value)
        self._stride = value

    @property
    def offset(self) -> Expr:
        return self._offset

    @offset.setter
    def offset(self, value: Expr) -> None:
        self.assert_free_symbol_uses_unchanged("offset", value)
        self._offset = value

    def as_stride_order(
        self, order: Sequence[int], allow_padding: bool = False
    ) -> FixedLayout:
        # Freeze this flexible layout into a FixedLayout with the given
        # stride order, optionally padding strides when permitted.
        new_stride = self.stride_ordered(self.size, order)
        if self.should_pad_strides() and allow_padding:
            new_stride = self._pad_strides(new_stride, self.size, self.dtype)

        return FixedLayout(
            self.device,
            self.dtype,
            self.size,
            new_stride,
            self.offset,
            self.is_pinned,
        )

    def as_exact_strides(
        self, exact_strides: Sequence[_IntLike], allow_padding: bool = False
    ) -> FixedLayout:
        new_stride = exact_strides
        if self.should_pad_strides() and allow_padding:
            new_stride = self._pad_strides(new_stride, self.size, self.dtype)
        return FixedLayout(
            self.device,
            self.dtype,
            self.size,
            new_stride,
            self.offset,
            self.is_pinned,
        )

    def as_fill_order(self, order: Sequence[int]) -> FixedLayout:
        new_stride: Sequence[int] = self.fill_ordered(self.size, order)
        if self.should_pad_strides():
            new_stride = self._pad_strides(new_stride, self.size, self.dtype)
        return FixedLayout(
            self.device,
            self.dtype,
            self.size,
            new_stride,
            self.offset,
            self.is_pinned,
        )

    def as_same_order(self, stride: Sequence[_IntLike]) -> FixedLayout:
        new_stride = self.same_ordered(self.size, stride)
        if self.should_pad_strides():
            new_stride = self._pad_strides(new_stride, self.size, self.dtype)
        return FixedLayout(
            self.device,
            self.dtype,
            self.size,
            new_stride,
            self.offset,
            self.is_pinned,
        )

    def get_initial_free_symbol_uses(self) -> dict[tuple[str, bool], sympy.Symbol]:
        # Snapshot the free symbols of size/stride/offset, both including and
        # excluding backed symbols, keyed by (field, unbacked_only).
        initial_free_symbols = {}
        for name in ["size", "stride", "offset"]:
            for unbacked_only in [True, False]:
                key = (name, unbacked_only)
                initial_free_symbols[key] = OrderedSet(
                    get_free_symbols(getattr(self, name), unbacked_only)
                )

        return initial_free_symbols

    def assert_free_symbol_uses_unchanged(self, name: str, value: IterateExprs) -> None:
        # Compare the proposed value's free symbols against the snapshot
        # taken in __init__; any difference violates the layout invariant.
        for unbacked_only in [True, False]:
            old_free_symbols = self.initial_free_symbols[(name, unbacked_only)]
            new_free_symbols = OrderedSet(get_free_symbols(value, unbacked_only))
            assert new_free_symbols == old_free_symbols, (
                f"Expected free symbols unchanged, but got {new_free_symbols} vs {old_free_symbols}"
            )

    def __init__(
        self,
        device: torch.device,
        dtype: torch.dtype,
        size: Sequence[Expr],
        stride_order: Optional[Sequence[Union[int, Integer]]] = None,
        is_pinned: bool = False,
    ) -> None:
        if stride_order:
            strides = FlexibleLayout.fill_ordered(size, stride_order)
        else:
            strides = FlexibleLayout.contiguous_strides(size)
        super().__init__(device, dtype, size, strides, is_pinned=is_pinned)

        # record the initial free symbols to check that we do not add new free symbols
        # later when modifying sizes, strides, and offsets.
        self.initial_free_symbols = self.get_initial_free_symbol_uses()
| FlexibleLayout |
python | numba__numba | numba/parfors/array_analysis.py | {
"start": 40140,
"end": 124117
} | class ____(object):
aa_count = 0
"""Analyzes Numpy array computations for properties such as
shape/size equivalence, and keeps track of them on a per-block
basis. The analysis should only be run once because it modifies
the incoming IR by inserting assertion statements that safeguard
parfor optimizations.
"""
    def __init__(self, context, func_ir, typemap, calltypes):
        """Create a shape analysis over *func_ir*.

        Parameters
        ----------
        context : typing context, used to resolve call signatures
        func_ir : Numba IR of the function; mutated in place by run()
        typemap : dict mapping SSA variable name -> Numba type
        calltypes : dict mapping call expression -> resolved signature
        """
        self.context = context
        self.func_ir = func_ir
        self.typemap = typemap
        self.calltypes = calltypes
        # EquivSet of variables, indexed by block number
        self.equiv_sets = {}
        # keep attr calls to arrays like t=A.sum() as {t:('sum',A)}
        self.array_attr_calls = {}
        # keep attrs of objects (value,attr)->shape_var
        self.object_attrs = {}
        # keep prepended instructions from conditional branch
        self.prepends = {}
        # keep track of pruned precessors when branch degenerates to jump
        self.pruned_predecessors = {}
def get_equiv_set(self, block_label):
"""Return the equiv_set object of an block given its label.
"""
return self.equiv_sets[block_label]
def remove_redefineds(self, redefineds):
"""Take a set of variables in redefineds and go through all
the currently existing equivalence sets (created in topo order)
and remove that variable from all of them since it is multiply
defined within the function.
"""
unused = set()
for r in redefineds:
for eslabel in self.equiv_sets:
es = self.equiv_sets[eslabel]
es.define(r, unused)
    def run(self, blocks=None, equiv_set=None):
        """run array shape analysis on the given IR blocks, resulting in
        modified IR and finalized EquivSet for each block.

        blocks defaults to the whole function; equiv_set, when given,
        seeds the analysis instead of a fresh SymbolicEquivSet.
        """
        if blocks is None:
            blocks = self.func_ir.blocks
        # Definitions must be rebuilt over the *whole* function, even when
        # only a subset of blocks is analyzed.
        self.func_ir._definitions = build_definitions(self.func_ir.blocks)
        if equiv_set is None:
            init_equiv_set = SymbolicEquivSet(self.typemap)
        else:
            init_equiv_set = equiv_set
        self.alias_map, self.arg_aliases = find_potential_aliases(
            blocks,
            self.func_ir.arg_names,
            self.typemap,
            self.func_ir
        )
        # Class-level counter labels this run in debug output.
        aa_count_save = ArrayAnalysis.aa_count
        ArrayAnalysis.aa_count += 1
        if config.DEBUG_ARRAY_OPT >= 1:
            print("Starting ArrayAnalysis:", aa_count_save)
        dprint_func_ir(self.func_ir, "before array analysis", blocks)
        if config.DEBUG_ARRAY_OPT >= 1:
            print(
                "ArrayAnalysis variable types: ", sorted(self.typemap.items())
            )
            print("ArrayAnalysis call types: ", self.calltypes)
        cfg = compute_cfg_from_blocks(blocks)
        topo_order = find_topo_order(blocks, cfg=cfg)
        # Traverse blocks in topological order
        self._run_on_blocks(topo_order, blocks, cfg, init_equiv_set)
        if config.DEBUG_ARRAY_OPT >= 1:
            self.dump()
            print(
                "ArrayAnalysis post variable types: ",
                sorted(self.typemap.items()),
            )
            print("ArrayAnalysis post call types: ", self.calltypes)
        dprint_func_ir(self.func_ir, "after array analysis", blocks)
        if config.DEBUG_ARRAY_OPT >= 1:
            print("Ending ArrayAnalysis:", aa_count_save)
def _run_on_blocks(self, topo_order, blocks, cfg, init_equiv_set):
for label in topo_order:
if config.DEBUG_ARRAY_OPT >= 2:
print("Processing block:", label)
block = blocks[label]
scope = block.scope
pending_transforms = self._determine_transform(
cfg, block, label, scope, init_equiv_set
)
self._combine_to_new_block(block, pending_transforms)
def _combine_to_new_block(self, block, pending_transforms):
"""Combine the new instructions from previous pass into a new block
body.
"""
new_body = []
for inst, pre, post in pending_transforms:
for instr in pre:
new_body.append(instr)
new_body.append(inst)
for instr in post:
new_body.append(instr)
block.body = new_body
    def _determine_transform(self, cfg, block, label, scope, init_equiv_set):
        """Determine the transformation for each instruction in the block.

        Returns a list of (inst, pre, post) triples where pre/post are the
        instructions to insert around inst. Also records the block's final
        equivalence set in self.equiv_sets[label].
        """
        equiv_set = None
        # equiv_set is the intersection of predecessors
        preds = cfg.predecessors(label)
        # some incoming edge may be pruned due to prior analysis
        if label in self.pruned_predecessors:
            pruned = self.pruned_predecessors[label]
        else:
            pruned = []
        # Go through each incoming edge, process prepended instructions and
        # calculate beginning equiv_set of current block as an intersection
        # of incoming ones.
        if config.DEBUG_ARRAY_OPT >= 2:
            print("preds:", preds)
        for (p, q) in preds:
            if config.DEBUG_ARRAY_OPT >= 2:
                print("p, q:", p, q)
            if p in pruned:
                continue
            if p in self.equiv_sets:
                # Clone so prepended-instruction analysis cannot corrupt the
                # predecessor's own finalized set.
                from_set = self.equiv_sets[p].clone()
                if config.DEBUG_ARRAY_OPT >= 2:
                    print("p in equiv_sets", from_set)
                if (p, label) in self.prepends:
                    instrs = self.prepends[(p, label)]
                    for inst in instrs:
                        redefined = set()
                        self._analyze_inst(
                            label, scope, from_set, inst, redefined
                        )
                        # Remove anything multiply defined in this block
                        # from every block equivs.
                        # NOTE: necessary? can't observe effect in testsuite
                        self.remove_redefineds(redefined)
                if equiv_set is None:
                    equiv_set = from_set
                else:
                    equiv_set = equiv_set.intersect(from_set)
                    redefined = set()
                    equiv_set.union_defs(from_set.defs, redefined)
                    # Remove anything multiply defined in this block
                    # from every block equivs.
                    # NOTE: necessary? can't observe effect in testsuite
                    self.remove_redefineds(redefined)
        # Start with a new equiv_set if none is computed
        if equiv_set is None:
            equiv_set = init_equiv_set
        self.equiv_sets[label] = equiv_set
        # Go through instructions in a block, and insert pre/post
        # instructions as we analyze them.
        pending_transforms = []
        for inst in block.body:
            redefined = set()
            pre, post = self._analyze_inst(
                label, scope, equiv_set, inst, redefined
            )
            # Remove anything multiply defined in this block from every block
            # equivs.
            if len(redefined) > 0:
                self.remove_redefineds(redefined)
            pending_transforms.append((inst, pre, post))
        return pending_transforms
def dump(self):
"""dump per-block equivalence sets for debugging purposes.
"""
print("Array Analysis: ", self.equiv_sets)
    def _define(self, equiv_set, var, typ, value):
        """Register *var* as newly defined: record its type in the typemap,
        record *value* as its single definition, and add it to *equiv_set*."""
        self.typemap[var.name] = typ
        self.func_ir._definitions[var.name] = [value]
        # Throwaway set: callers of _define do not care about redefinitions.
        redefineds = set()
        equiv_set.define(var, redefineds, self.func_ir, typ)
    class AnalyzeResult(object):
        """Keyword-argument bag returned by the _analyze_op_* handlers.

        Recognized keys (see _analyze_inst): 'shape', 'pre', 'post', 'rhs'.
        """
        def __init__(self, **kwargs):
            self.kwargs = kwargs
    def _analyze_inst(self, label, scope, equiv_set, inst, redefined):
        """Analyze one IR statement, updating *equiv_set* and collecting
        variables redefined in this block into *redefined*.

        Returns (pre, post): instruction lists to insert before/after inst.
        """
        pre = []
        post = []
        if config.DEBUG_ARRAY_OPT >= 2:
            print("analyze_inst:", inst)
        if isinstance(inst, ir.Assign):
            lhs = inst.target
            typ = self.typemap[lhs.name]
            shape = None
            if isinstance(typ, types.ArrayCompatible) and typ.ndim == 0:
                # 0-d arrays have the empty shape.
                shape = ()
            elif isinstance(inst.value, ir.Expr):
                result = self._analyze_expr(scope, equiv_set, inst.value, lhs)
                if result:
                    require(isinstance(result, ArrayAnalysis.AnalyzeResult))
                    if 'shape' in result.kwargs:
                        shape = result.kwargs['shape']
                    if 'pre' in result.kwargs:
                        pre.extend(result.kwargs['pre'])
                    if 'post' in result.kwargs:
                        post.extend(result.kwargs['post'])
                    if 'rhs' in result.kwargs:
                        # Handler may replace the RHS expression entirely.
                        inst.value = result.kwargs['rhs']
            elif isinstance(inst.value, (ir.Var, ir.Const)):
                shape = inst.value
            elif isinstance(inst.value, ir.Global):
                gvalue = inst.value.value
                # only integer values can be part of shape
                # TODO: support cases with some but not all integer values or
                # nested tuples
                if (isinstance(gvalue, tuple)
                        and all(isinstance(v, int) for v in gvalue)):
                    shape = gvalue
                elif isinstance(gvalue, int):
                    shape = (gvalue,)
            elif isinstance(inst.value, ir.Arg):
                # Integer tuple arguments can themselves serve as shapes.
                if (
                    isinstance(typ, types.containers.UniTuple)
                    and isinstance(typ.dtype, types.Integer)
                ):
                    shape = inst.value
                elif (
                    isinstance(typ, types.containers.Tuple)
                    and all([isinstance(x,
                                        (types.Integer, types.IntegerLiteral))
                             for x in typ.types]
                            )
                ):
                    shape = inst.value
            # Normalize constant shapes to tuples of ir.Const / drop others.
            if isinstance(shape, ir.Const):
                if isinstance(shape.value, tuple):
                    loc = shape.loc
                    shape = tuple(ir.Const(x, loc) for x in shape.value)
                elif isinstance(shape.value, int):
                    shape = (shape,)
                else:
                    shape = None
            elif isinstance(shape, ir.Var) and isinstance(
                self.typemap[shape.name], types.Integer
            ):
                shape = (shape,)
            elif isinstance(shape, WrapIndexMeta):
                """ Here we've got the special WrapIndexMeta object
                    back from analyzing a wrap_index call.  We define
                    the lhs and then get it's equivalence class then
                    add the mapping from the tuple of slice size and
                    dimensional size equivalence ids to the lhs
                    equivalence id.
                """
                equiv_set.define(lhs, redefined, self.func_ir, typ)
                lhs_ind = equiv_set._get_ind(lhs.name)
                if lhs_ind != -1:
                    equiv_set.wrap_map[
                        (shape.slice_size, shape.dim_size)
                    ] = lhs_ind
                return pre, post
            if isinstance(typ, types.ArrayCompatible):
                if (
                    shape is not None
                    and isinstance(shape, ir.Var)
                    and isinstance(
                        self.typemap[shape.name], types.containers.BaseTuple
                    )
                ):
                    pass
                elif (
                    shape is None
                    or isinstance(shape, tuple)
                    or (
                        isinstance(shape, ir.Var)
                        and not equiv_set.has_shape(shape)
                    )
                ):
                    # Materialize a runtime A.shape call to name the dims.
                    shape = self._gen_shape_call(
                        equiv_set, lhs, typ.ndim, shape, post
                    )
            elif isinstance(typ, types.UniTuple):
                if shape and isinstance(typ.dtype, types.Integer):
                    shape = self._gen_shape_call(
                        equiv_set, lhs, len(typ), shape, post
                    )
            elif (
                isinstance(typ, types.containers.Tuple)
                and all([isinstance(x,
                                    (types.Integer, types.IntegerLiteral))
                         for x in typ.types]
                        )
            ):
                shape = self._gen_shape_call(
                    equiv_set, lhs, len(typ), shape, post
                )
            """ See the comment on the define() function.
                We need only call define(), which will invalidate a variable
                from being in the equivalence sets on multiple definitions,
                if the variable was not previously defined or if the new
                definition would be in a conflicting equivalence class to the
                original equivalence class for the variable.
                insert_equiv() returns True if either of these conditions are
                True and then we call define() in those cases.
                If insert_equiv() returns False then no changes were made and
                all equivalence classes are consistent upon a redefinition so
                no invalidation is needed and we don't call define().
            """
            needs_define = True
            if shape is not None:
                needs_define = equiv_set.insert_equiv(lhs, shape)
            if needs_define:
                equiv_set.define(lhs, redefined, self.func_ir, typ)
        elif isinstance(inst, (ir.StaticSetItem, ir.SetItem)):
            index = (
                inst.index if isinstance(inst, ir.SetItem) else inst.index_var
            )
            result = guard(
                self._index_to_shape, scope, equiv_set, inst.target, index
            )
            if not result:
                return [], []
            # result[0], when not None, is a replacement index variable with
            # negative indices rewritten; result[1] is the AnalyzeResult.
            if result[0] is not None:
                assert isinstance(inst, (ir.StaticSetItem, ir.SetItem))
                inst.index = result[0]
            result = result[1]
            target_shape = result.kwargs['shape']
            if 'pre' in result.kwargs:
                pre = result.kwargs['pre']
            value_shape = equiv_set.get_shape(inst.value)
            if value_shape == ():  # constant
                equiv_set.set_shape_setitem(inst, target_shape)
                return pre, []
            elif value_shape is not None:
                target_typ = self.typemap[inst.target.name]
                require(isinstance(target_typ, types.ArrayCompatible))
                target_ndim = target_typ.ndim
                shapes = [target_shape, value_shape]
                names = [inst.target.name, inst.value.name]
                broadcast_result = self._broadcast_assert_shapes(
                    scope, equiv_set, inst.loc, shapes, names
                )
                require('shape' in broadcast_result.kwargs)
                require('pre' in broadcast_result.kwargs)
                shape = broadcast_result.kwargs['shape']
                asserts = broadcast_result.kwargs['pre']
                n = len(shape)
                # shape dimension must be within target dimension
                assert target_ndim >= n
                equiv_set.set_shape_setitem(inst, shape)
                return pre + asserts, []
            else:
                return pre, []
        elif isinstance(inst, ir.Branch):
            # Closure: reads cond_var, which is bound below before any call.
            def handle_call_binop(cond_def):
                br = None
                if cond_def.fn == operator.eq:
                    br = inst.truebr
                    otherbr = inst.falsebr
                    cond_val = 1
                elif cond_def.fn == operator.ne:
                    br = inst.falsebr
                    otherbr = inst.truebr
                    cond_val = 0
                lhs_typ = self.typemap[cond_def.lhs.name]
                rhs_typ = self.typemap[cond_def.rhs.name]
                if br is not None and (
                    (
                        isinstance(lhs_typ, types.Integer)
                        and isinstance(rhs_typ, types.Integer)
                    )
                    or (
                        isinstance(lhs_typ, types.BaseTuple)
                        and isinstance(rhs_typ, types.BaseTuple)
                    )
                ):
                    loc = inst.loc
                    args = (cond_def.lhs, cond_def.rhs)
                    asserts = self._make_assert_equiv(
                        scope, loc, equiv_set, args
                    )
                    asserts.append(
                        ir.Assign(ir.Const(cond_val, loc), cond_var, loc)
                    )
                    # On the taken branch the equality is known to hold;
                    # record the asserts/constant to prepend on each edge.
                    self.prepends[(label, br)] = asserts
                    self.prepends[(label, otherbr)] = [
                        ir.Assign(ir.Const(1 - cond_val, loc), cond_var, loc)
                    ]
            cond_var = inst.cond
            cond_def = guard(get_definition, self.func_ir, cond_var)
            if not cond_def:  # phi variable has no single definition
                # We'll use equiv_set to try to find a cond_def instead
                equivs = equiv_set.get_equiv_set(cond_var)
                defs = []
                for name in equivs:
                    if isinstance(name, str) and name in self.typemap:
                        var_def = guard(
                            get_definition, self.func_ir, name, lhs_only=True
                        )
                        if isinstance(var_def, ir.Var):
                            var_def = var_def.name
                        if var_def:
                            defs.append(var_def)
                    else:
                        defs.append(name)
                defvars = set(filter(lambda x: isinstance(x, str), defs))
                defconsts = set(defs).difference(defvars)
                if len(defconsts) == 1:
                    cond_def = list(defconsts)[0]
                elif len(defvars) == 1:
                    cond_def = guard(
                        get_definition, self.func_ir, list(defvars)[0]
                    )
            if isinstance(cond_def, ir.Expr) and cond_def.op == 'binop':
                handle_call_binop(cond_def)
            elif isinstance(cond_def, ir.Expr) and cond_def.op == 'call':
                # this handles bool(predicate)
                glbl_bool = guard(get_definition, self.func_ir, cond_def.func)
                if glbl_bool is not None and glbl_bool.value is bool:
                    if len(cond_def.args) == 1:
                        condition = guard(get_definition, self.func_ir,
                                          cond_def.args[0])
                        if (condition is not None and
                                isinstance(condition, ir.Expr) and
                                condition.op == 'binop'):
                            handle_call_binop(condition)
            else:
                if isinstance(cond_def, ir.Const):
                    cond_def = cond_def.value
                if isinstance(cond_def, int) or isinstance(cond_def, bool):
                    # condition is always true/false, prune the outgoing edge
                    pruned_br = inst.falsebr if cond_def else inst.truebr
                    if pruned_br in self.pruned_predecessors:
                        self.pruned_predecessors[pruned_br].append(label)
                    else:
                        self.pruned_predecessors[pruned_br] = [label]
        elif type(inst) in array_analysis_extensions:
            # let external calls handle stmt if type matches
            f = array_analysis_extensions[type(inst)]
            pre, post = f(inst, equiv_set, self.typemap, self)
        return pre, post
def _analyze_expr(self, scope, equiv_set, expr, lhs):
fname = "_analyze_op_{}".format(expr.op)
try:
fn = getattr(self, fname)
except AttributeError:
return None
return guard(fn, scope, equiv_set, expr, lhs)
    def _analyze_op_getattr(self, scope, equiv_set, expr, lhs):
        """Derive shape information for attribute accesses (A.T, A.shape,
        A.real/imag, and generic attributes of aliased objects)."""
        # TODO: getattr of npytypes.Record
        if expr.attr == "T" and self._isarray(expr.value.name):
            # Transpose: same analysis as numpy.transpose(A).
            return self._analyze_op_call_numpy_transpose(
                scope, equiv_set, expr.loc, [expr.value], {}
            )
        elif expr.attr == "shape":
            shape = equiv_set.get_shape(expr.value)
            return ArrayAnalysis.AnalyzeResult(shape=shape)
        elif expr.attr in ("real", "imag") and self._isarray(expr.value.name):
            # Shape of real or imag attr is the same as the shape of the array
            # itself.
            return ArrayAnalysis.AnalyzeResult(shape=expr.value)
        elif self._isarray(lhs.name):
            # Generic object attribute yielding an array: cache one shape
            # per (canonical aliased object, attribute name) pair.
            canonical_value = get_canonical_alias(
                expr.value.name, self.alias_map
            )
            if (canonical_value, expr.attr) in self.object_attrs:
                return ArrayAnalysis.AnalyzeResult(
                    shape=self.object_attrs[(canonical_value, expr.attr)]
                )
            else:
                typ = self.typemap[lhs.name]
                post = []
                shape = self._gen_shape_call(
                    equiv_set, lhs, typ.ndim, None, post
                )
                self.object_attrs[(canonical_value, expr.attr)] = shape
                return ArrayAnalysis.AnalyzeResult(shape=shape, post=post)
        return None
def _analyze_op_cast(self, scope, equiv_set, expr, lhs):
return ArrayAnalysis.AnalyzeResult(shape=expr.value)
def _analyze_op_exhaust_iter(self, scope, equiv_set, expr, lhs):
var = expr.value
typ = self.typemap[var.name]
if isinstance(typ, types.BaseTuple):
require(len(typ) == expr.count)
require(equiv_set.has_shape(var))
return ArrayAnalysis.AnalyzeResult(shape=var)
return None
    def gen_literal_slice_part(
        self,
        arg_val,
        loc,
        scope,
        stmts,
        equiv_set,
        name="static_literal_slice_part",
    ):
        """Emit an IR constant assignment for the literal *arg_val* and
        register it with the equiv set.

        Returns (var, IntegerLiteral type) for the new constant; the
        assignment is appended to *stmts*.
        """
        # Create var to hold the calculated slice size.
        static_literal_slice_part_var = ir.Var(scope, mk_unique_var(name), loc)
        static_literal_slice_part_val = ir.Const(arg_val, loc)
        static_literal_slice_part_typ = types.IntegerLiteral(arg_val)
        # We'll prepend this slice size calculation to the get/setitem.
        stmts.append(
            ir.Assign(
                value=static_literal_slice_part_val,
                target=static_literal_slice_part_var,
                loc=loc,
            )
        )
        self._define(
            equiv_set,
            static_literal_slice_part_var,
            static_literal_slice_part_typ,
            static_literal_slice_part_val,
        )
        return static_literal_slice_part_var, static_literal_slice_part_typ
def gen_static_slice_size(
self, lhs_rel, rhs_rel, loc, scope, stmts, equiv_set
):
the_var, *_ = self.gen_literal_slice_part(
rhs_rel - lhs_rel,
loc,
scope,
stmts,
equiv_set,
name="static_slice_size",
)
return the_var
    def gen_explicit_neg(
        self,
        arg,
        arg_rel,
        arg_typ,
        size_typ,
        loc,
        scope,
        dsize,
        stmts,
        equiv_set,
    ):
        """Rewrite a known-negative slice bound as ``dsize + arg`` so the
        bound becomes a non-negative index relative to the dimension size.

        Returns (var, intp type) for the computed bound; the binop
        assignment is appended to *stmts*.
        """
        assert not isinstance(size_typ, int)
        # Create var to hold the calculated slice size.
        explicit_neg_var = ir.Var(scope, mk_unique_var("explicit_neg"), loc)
        explicit_neg_val = ir.Expr.binop(operator.add, dsize, arg, loc=loc)
        # Determine the type of that var.  Can be literal if we know the
        # literal size of the dimension.
        explicit_neg_typ = types.intp
        self.calltypes[explicit_neg_val] = signature(
            explicit_neg_typ, size_typ, arg_typ
        )
        # We'll prepend this slice size calculation to the get/setitem.
        stmts.append(
            ir.Assign(value=explicit_neg_val, target=explicit_neg_var, loc=loc)
        )
        self._define(
            equiv_set, explicit_neg_var, explicit_neg_typ, explicit_neg_val
        )
        return explicit_neg_var, explicit_neg_typ
    def update_replacement_slice(
        self,
        lhs,
        lhs_typ,
        lhs_rel,
        dsize_rel,
        replacement_slice,
        slice_index,
        need_replacement,
        loc,
        scope,
        stmts,
        equiv_set,
        size_typ,
        dsize,
    ):
        """Normalize one bound of a slice (selected by *slice_index*: 0 for
        start, 1 for stop) inside *replacement_slice*, folding compile-time
        known values and rewriting known-negative bounds via gen_explicit_neg.

        Returns (lhs, lhs_typ, lhs_rel, replacement_slice, need_replacement,
        known) where ``known`` means the bound's real value was computed at
        compile time.
        """
        # Do compile-time calculation of real index value if both the given
        # index value and the array length are known at compile time.
        known = False
        if isinstance(lhs_rel, int):
            # If the index and the array size are known then the real index
            # can be calculated at compile time.
            if lhs_rel == 0:
                # Special-case 0 as nothing needing to be done.
                known = True
            elif isinstance(dsize_rel, int):
                known = True
                # Calculate the real index.
                wil = wrap_index_literal(lhs_rel, dsize_rel)
                # If the given index value is between 0 and dsize then
                # there's no need to rewrite anything.
                if wil != lhs_rel:
                    if config.DEBUG_ARRAY_OPT >= 2:
                        print("Replacing slice to hard-code known slice size.")
                    # Indicate we will need to replace the slice var.
                    need_replacement = True
                    literal_var, literal_typ = self.gen_literal_slice_part(
                        wil, loc, scope, stmts, equiv_set
                    )
                    assert slice_index == 0 or slice_index == 1
                    if slice_index == 0:
                        replacement_slice.args = (
                            literal_var,
                            replacement_slice.args[1],
                        )
                    else:
                        replacement_slice.args = (
                            replacement_slice.args[0],
                            literal_var,
                        )
                    # Update lhs information with the negative removed.
                    lhs = replacement_slice.args[slice_index]
                    lhs_typ = literal_typ
                    lhs_rel = equiv_set.get_rel(lhs)
            elif lhs_rel < 0:
                # Negative bound but unknown dimension size: emit a runtime
                # dsize + bound computation.
                # Indicate we will need to replace the slice var.
                need_replacement = True
                if config.DEBUG_ARRAY_OPT >= 2:
                    print("Replacing slice due to known negative index.")
                explicit_neg_var, explicit_neg_typ = self.gen_explicit_neg(
                    lhs,
                    lhs_rel,
                    lhs_typ,
                    size_typ,
                    loc,
                    scope,
                    dsize,
                    stmts,
                    equiv_set,
                )
                if slice_index == 0:
                    replacement_slice.args = (
                        explicit_neg_var,
                        replacement_slice.args[1],
                    )
                else:
                    replacement_slice.args = (
                        replacement_slice.args[0],
                        explicit_neg_var,
                    )
                # Update lhs information with the negative removed.
                lhs = replacement_slice.args[slice_index]
                lhs_typ = explicit_neg_typ
                lhs_rel = equiv_set.get_rel(lhs)
        return (
            lhs,
            lhs_typ,
            lhs_rel,
            replacement_slice,
            need_replacement,
            known,
        )
def slice_size(self, index, dsize, equiv_set, scope, stmts):
"""Reason about the size of a slice represented by the "index"
variable, and return a variable that has this size data, or
raise GuardException if it cannot reason about it.
The computation takes care of negative values used in the slice
with respect to the given dimensional size ("dsize").
Extra statements required to produce the result are appended
to parent function's stmts list.
"""
loc = index.loc
# Get the definition of the index variable.
index_def = get_definition(self.func_ir, index)
fname, mod_name = find_callname(
self.func_ir, index_def, typemap=self.typemap
)
require(fname == 'slice' and mod_name in ('builtins'))
require(len(index_def.args) == 2)
lhs = index_def.args[0]
rhs = index_def.args[1]
size_typ = self.typemap[dsize.name]
lhs_typ = self.typemap[lhs.name]
rhs_typ = self.typemap[rhs.name]
if config.DEBUG_ARRAY_OPT >= 2:
print(f"slice_size index={index} dsize={dsize} "
f"index_def={index_def} lhs={lhs} rhs={rhs} "
f"size_typ={size_typ} lhs_typ={lhs_typ} rhs_typ={rhs_typ}")
# Make a deepcopy of the original slice to use as the
# replacement slice, which we will modify as necessary
# below to convert all negative constants in the slice
# to be relative to the dimension size.
replacement_slice = copy.deepcopy(index_def)
need_replacement = False
# Fill in the left side of the slice's ":" with 0 if it wasn't
# specified.
if isinstance(lhs_typ, types.NoneType):
zero_var = ir.Var(scope, mk_unique_var("zero"), loc)
zero = ir.Const(0, loc)
stmts.append(ir.Assign(value=zero, target=zero_var, loc=loc))
self._define(equiv_set, zero_var, types.IntegerLiteral(0), zero)
lhs = zero_var
lhs_typ = types.IntegerLiteral(0)
replacement_slice.args = (lhs, replacement_slice.args[1])
need_replacement = True
if config.DEBUG_ARRAY_OPT >= 2:
print("Replacing slice because lhs is None.")
# Fill in the right side of the slice's ":" with the array
# length if it wasn't specified.
if isinstance(rhs_typ, types.NoneType):
rhs = dsize
rhs_typ = size_typ
replacement_slice.args = (replacement_slice.args[0], rhs)
need_replacement = True
if config.DEBUG_ARRAY_OPT >= 2:
print("Replacing slice because lhs is None.")
lhs_rel = equiv_set.get_rel(lhs)
rhs_rel = equiv_set.get_rel(rhs)
dsize_rel = equiv_set.get_rel(dsize)
if config.DEBUG_ARRAY_OPT >= 2:
print(
"lhs_rel", lhs_rel, "rhs_rel", rhs_rel, "dsize_rel", dsize_rel
)
# Update replacement slice with the real index value if we can
# compute it at compile time.
[
lhs,
lhs_typ,
lhs_rel,
replacement_slice,
need_replacement,
lhs_known,
] = self.update_replacement_slice(
lhs,
lhs_typ,
lhs_rel,
dsize_rel,
replacement_slice,
0,
need_replacement,
loc,
scope,
stmts,
equiv_set,
size_typ,
dsize,
)
[
rhs,
rhs_typ,
rhs_rel,
replacement_slice,
need_replacement,
rhs_known,
] = self.update_replacement_slice(
rhs,
rhs_typ,
rhs_rel,
dsize_rel,
replacement_slice,
1,
need_replacement,
loc,
scope,
stmts,
equiv_set,
size_typ,
dsize,
)
if config.DEBUG_ARRAY_OPT >= 2:
print("lhs_known:", lhs_known)
print("rhs_known:", rhs_known)
# If neither of the parts of the slice were negative constants
# then we don't need to do slice replacement in the IR.
if not need_replacement:
replacement_slice_var = None
else:
# Create a new var for the replacement slice.
replacement_slice_var = ir.Var(
scope, mk_unique_var("replacement_slice"), loc
)
# Create a deepcopy of slice calltype so that when we change it
# below the original isn't changed. Make the types of the parts of
# the slice intp.
new_arg_typs = (types.intp, types.intp)
rs_calltype = self.typemap[index_def.func.name].get_call_type(
self.context, new_arg_typs, {}
)
self.calltypes[replacement_slice] = rs_calltype
stmts.append(
ir.Assign(
value=replacement_slice,
target=replacement_slice_var,
loc=loc,
)
)
# The type of the replacement slice is the same type as the
# original.
self.typemap[replacement_slice_var.name] = self.typemap[index.name]
if config.DEBUG_ARRAY_OPT >= 2:
print(
"after rewriting negatives",
"lhs_rel",
lhs_rel,
"rhs_rel",
rhs_rel,
)
if lhs_known and rhs_known:
if config.DEBUG_ARRAY_OPT >= 2:
print("lhs and rhs known so return static size")
return (
self.gen_static_slice_size(
lhs_rel, rhs_rel, loc, scope, stmts, equiv_set
),
replacement_slice_var,
)
if (
lhs_rel == 0
and isinstance(rhs_rel, tuple)
and equiv_set.is_equiv(dsize, rhs_rel[0])
and rhs_rel[1] == 0
):
return dsize, None
slice_typ = types.intp
orig_slice_typ = slice_typ
size_var = ir.Var(scope, mk_unique_var("slice_size"), loc)
size_val = ir.Expr.binop(operator.sub, rhs, lhs, loc=loc)
self.calltypes[size_val] = signature(slice_typ, rhs_typ, lhs_typ)
self._define(equiv_set, size_var, slice_typ, size_val)
size_rel = equiv_set.get_rel(size_var)
if config.DEBUG_ARRAY_OPT >= 2:
print("size_rel", size_rel, type(size_rel))
wrap_var = ir.Var(scope, mk_unique_var("wrap"), loc)
wrap_def = ir.Global("wrap_index", wrap_index, loc=loc)
fnty = get_global_func_typ(wrap_index)
sig = self.context.resolve_function_type(
fnty, (orig_slice_typ, size_typ), {}
)
self._define(equiv_set, wrap_var, fnty, wrap_def)
def gen_wrap_if_not_known(val, val_typ, known):
if not known:
var = ir.Var(scope, mk_unique_var("var"), loc)
var_typ = types.intp
new_value = ir.Expr.call(wrap_var, [val, dsize], {}, loc)
# def_res will be False if there is something unanalyzable
# that prevents a size association from being created.
self._define(equiv_set, var, var_typ, new_value)
self.calltypes[new_value] = sig
return (var, var_typ, new_value)
else:
return (val, val_typ, None)
var1, var1_typ, value1 = gen_wrap_if_not_known(lhs, lhs_typ, lhs_known)
var2, var2_typ, value2 = gen_wrap_if_not_known(rhs, rhs_typ, rhs_known)
stmts.append(ir.Assign(value=size_val, target=size_var, loc=loc))
stmts.append(ir.Assign(value=wrap_def, target=wrap_var, loc=loc))
if value1 is not None:
stmts.append(ir.Assign(value=value1, target=var1, loc=loc))
if value2 is not None:
stmts.append(ir.Assign(value=value2, target=var2, loc=loc))
post_wrap_size_var = ir.Var(
scope, mk_unique_var("post_wrap_slice_size"), loc
)
post_wrap_size_val = ir.Expr.binop(operator.sub,
var2,
var1,
loc=loc)
self.calltypes[post_wrap_size_val] = signature(
slice_typ, var2_typ, var1_typ
)
self._define(
equiv_set, post_wrap_size_var, slice_typ, post_wrap_size_val
)
stmts.append(
ir.Assign(
value=post_wrap_size_val, target=post_wrap_size_var, loc=loc
)
)
# rel_map keeps a map of relative sizes that we have seen so
# that if we compute the same relative sizes different times
# in different ways we can associate those two instances
# of the same relative size to the same equivalence class.
if isinstance(size_rel, tuple):
if config.DEBUG_ARRAY_OPT >= 2:
print("size_rel is tuple", equiv_set.rel_map)
rel_map_entry = None
for rme, rme_tuple in equiv_set.rel_map.items():
if rme[1] == size_rel[1] and equiv_set.is_equiv(
rme[0], size_rel[0]
):
rel_map_entry = rme_tuple
break
if rel_map_entry is not None:
# We have seen this relative size before so establish
# equivalence to the previous variable.
if config.DEBUG_ARRAY_OPT >= 2:
print("establishing equivalence to", rel_map_entry)
equiv_set.insert_equiv(size_var, rel_map_entry[0])
equiv_set.insert_equiv(post_wrap_size_var, rel_map_entry[1])
else:
# The first time we've seen this relative size so
# remember the variable defining that size.
equiv_set.rel_map[size_rel] = (size_var, post_wrap_size_var)
return post_wrap_size_var, replacement_slice_var
    def _index_to_shape(self, scope, equiv_set, var, ind_var):
        """For indexing like var[index] (either write or read), see if
        the index corresponds to a range/slice shape.

        Returns a 2-tuple where the first item is either None or a ir.Var
        to be used to replace the index variable in the outer getitem or
        setitem instruction.  The second item is also a tuple returning
        the shape and prepending instructions.
        """
        typ = self.typemap[var.name]
        require(isinstance(typ, types.ArrayCompatible))
        ind_typ = self.typemap[ind_var.name]
        ind_shape = equiv_set._get_shape(ind_var)
        var_shape = equiv_set._get_shape(var)
        # Normalize a lone slice to a 1-tuple so both cases share one path.
        if isinstance(ind_typ, types.SliceType):
            seq_typs = (ind_typ,)
            seq = (ind_var,)
        else:
            require(isinstance(ind_typ, types.BaseTuple))
            seq, op = find_build_sequence(self.func_ir, ind_var)
            require(op == "build_tuple")
            seq_typs = tuple(self.typemap[x.name] for x in seq)
        require(len(ind_shape) == len(seq_typs) == len(var_shape))
        stmts = []

        def to_shape(typ, index, dsize):
            # Slice index: size comes from slice_size; scalar index: the
            # dimension is consumed (no shape contribution).
            if isinstance(typ, types.SliceType):
                return self.slice_size(index, dsize, equiv_set, scope, stmts)
            elif isinstance(typ, types.Number):
                return None, None
            else:
                # unknown dimension size for this index,
                # so we'll raise GuardException
                require(False)
        shape_list = []
        index_var_list = []
        replace_index = False
        for (typ, size, dsize, orig_ind) in zip(seq_typs,
                                                ind_shape,
                                                var_shape,
                                                seq):
            # Convert the given dimension of the get/setitem index expr.
            shape_part, index_var_part = to_shape(typ, size, dsize)
            shape_list.append(shape_part)
            # to_shape will return index_var_part as not None if a
            # replacement of the slice is required to convert from
            # negative indices to positive relative indices.
            if index_var_part is not None:
                # Remember that we need to replace the build_tuple.
                replace_index = True
                index_var_list.append(index_var_part)
            else:
                index_var_list.append(orig_ind)
        # If at least one of the dimensions required a new slice variable
        # then we'll need to replace the build_tuple for this get/setitem.
        if replace_index:
            # Multi-dimensional array access needs a replacement tuple built.
            if len(index_var_list) > 1:
                # Make a variable to hold the new build_tuple.
                replacement_build_tuple_var = ir.Var(
                    scope,
                    mk_unique_var("replacement_build_tuple"),
                    ind_shape[0].loc,
                )
                # Create the build tuple from the accumulated index vars above.
                new_build_tuple = ir.Expr.build_tuple(
                    index_var_list, ind_shape[0].loc
                )
                stmts.append(
                    ir.Assign(
                        value=new_build_tuple,
                        target=replacement_build_tuple_var,
                        loc=ind_shape[0].loc,
                    )
                )
                # New build_tuple has same type as the original one.
                self.typemap[replacement_build_tuple_var.name] = ind_typ
            else:
                replacement_build_tuple_var = index_var_list[0]
        else:
            replacement_build_tuple_var = None
        shape = tuple(shape_list)
        # All-scalar indexing yields no shape: nothing to analyze.
        require(not all(x is None for x in shape))
        shape = tuple(x for x in shape if x is not None)
        return (replacement_build_tuple_var,
                ArrayAnalysis.AnalyzeResult(shape=shape, pre=stmts))
def _analyze_op_getitem(self, scope, equiv_set, expr, lhs):
result = self._index_to_shape(scope, equiv_set, expr.value, expr.index)
if result[0] is not None:
expr.index = result[0]
return result[1]
    def _analyze_op_static_getitem(self, scope, equiv_set, expr, lhs):
        """Derive shapes for static_getitem: array indexing goes through
        _index_to_shape; tuple indexing selects from the known shape."""
        var = expr.value
        typ = self.typemap[var.name]
        if not isinstance(typ, types.BaseTuple):
            result = self._index_to_shape(
                scope, equiv_set, expr.value, expr.index_var
            )
            # result[0], when not None, is a rewritten (negative-free) index.
            if result[0] is not None:
                expr.index_var = result[0]
            return result[1]
        shape = equiv_set._get_shape(var)
        if isinstance(expr.index, int):
            require(expr.index < len(shape))
            return ArrayAnalysis.AnalyzeResult(shape=shape[expr.index])
        elif isinstance(expr.index, slice):
            return ArrayAnalysis.AnalyzeResult(shape=shape[expr.index])
        # Any other static index kind is unanalyzable.
        require(False)
def _analyze_op_unary(self, scope, equiv_set, expr, lhs):
require(expr.fn in UNARY_MAP_OP)
# for scalars, only + operator results in equivalence
# for example, if "m = -n", m and n are not equivalent
if self._isarray(expr.value.name) or expr.fn == operator.add:
return ArrayAnalysis.AnalyzeResult(shape=expr.value)
return None
def _analyze_op_binop(self, scope, equiv_set, expr, lhs):
require(expr.fn in BINARY_MAP_OP)
return self._analyze_broadcast(
scope, equiv_set, expr.loc, [expr.lhs, expr.rhs], expr.fn
)
def _analyze_op_inplace_binop(self, scope, equiv_set, expr, lhs):
require(expr.fn in INPLACE_BINARY_MAP_OP)
return self._analyze_broadcast(
scope, equiv_set, expr.loc, [expr.lhs, expr.rhs], expr.fn
)
def _analyze_op_arrayexpr(self, scope, equiv_set, expr, lhs):
return self._analyze_broadcast(
scope, equiv_set, expr.loc, expr.list_vars(), None
)
    def _analyze_op_build_tuple(self, scope, equiv_set, expr, lhs):
        """Track a build_tuple's shape; when every item is a known constant,
        also fold the tuple into a single constant RHS."""
        # For the moment, we can't do anything with tuples that
        # contain multi-dimensional arrays, compared to array dimensions.
        # Return None to say we won't track this tuple if a part of it
        # is an array.
        for x in expr.items:
            if (
                isinstance(x, ir.Var)
                and isinstance(self.typemap[x.name], types.ArrayCompatible)
                and self.typemap[x.name].ndim > 1
            ):
                return None
        consts = []
        for var in expr.items:
            x = guard(find_const, self.func_ir, var)
            if x is not None:
                consts.append(x)
            else:
                break
        else:
            # for/else: only reached when *every* item was a constant.
            out = tuple([ir.Const(x, expr.loc) for x in consts])
            return ArrayAnalysis.AnalyzeResult(
                shape=out,
                rhs=ir.Const(tuple(consts), expr.loc)
            )
        # default return for non-const
        return ArrayAnalysis.AnalyzeResult(shape=tuple(expr.items))
    def _analyze_op_call(self, scope, equiv_set, expr, lhs):
        """Dispatch a call expression to namedtuple/stencil handling, known
        ufunc broadcasting, or a per-function _analyze_op_call_* handler."""
        from numba.stencils.stencil import StencilFunc

        callee = expr.func
        callee_def = get_definition(self.func_ir, callee)
        if isinstance(
            callee_def, (ir.Global, ir.FreeVar)
        ) and is_namedtuple_class(callee_def.value):
            # Constructing a namedtuple: its shape is its argument tuple.
            return ArrayAnalysis.AnalyzeResult(shape=tuple(expr.args))
        if isinstance(callee_def, (ir.Global, ir.FreeVar)) and isinstance(
            callee_def.value, StencilFunc
        ):
            args = expr.args
            return self._analyze_stencil(
                scope,
                equiv_set,
                callee_def.value,
                expr.loc,
                args,
                dict(expr.kws),
            )
        fname, mod_name = find_callname(
            self.func_ir, expr, typemap=self.typemap
        )
        added_mod_name = False
        # call via attribute (i.e. array.func)
        if isinstance(mod_name, ir.Var) and isinstance(
            self.typemap[mod_name.name], types.ArrayCompatible
        ):
            # Treat A.func(...) as numpy.func(A, ...).
            args = [mod_name] + expr.args
            mod_name = "numpy"
            # Remember that args and expr.args don't alias.
            added_mod_name = True
        else:
            args = expr.args
        # Handler method name, e.g. _analyze_op_call_numpy_zeros.
        fname = "_analyze_op_call_{}_{}".format(mod_name, fname).replace(
            ".", "_"
        )
        if fname in UFUNC_MAP_OP:  # known numpy ufuncs
            return self._analyze_broadcast(scope, equiv_set,
                                           expr.loc, args, None)
        else:
            try:
                fn = getattr(self, fname)
            except AttributeError:
                return None
            result = guard(
                fn,
                scope=scope,
                equiv_set=equiv_set,
                loc=expr.loc,
                args=args,
                kws=dict(expr.kws),
            )
            # We want the ability for function fn to modify arguments.
            # If args and expr.args don't alias then we need the extra
            # step of assigning back into expr.args from the args that
            # was passed to fn.
            if added_mod_name:
                expr.args = args[1:]
            return result
    def _analyze_op_call_builtins_len(self, scope, equiv_set, loc, args, kws):
        """len(A) on an array is the size of its first dimension.

        That size is returned both as the inferred "shape" and as a
        replacement rhs, so the len() call itself can be optimized away.
        """
        # python 3 version of len()
        require(len(args) == 1)
        var = args[0]
        typ = self.typemap[var.name]
        require(isinstance(typ, types.ArrayCompatible))
        shape = equiv_set._get_shape(var)
        return ArrayAnalysis.AnalyzeResult(shape=shape[0], rhs=shape[0])
def _analyze_op_call_numba_parfors_array_analysis_assert_equiv(
self, scope, equiv_set, loc, args, kws
):
equiv_set.insert_equiv(*args[1:])
return None
    def _analyze_op_call_numba_parfors_array_analysis_wrap_index(
        self, scope, equiv_set, loc, args, kws
    ):
        """ Analyze wrap_index calls added by a previous run of
        Array Analysis.

        wrap_index(slice_size, dim_size) results are memoized per pair of
        equivalence-class ids in equiv_set.wrap_map, so two wrap_index
        calls over equivalent inputs yield equivalent outputs.
        """
        require(len(args) == 2)
        # Two parts to wrap index, the specified slice size...
        slice_size = args[0].name
        # ...and the size of the dimension.
        dim_size = args[1].name
        # Get the equivalence class ids for both.
        slice_eq = equiv_set._get_or_add_ind(slice_size)
        dim_eq = equiv_set._get_or_add_ind(dim_size)
        # See if a previous wrap_index calls we've analyzed maps from
        # the same pair of equivalence class ids for slice and dim size.
        if (slice_eq, dim_eq) in equiv_set.wrap_map:
            wrap_ind = equiv_set.wrap_map[(slice_eq, dim_eq)]
            require(wrap_ind in equiv_set.ind_to_var)
            vs = equiv_set.ind_to_var[wrap_ind]
            require(vs != [])
            # Return the shape of the variable from the previous wrap_index.
            return ArrayAnalysis.AnalyzeResult(shape=(vs[0],))
        else:
            # We haven't seen this combination of slice and dim
            # equivalence class ids so return a WrapIndexMeta so that
            # _analyze_inst can establish the connection to the lhs var.
            return ArrayAnalysis.AnalyzeResult(
                shape=WrapIndexMeta(slice_eq, dim_eq)
            )
def _analyze_numpy_create_array(self, scope, equiv_set, loc, args, kws):
shape_var = None
if len(args) > 0:
shape_var = args[0]
elif "shape" in kws:
shape_var = kws["shape"]
if shape_var:
return ArrayAnalysis.AnalyzeResult(shape=shape_var)
raise errors.UnsupportedRewriteError(
"Must specify a shape for array creation",
loc=loc,
)
    # Array constructors: the output shape is the first argument (or the
    # 'shape' keyword), handled by the shared _analyze_numpy_create_array.
    def _analyze_op_call_numpy_empty(self, scope, equiv_set, loc, args, kws):
        return self._analyze_numpy_create_array(
            scope, equiv_set, loc, args, kws
        )
    def _analyze_op_call_numba_np_unsafe_ndarray_empty_inferred(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_numpy_create_array(
            scope, equiv_set, loc, args, kws
        )
    def _analyze_op_call_numpy_zeros(self, scope, equiv_set, loc, args, kws):
        return self._analyze_numpy_create_array(
            scope, equiv_set, loc, args, kws
        )
    def _analyze_op_call_numpy_ones(self, scope, equiv_set, loc, args, kws):
        return self._analyze_numpy_create_array(
            scope, equiv_set, loc, args, kws
        )
def _analyze_op_call_numpy_eye(self, scope, equiv_set, loc, args, kws):
if len(args) > 0:
N = args[0]
elif "N" in kws:
N = kws["N"]
else:
raise errors.UnsupportedRewriteError(
"Expect one argument (or 'N') to eye function",
loc=loc,
)
if "M" in kws:
M = kws["M"]
else:
M = N
return ArrayAnalysis.AnalyzeResult(shape=(N, M))
def _analyze_op_call_numpy_identity(
self, scope, equiv_set, loc, args, kws
):
assert len(args) > 0
N = args[0]
return ArrayAnalysis.AnalyzeResult(shape=(N, N))
    def _analyze_op_call_numpy_diag(self, scope, equiv_set, loc, args, kws):
        """np.diag: a 1-D input of size m yields an (m, m) matrix; a square
        2-D input yields its length-m diagonal.  Only handled when the
        diagonal offset k is 0 (or absent); otherwise returns None."""
        # We can only reason about the output shape when the input is 1D or
        # square 2D.
        assert len(args) > 0
        a = args[0]
        assert isinstance(a, ir.Var)
        atyp = self.typemap[a.name]
        if isinstance(atyp, types.ArrayCompatible):
            if atyp.ndim == 2:
                if "k" in kws: # will proceed only when k = 0 or absent
                    k = kws["k"]
                    if not equiv_set.is_equiv(k, 0):
                        return None
                (m, n) = equiv_set._get_shape(a)
                # Only a provably-square matrix has a known diagonal length.
                if equiv_set.is_equiv(m, n):
                    return ArrayAnalysis.AnalyzeResult(shape=(m,))
            elif atyp.ndim == 1:
                (m,) = equiv_set._get_shape(a)
                return ArrayAnalysis.AnalyzeResult(shape=(m, m))
        return None
    def _analyze_numpy_array_like(self, scope, equiv_set, args, kws):
        """Shared handler for calls that return an array shaped like their
        first argument: scalars (integers) become 1-element 1-D arrays,
        arrays with a known shape keep it.  Returns None otherwise."""
        assert len(args) > 0
        var = args[0]
        typ = self.typemap[var.name]
        if isinstance(typ, types.Integer):
            return ArrayAnalysis.AnalyzeResult(shape=(1,))
        elif isinstance(typ, types.ArrayCompatible) and equiv_set.has_shape(
            var
        ):
            return ArrayAnalysis.AnalyzeResult(shape=var)
        return None
    def _analyze_op_call_numpy_ravel(self, scope, equiv_set, loc, args, kws):
        """np.ravel / a.ravel(): a 1-D input keeps its shape; with 'C'
        layout the call is a no-op, so the input is also returned as a
        replacement rhs to optimize the call away."""
        assert len(args) == 1
        var = args[0]
        typ = self.typemap[var.name]
        assert isinstance(typ, types.ArrayCompatible)
        # output array is same shape as input if input is 1D
        if typ.ndim == 1 and equiv_set.has_shape(var):
            if typ.layout == "C":
                # output is the same as input (no copy) for 'C' layout
                # optimize out the call
                return ArrayAnalysis.AnalyzeResult(shape=var, rhs=var)
            else:
                return ArrayAnalysis.AnalyzeResult(shape=var)
        # TODO: handle multi-D input arrays (calc array size)
        return None
    # The *_like family, copy and asfortranarray all produce an array with
    # the same shape as their first argument.
    def _analyze_op_call_numpy_copy(self, scope, equiv_set, loc, args, kws):
        return self._analyze_numpy_array_like(scope, equiv_set, args, kws)
    def _analyze_op_call_numpy_empty_like(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_numpy_array_like(scope, equiv_set, args, kws)
    def _analyze_op_call_numpy_zeros_like(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_numpy_array_like(scope, equiv_set, args, kws)
    def _analyze_op_call_numpy_ones_like(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_numpy_array_like(scope, equiv_set, args, kws)
    def _analyze_op_call_numpy_full_like(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_numpy_array_like(scope, equiv_set, args, kws)
    def _analyze_op_call_numpy_asfortranarray(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_numpy_array_like(scope, equiv_set, args, kws)
    def _analyze_op_call_numpy_reshape(self, scope, equiv_set, loc, args, kws):
        """np.reshape: the output shape is the reshape arguments.

        A single tuple argument is used directly.  One negative (inferred)
        dimension is allowed; it is replaced by generated 'pre' code that
        divides the input's total size by the other dimension sizes, so
        the equivalence machinery never sees a negative size.
        """
        n = len(args)
        assert n > 1
        if n == 2:
            typ = self.typemap[args[1].name]
            if isinstance(typ, types.BaseTuple):
                return ArrayAnalysis.AnalyzeResult(shape=args[1])
        # Reshape is allowed to take one argument that has the value <0.
        # This means that the size of that dimension should be inferred from
        # the size of the array being reshaped and the other dimensions
        # specified. Our general approach here is to see if the reshape
        # has any <0 arguments. If it has more than one then throw a
        # ValueError. If exactly one <0 argument is found, remember its
        # argument index.
        stmts = []
        neg_one_index = -1
        for arg_index in range(1, len(args)):
            reshape_arg = args[arg_index]
            reshape_arg_def = guard(get_definition, self.func_ir, reshape_arg)
            if isinstance(reshape_arg_def, ir.Const):
                if reshape_arg_def.value < 0:
                    if neg_one_index == -1:
                        neg_one_index = arg_index
                    else:
                        msg = ("The reshape API may only include one negative"
                               " argument.")
                        raise errors.UnsupportedRewriteError(
                            msg, loc=reshape_arg.loc
                        )
        if neg_one_index >= 0:
            # If exactly one <0 argument to reshape was found, then we are
            # going to insert code to calculate the missing dimension and then
            # replace the negative with the calculated size. We do this
            # because we can't let array equivalence analysis think that some
            # array has a negative dimension size.
            loc = args[0].loc
            # Create a variable to hold the size of the array being reshaped.
            calc_size_var = ir.Var(scope, mk_unique_var("calc_size_var"), loc)
            self.typemap[calc_size_var.name] = types.intp
            # Assign the size of the array calc_size_var.
            init_calc_var = ir.Assign(
                ir.Expr.getattr(args[0], "size", loc), calc_size_var, loc
            )
            stmts.append(init_calc_var)
            # For each other dimension, divide the current size by the
            # specified dimension size. Once all such dimensions have been
            # done then what is left is the size of the negative dimension.
            for arg_index in range(1, len(args)):
                # Skip the negative dimension.
                if arg_index == neg_one_index:
                    continue
                div_calc_size_var = ir.Var(
                    scope, mk_unique_var("calc_size_var"), loc
                )
                self.typemap[div_calc_size_var.name] = types.intp
                # Calculate the next size as current size // the current arg's
                # dimension size.
                new_binop = ir.Expr.binop(
                    operator.floordiv, calc_size_var, args[arg_index], loc
                )
                div_calc = ir.Assign(new_binop, div_calc_size_var, loc)
                self.calltypes[new_binop] = signature(
                    types.intp, types.intp, types.intp
                )
                stmts.append(div_calc)
                calc_size_var = div_calc_size_var
            # Put the calculated value back into the reshape arguments,
            # replacing the negative.
            args[neg_one_index] = calc_size_var
        return ArrayAnalysis.AnalyzeResult(shape=tuple(args[1:]), pre=stmts)
    def _analyze_op_call_numpy_transpose(
        self, scope, equiv_set, loc, args, kws
    ):
        """np.transpose: without axes the shape is reversed; with constant
        axes it is the input shape permuted accordingly.  Non-constant
        axes give up (None)."""
        in_arr = args[0]
        typ = self.typemap[in_arr.name]
        assert isinstance(
            typ, types.ArrayCompatible
        ), "Invalid np.transpose argument"
        shape = equiv_set._get_shape(in_arr)
        if len(args) == 1:
            return ArrayAnalysis.AnalyzeResult(shape=tuple(reversed(shape)))
        axes = [guard(find_const, self.func_ir, a) for a in args[1:]]
        # Axes may be given as a single tuple argument instead of varargs.
        if isinstance(axes[0], tuple):
            axes = list(axes[0])
        if None in axes:
            return None
        ret = [shape[i] for i in axes]
        return ArrayAnalysis.AnalyzeResult(shape=tuple(ret))
    # np.random.rand/randn take the output shape directly as their
    # positional arguments; with no arguments they return a scalar, for
    # which no shape is tracked (None).
    def _analyze_op_call_numpy_random_rand(
        self, scope, equiv_set, loc, args, kws
    ):
        if len(args) > 0:
            return ArrayAnalysis.AnalyzeResult(shape=tuple(args))
        return None
    def _analyze_op_call_numpy_random_randn(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_call_numpy_random_rand(
            scope, equiv_set, loc, args, kws
        )
    def _analyze_op_numpy_random_with_size(
        self, pos, scope, equiv_set, args, kws
    ):
        """Common helper for np.random.* distributions: the output shape is
        the 'size' keyword when present, else the positional argument at
        index *pos* (the distribution's own parameters come first)."""
        if "size" in kws:
            return ArrayAnalysis.AnalyzeResult(shape=kws["size"])
        if len(args) > pos:
            return ArrayAnalysis.AnalyzeResult(shape=args[pos])
        return None
    # np.random distribution handlers: all delegate to
    # _analyze_op_numpy_random_with_size; the integer is the positional
    # index of the 'size' argument (i.e. how many distribution parameters
    # precede it in the signature).
    def _analyze_op_call_numpy_random_ranf(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            0, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_random_sample(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            0, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_sample(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            0, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_random(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            0, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_standard_normal(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            0, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_chisquare(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            1, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_weibull(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            1, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_power(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            1, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_geometric(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            1, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_exponential(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            1, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_poisson(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            1, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_rayleigh(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            1, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_normal(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            2, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_uniform(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            2, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_beta(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            2, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_binomial(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            2, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_f(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            2, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_gamma(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            2, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_lognormal(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            2, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_laplace(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            2, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_randint(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            2, scope, equiv_set, args, kws
        )
    def _analyze_op_call_numpy_random_triangular(
        self, scope, equiv_set, loc, args, kws
    ):
        return self._analyze_op_numpy_random_with_size(
            3, scope, equiv_set, args, kws
        )
def _analyze_op_call_numpy_concatenate(
self, scope, equiv_set, loc, args, kws
):
assert len(args) > 0
loc = args[0].loc
seq, op = find_build_sequence(self.func_ir, args[0])
n = len(seq)
require(n > 0)
axis = 0
if "axis" in kws:
if isinstance(kws["axis"], int): # internal use only
axis = kws["axis"]
else:
axis = find_const(self.func_ir, kws["axis"])
elif len(args) > 1:
axis = find_const(self.func_ir, args[1])
require(isinstance(axis, int))
require(op == "build_tuple")
shapes = [equiv_set._get_shape(x) for x in seq]
if axis < 0:
axis = len(shapes[0]) + axis
require(0 <= axis < len(shapes[0]))
asserts = []
new_shape = []
if n == 1: # from one array N-dimension to (N-1)-dimension
shape = shapes[0]
# first size is the count, pop it out of shapes
n = equiv_set.get_equiv_const(shapes[0])
shape.pop(0)
for i in range(len(shape)):
if i == axis:
m = equiv_set.get_equiv_const(shape[i])
size = m * n if (m and n) else None
else:
size = self._sum_size(equiv_set, shapes[0])
new_shape.append(size)
else: # from n arrays N-dimension to N-dimension
for i in range(len(shapes[0])):
if i == axis:
size = self._sum_size(
equiv_set, [shape[i] for shape in shapes]
)
else:
sizes = [shape[i] for shape in shapes]
asserts.append(
self._call_assert_equiv(scope, loc, equiv_set, sizes)
)
size = sizes[0]
new_shape.append(size)
return ArrayAnalysis.AnalyzeResult(
shape=tuple(new_shape),
pre=sum(asserts, [])
)
    def _analyze_op_call_numpy_stack(self, scope, equiv_set, loc, args, kws):
        """np.stack: all inputs must be shape-equivalent; the output shape
        is the common input shape with the input count n inserted at
        ``axis`` (negative axis counts from the new, larger rank)."""
        assert len(args) > 0
        loc = args[0].loc
        seq, op = find_build_sequence(self.func_ir, args[0])
        n = len(seq)
        require(n > 0)
        axis = 0
        if "axis" in kws:
            if isinstance(kws["axis"], int): # internal use only
                axis = kws["axis"]
            else:
                axis = find_const(self.func_ir, kws["axis"])
        elif len(args) > 1:
            axis = find_const(self.func_ir, args[1])
        require(isinstance(axis, int))
        # only build_tuple can give reliable count
        require(op == "build_tuple")
        shapes = [equiv_set._get_shape(x) for x in seq]
        asserts = self._call_assert_equiv(scope, loc, equiv_set, seq)
        shape = shapes[0]
        if axis < 0:
            axis = len(shape) + axis + 1
        require(0 <= axis <= len(shape))
        new_shape = list(shape[0:axis]) + [n] + list(shape[axis:])
        return ArrayAnalysis.AnalyzeResult(shape=tuple(new_shape), pre=asserts)
def _analyze_op_call_numpy_vstack(self, scope, equiv_set, loc, args, kws):
assert len(args) == 1
seq, op = find_build_sequence(self.func_ir, args[0])
n = len(seq)
require(n > 0)
typ = self.typemap[seq[0].name]
require(isinstance(typ, types.ArrayCompatible))
if typ.ndim < 2:
return self._analyze_op_call_numpy_stack(
scope, equiv_set, loc, args, kws
)
else:
kws["axis"] = 0
return self._analyze_op_call_numpy_concatenate(
scope, equiv_set, loc, args, kws
)
def _analyze_op_call_numpy_hstack(self, scope, equiv_set, loc, args, kws):
assert len(args) == 1
seq, op = find_build_sequence(self.func_ir, args[0])
n = len(seq)
require(n > 0)
typ = self.typemap[seq[0].name]
require(isinstance(typ, types.ArrayCompatible))
if typ.ndim < 2:
kws["axis"] = 0
else:
kws["axis"] = 1
return self._analyze_op_call_numpy_concatenate(
scope, equiv_set, loc, args, kws
)
    def _analyze_op_call_numpy_dstack(self, scope, equiv_set, loc, args, kws):
        """np.dstack: stack along the third axis.  1-D inputs are stacked
        on axis 1 and the result gets a leading 1 (shape (1, m, n)); 2-D
        inputs are stacked on axis 2; higher ranks concatenate on axis 2.
        """
        assert len(args) == 1
        seq, op = find_build_sequence(self.func_ir, args[0])
        n = len(seq)
        require(n > 0)
        typ = self.typemap[seq[0].name]
        require(isinstance(typ, types.ArrayCompatible))
        if typ.ndim == 1:
            kws["axis"] = 1
            result = self._analyze_op_call_numpy_stack(
                scope, equiv_set, loc, args, kws
            )
            require(result)
            # dstack promotes 1-D inputs to (1, m): prepend a literal 1 to
            # the shape computed by the stack analysis.
            result.kwargs['shape'] = tuple([1] + list(result.kwargs['shape']))
            return result
        elif typ.ndim == 2:
            kws["axis"] = 2
            return self._analyze_op_call_numpy_stack(
                scope, equiv_set, loc, args, kws
            )
        else:
            kws["axis"] = 2
            return self._analyze_op_call_numpy_concatenate(
                scope, equiv_set, loc, args, kws
            )
def _analyze_op_call_numpy_cumsum(self, scope, equiv_set, loc, args, kws):
# TODO
return None
def _analyze_op_call_numpy_cumprod(self, scope, equiv_set, loc, args, kws):
# TODO
return None
def _analyze_op_call_numpy_linspace(
self, scope, equiv_set, loc, args, kws
):
n = len(args)
num = 50
if n > 2:
num = args[2]
elif "num" in kws:
num = kws["num"]
return ArrayAnalysis.AnalyzeResult(shape=(num,))
    def _analyze_op_call_numpy_dot(self, scope, equiv_set, loc, args, kws):
        """np.dot: asserts the contracted dimensions are equivalent and
        returns the contracted shape.  Handles vector-matrix,
        matrix-vector and matrix-matrix; vector-vector (a scalar) and
        >2-D cases are not tracked (None)."""
        n = len(args)
        assert n >= 2
        loc = args[0].loc
        require(all([self._isarray(x.name) for x in args]))
        typs = [self.typemap[x.name] for x in args]
        dims = [ty.ndim for ty in typs]
        require(all(x > 0 for x in dims))
        if dims[0] == 1 and dims[1] == 1:
            # vector . vector is a scalar: nothing to track.
            return None
        shapes = [equiv_set._get_shape(x) for x in args]
        if dims[0] == 1:
            # (k,) . (..., k, m) -> (..., m)
            asserts = self._call_assert_equiv(
                scope, loc, equiv_set, [shapes[0][0], shapes[1][-2]]
            )
            return ArrayAnalysis.AnalyzeResult(
                shape=tuple(shapes[1][0:-2] + shapes[1][-1:]),
                pre=asserts
            )
        if dims[1] == 1:
            # (..., k) . (k,) -> (...)
            asserts = self._call_assert_equiv(
                scope, loc, equiv_set, [shapes[0][-1], shapes[1][0]]
            )
            return ArrayAnalysis.AnalyzeResult(
                shape=tuple(shapes[0][0:-1]),
                pre=asserts
            )
        if dims[0] == 2 and dims[1] == 2:
            # (m, k) . (k, n) -> (m, n)
            asserts = self._call_assert_equiv(
                scope, loc, equiv_set, [shapes[0][1], shapes[1][0]]
            )
            return ArrayAnalysis.AnalyzeResult(
                shape=(shapes[0][0], shapes[1][1]),
                pre=asserts
            )
        if dims[0] > 2: # TODO: handle higher dimension cases
            pass
        return None
    def _analyze_stencil(self, scope, equiv_set, stencil_func, loc, args, kws):
        """Infer the output shape of a StencilFunc call.

        All relatively-indexed array arguments (those not listed in the
        stencil's 'standard_indexing' option) must have the same shape;
        that common shape is the output shape.
        """
        # stencil requires that all relatively indexed array arguments are
        # of same size
        std_idx_arrs = stencil_func.options.get("standard_indexing", ())
        kernel_arg_names = stencil_func.kernel_ir.arg_names
        if isinstance(std_idx_arrs, str):
            std_idx_arrs = (std_idx_arrs,)
        rel_idx_arrs = []
        assert len(args) > 0 and len(args) == len(kernel_arg_names)
        for arg, var in zip(kernel_arg_names, args):
            typ = self.typemap[var.name]
            if isinstance(typ, types.ArrayCompatible) and not (
                arg in std_idx_arrs
            ):
                rel_idx_arrs.append(var)
        n = len(rel_idx_arrs)
        require(n > 0)
        asserts = self._call_assert_equiv(scope, loc, equiv_set, rel_idx_arrs)
        shape = equiv_set.get_shape(rel_idx_arrs[0])
        return ArrayAnalysis.AnalyzeResult(shape=shape, pre=asserts)
def _analyze_op_call_numpy_linalg_inv(
self, scope, equiv_set, loc, args, kws
):
require(len(args) >= 1)
return ArrayAnalysis.AnalyzeResult(shape=equiv_set._get_shape(args[0]))
    def _analyze_broadcast(self, scope, equiv_set, loc, args, fn):
        """Infer shape equivalence of arguments based on Numpy broadcast rules
        and return shape of output
        https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html

        Also special-cases tuple + tuple (tuple concatenation), since it
        reaches here through the binop path.
        """
        tups = list(filter(lambda a: self._istuple(a.name), args))
        # Here we have a tuple concatenation.
        if len(tups) == 2 and fn.__name__ == 'add':
            # If either of the tuples is empty then the resulting shape
            # is just the other tuple.
            tup0typ = self.typemap[tups[0].name]
            tup1typ = self.typemap[tups[1].name]
            if tup0typ.count == 0:
                return ArrayAnalysis.AnalyzeResult(
                    shape=equiv_set.get_shape(tups[1])
                )
            if tup1typ.count == 0:
                return ArrayAnalysis.AnalyzeResult(
                    shape=equiv_set.get_shape(tups[0])
                )
            try:
                shapes = [equiv_set.get_shape(x) for x in tups]
                if None in shapes:
                    return None
                concat_shapes = sum(shapes, ())
                return ArrayAnalysis.AnalyzeResult(
                    shape=concat_shapes
                )
            except GuardException:
                return None
        # else arrays
        arrs = list(filter(lambda a: self._isarray(a.name), args))
        require(len(arrs) > 0)
        names = [x.name for x in arrs]
        dims = [self.typemap[x.name].ndim for x in arrs]
        max_dim = max(dims)
        require(max_dim > 0)
        try:
            shapes = [equiv_set.get_shape(x) for x in arrs]
        except GuardException:
            # Shapes unavailable: just assert all arrays equivalent and use
            # the first one as the output shape.
            return ArrayAnalysis.AnalyzeResult(
                shape=arrs[0],
                pre=self._call_assert_equiv(scope, loc, equiv_set, arrs)
            )
        pre = []
        if None in shapes:
            # There is at least 1 shape that we don't know,
            # so we need to generate that shape now.
            new_shapes = []
            for i, s in enumerate(shapes):
                if s is None:
                    var = arrs[i]
                    typ = self.typemap[var.name]
                    shape = self._gen_shape_call(
                        equiv_set, var, typ.ndim, None, pre
                    )
                    new_shapes.append(shape)
                else:
                    new_shapes.append(s)
            shapes = new_shapes
        result = self._broadcast_assert_shapes(
            scope, equiv_set, loc, shapes, names
        )
        if pre:
            # If we had to generate a shape we have to insert
            # that code before the broadcast assertion.
            if 'pre' in result.kwargs:
                prev_pre = result.kwargs['pre']
            else:
                prev_pre = []
            result.kwargs['pre'] = pre + prev_pre
        return result
    def _broadcast_assert_shapes(self, scope, equiv_set, loc, shapes, names):
        """Produce assert_equiv for sizes in each dimension, taking into
        account of dimension coercion and constant size of 1.

        Dimensions are matched from the trailing end (numpy broadcast
        order); constant-1 sizes broadcast and are excluded from the
        equivalence assertions.
        """
        asserts = []
        new_shape = []
        max_dim = max([len(shape) for shape in shapes])
        const_size_one = None
        for i in range(max_dim):
            sizes = []
            size_names = []
            for name, shape in zip(names, shapes):
                if i < len(shape):
                    # i counts dimensions from the right (broadcast order).
                    size = shape[len(shape) - 1 - i]
                    const_size = equiv_set.get_equiv_const(size)
                    if const_size == 1:
                        const_size_one = size
                    else:
                        sizes.append(size) # non-1 size to front
                        size_names.append(name)
            if sizes == []:
                # Every input broadcasts here: the output size is 1.
                assert const_size_one is not None
                sizes.append(const_size_one)
                size_names.append("1")
            asserts.append(
                self._call_assert_equiv(
                    scope, loc, equiv_set, sizes, names=size_names
                )
            )
            new_shape.append(sizes[0])
        return ArrayAnalysis.AnalyzeResult(
            shape=tuple(reversed(new_shape)),
            pre=sum(asserts, [])
        )
    def _call_assert_equiv(self, scope, loc, equiv_set, args, names=None):
        """Build assert_equiv instructions for *args* and then record them
        as equivalent in *equiv_set*.

        Order matters: the instructions must be generated first, since
        _make_assert_equiv skips sizes that are already known equivalent.
        """
        insts = self._make_assert_equiv(
            scope, loc, equiv_set, args, names=names
        )
        if len(args) > 1:
            equiv_set.insert_equiv(*args)
        return insts
    def _make_assert_equiv(self, scope, loc, equiv_set, _args, names=None):
        """Generate the IR for a runtime assert_equiv(msg, *sizes) call.

        Sizes already known equivalent are filtered out first; when fewer
        than two distinct sizes remain no instructions are emitted.
        Returns the list of ir.Assign statements to insert.
        """
        # filter out those that are already equivalent
        if config.DEBUG_ARRAY_OPT >= 2:
            print("make_assert_equiv:", _args, names)
        if names is None:
            names = [x.name for x in _args]
        args = []
        arg_names = []
        for name, x in zip(names, _args):
            if config.DEBUG_ARRAY_OPT >= 2:
                print("name, x:", name, x)
            seen = False
            for y in args:
                if config.DEBUG_ARRAY_OPT >= 2:
                    print("is equiv to?", y, equiv_set.is_equiv(x, y))
                if equiv_set.is_equiv(x, y):
                    seen = True
                    break
            if not seen:
                args.append(x)
                arg_names.append(name)
        # no assertion necessary if there are less than two
        if len(args) < 2:
            if config.DEBUG_ARRAY_OPT >= 2:
                print(
                    "Will not insert assert_equiv as args are known to be "
                    "equivalent."
                )
            return []
        msg = "Sizes of {} do not match on {}".format(
            ", ".join(arg_names), loc
        )
        msg_val = ir.Const(msg, loc)
        msg_typ = types.StringLiteral(msg)
        msg_var = ir.Var(scope, mk_unique_var("msg"), loc)
        self.typemap[msg_var.name] = msg_typ
        argtyps = tuple([msg_typ] + [self.typemap[x.name] for x in args])
        # assert_equiv takes vararg, which requires a tuple as argument type
        tup_typ = types.StarArgTuple.from_types(argtyps)
        # prepare function variable whose type may vary since it takes vararg
        assert_var = ir.Var(scope, mk_unique_var("assert"), loc)
        assert_def = ir.Global("assert_equiv", assert_equiv, loc=loc)
        fnty = get_global_func_typ(assert_equiv)
        sig = self.context.resolve_function_type(fnty, (tup_typ,), {})
        self._define(equiv_set, assert_var, fnty, assert_def)
        # The return value from assert_equiv is always of none type.
        var = ir.Var(scope, mk_unique_var("ret"), loc)
        value = ir.Expr.call(assert_var, [msg_var] + args, {}, loc=loc)
        self._define(equiv_set, var, types.none, value)
        self.calltypes[value] = sig
        return [
            ir.Assign(value=msg_val, target=msg_var, loc=loc),
            ir.Assign(value=assert_def, target=assert_var, loc=loc),
            ir.Assign(value=value, target=var, loc=loc),
        ]
    def _gen_shape_call(self, equiv_set, var, ndims, shape, post):
        """Return a tuple of per-dimension size variables for *var*.

        Known sizes (constants or existing variables) are reused; unknown
        sizes are fetched by generating ``var.shape[i]`` accesses.  The
        generated ir.Assign statements are appended to *post*; the shape
        getattr itself is only emitted when at least one getitem needs it.
        """
        # attr call: A_sh_attr = getattr(A, shape)
        if isinstance(shape, ir.Var):
            shape = equiv_set.get_shape(shape)
        # already a tuple variable that contains size
        if isinstance(shape, ir.Var):
            attr_var = shape
            shape_attr_call = None
            shape = None
        elif isinstance(shape, ir.Arg):
            attr_var = var
            shape_attr_call = None
            shape = None
        else:
            shape_attr_call = ir.Expr.getattr(var, "shape", var.loc)
            attr_var = ir.Var(
                var.scope, mk_unique_var("{}_shape".format(var.name)), var.loc
            )
            shape_attr_typ = types.containers.UniTuple(types.intp, ndims)
        size_vars = []
        use_attr_var = False
        # trim shape tuple if it is more than ndim
        if shape:
            nshapes = len(shape)
            if ndims < nshapes:
                shape = shape[(nshapes - ndims) :]
        for i in range(ndims):
            skip = False
            if shape and shape[i]:
                if isinstance(shape[i], ir.Var):
                    typ = self.typemap[shape[i].name]
                    if isinstance(typ, (types.Number, types.SliceType)):
                        # Reuse the existing size variable directly.
                        size_var = shape[i]
                        skip = True
                else:
                    # Materialize a constant size into a fresh variable.
                    if isinstance(shape[i], int):
                        size_val = ir.Const(shape[i], var.loc)
                    else:
                        size_val = shape[i]
                    assert isinstance(size_val, ir.Const)
                    size_var = ir.Var(
                        var.scope,
                        mk_unique_var("{}_size{}".format(var.name, i)),
                        var.loc,
                    )
                    post.append(ir.Assign(size_val, size_var, var.loc))
                    self._define(equiv_set, size_var, types.intp, size_val)
                    skip = True
            if not skip:
                # get size: Asize0 = A_sh_attr[0]
                size_var = ir.Var(
                    var.scope,
                    mk_unique_var("{}_size{}".format(var.name, i)),
                    var.loc,
                )
                getitem = ir.Expr.static_getitem(attr_var, i, None, var.loc)
                use_attr_var = True
                self.calltypes[getitem] = None
                post.append(ir.Assign(getitem, size_var, var.loc))
                self._define(equiv_set, size_var, types.intp, getitem)
            size_vars.append(size_var)
        if use_attr_var and shape_attr_call:
            # only insert shape call if there is any getitem call
            post.insert(0, ir.Assign(shape_attr_call, attr_var, var.loc))
            self._define(equiv_set, attr_var, shape_attr_typ, shape_attr_call)
        return tuple(size_vars)
def _isarray(self, varname):
typ = self.typemap[varname]
return isinstance(typ, types.npytypes.Array) and typ.ndim > 0
def _istuple(self, varname):
typ = self.typemap[varname]
return isinstance(typ, types.BaseTuple)
def _sum_size(self, equiv_set, sizes):
"""Return the sum of the given list of sizes if they are all equivalent
to some constant, or None otherwise.
"""
s = 0
for size in sizes:
n = equiv_set.get_equiv_const(size)
if n is None:
return None
else:
s += n
return s
UNARY_MAP_OP = list(npydecl.NumpyRulesUnaryArrayOperator._op_map.keys()) + [
operator.pos
]
BINARY_MAP_OP = npydecl.NumpyRulesArrayOperator._op_map.keys()
INPLACE_BINARY_MAP_OP = npydecl.NumpyRulesInplaceArrayOperator._op_map.keys()
UFUNC_MAP_OP = [f.__name__ for f in npydecl.supported_ufuncs]
| ArrayAnalysis |
python | prabhupant__python-ds | data_structures/trie/trie.py | {
"start": 112,
"end": 2020
} | class ____():
def __init__(self):
self.root = TrieNode()
self.word_list = []
def formTrie(self, keys):
for key in keys:
self.insert(key)
def insert(self, key):
node = self.root
for a in list(key):
if not node.children.get(a):
node.children[a] = TrieNode()
node = node.children[a]
node.last = True
def search(self, key):
node = self.root
found = True
for a in list(key):
if not node.children.get(a):
found = False
break
node = node.children[a]
return node and node.last and found
def suggestionsRec(self, node, word):
if node.last:
self.word_list.append(word)
for a,n in node.children.items():
self.suggestionsRec(n, word + a)
def printAutoSuggestions(self, key):
node = self.root
not_found = False
temp_word = ''
for a in list(key):
if not node.children.get(a):
not_found = True
break
temp_word += a
node = node.children[a]
if not_found:
return 0
elif node.last and not node.children:
return -1
self.suggestionsRec(node, temp_word)
for s in self.word_list:
print(s)
return 1
keys = ["hello", "dog", "hell", "cat", "a",
"hel", "help", "helps", "helping"]
key = "hel"
status = ["Not found", "Found"]
t = Trie()
t.formTrie(keys)
comp = t.printAutoSuggestions(key)
if comp == -1:
print("No other strings found with this prefix\n")
elif comp == 0:
print("No string found with this prefix\n")
| Trie |
python | encode__httpx | httpx/_auth.py | {
"start": 11744,
"end": 11891
} | class ____(typing.NamedTuple):
realm: bytes
nonce: bytes
algorithm: str
opaque: bytes | None
qop: bytes | None
| _DigestAuthChallenge |
python | ethereum__web3.py | tests/core/middleware/test_formatting_middleware.py | {
"start": 385,
"end": 7029
} | class ____(BaseProvider):
def make_request(self, method, params):
raise NotImplementedError(f"Cannot make request for {method}:{params}")
@pytest.fixture
def w3():
return Web3(provider=DummyProvider(), middleware=[])
def test_formatting_middleware(w3, request_mocker):
# No formatters by default
expected = "done"
with request_mocker(w3, mock_results={"test_endpoint": "done"}):
actual = w3.manager.request_blocking(RPCEndpoint("test_endpoint"), [])
assert actual == expected
def test_formatting_middleware_no_method(w3):
w3.middleware_onion.add(FormattingMiddlewareBuilder.build())
# Formatting middleware requires an endpoint
with pytest.raises(NotImplementedError):
w3.manager.request_blocking("test_endpoint", [])
def test_formatting_middleware_request_formatters(w3, request_mocker):
callable_mock = Mock()
w3.middleware_onion.add(
FormattingMiddlewareBuilder.build(
request_formatters={"test_endpoint": callable_mock}
)
)
expected = "done"
with request_mocker(w3, mock_results={"test_endpoint": "done"}):
actual = w3.manager.request_blocking("test_endpoint", ["param1"])
callable_mock.assert_called_once_with(["param1"])
assert actual == expected
def test_formatting_middleware_result_formatters(w3, request_mocker):
w3.middleware_onion.add(
FormattingMiddlewareBuilder.build(
result_formatters={"test_endpoint": lambda x: f"STATUS: {x}"}
)
)
expected = "STATUS: done"
with request_mocker(w3, mock_results={"test_endpoint": "done"}):
actual = w3.manager.request_blocking("test_endpoint", [])
assert actual == expected
def test_formatting_middleware_result_formatters_for_none(w3, request_mocker):
w3.middleware_onion.add(
FormattingMiddlewareBuilder.build(
result_formatters={"test_endpoint": lambda x: hex(x)}
)
)
expected = None
with request_mocker(w3, mock_results={"test_endpoint": expected}):
actual = w3.manager.request_blocking("test_endpoint", [])
assert actual == expected
def test_formatting_middleware_error_formatters(w3, request_mocker):
w3.middleware_onion.add(
FormattingMiddlewareBuilder.build(
result_formatters={"test_endpoint": lambda x: f"STATUS: {x}"}
)
)
expected = "error"
with request_mocker(w3, mock_errors={"test_endpoint": {"message": "error"}}):
with pytest.raises(Web3RPCError) as err:
w3.manager.request_blocking("test_endpoint", [])
assert str(err.value) == expected
def test_formatting_middleware_raises_for_non_dict_responses(w3, request_mocker):
w3.middleware_onion.add(
FormattingMiddlewareBuilder.build(
result_formatters={"test_endpoint": lambda x: x}
)
)
w3.provider.make_request = lambda *_: "a string"
with pytest.raises(
BadResponseFormat,
match=r"Malformed response: expected a valid JSON-RPC response object, "
r"got: `a string`",
):
_ = w3.eth.chain_id
# -- async -- #
@pytest_asyncio.fixture
async def async_w3():
return AsyncWeb3(provider=AsyncBaseProvider(), middleware=[])
@pytest.mark.asyncio
async def test_async_formatting_middleware(async_w3, request_mocker):
    """With no formatting middleware installed, results pass through untouched."""
    # No formatters by default
    expected = "done"
    async with request_mocker(async_w3, mock_results={"test_endpoint": "done"}):
        actual = await async_w3.manager.coro_request(RPCEndpoint("test_endpoint"), [])
    assert actual == expected
@pytest.mark.asyncio
async def test_async_formatting_middleware_no_method(async_w3):
    """Formatting middleware built with no endpoint mapping must refuse requests."""
    middleware = FormattingMiddlewareBuilder.build()
    async_w3.middleware_onion.add(middleware)
    with pytest.raises(NotImplementedError):
        await async_w3.manager.coro_request("test_endpoint", [])
@pytest.mark.asyncio
async def test_async_formatting_middleware_request_formatters(async_w3, request_mocker):
    """Request formatters receive the outgoing params exactly once."""
    callable_mock = Mock()
    async_w3.middleware_onion.add(
        FormattingMiddlewareBuilder.build(
            request_formatters={"test_endpoint": callable_mock}
        )
    )
    expected = "done"
    async with request_mocker(async_w3, mock_results={"test_endpoint": "done"}):
        actual = await async_w3.manager.coro_request("test_endpoint", ["param1"])
    callable_mock.assert_called_once_with(["param1"])
    assert actual == expected
@pytest.mark.asyncio
async def test_async_formatting_middleware_result_formatters(async_w3, request_mocker):
    """Result formatters must be applied to the async provider's result."""
    async_w3.middleware_onion.add(
        FormattingMiddlewareBuilder.build(
            result_formatters={"test_endpoint": lambda x: f"STATUS: {x}"}
        )
    )
    async with request_mocker(async_w3, mock_results={"test_endpoint": "done"}):
        response = await async_w3.manager.coro_request("test_endpoint", [])
    assert response == "STATUS: done"
@pytest.mark.asyncio
async def test_async_formatting_middleware_result_formatters_for_none(
    async_w3, request_mocker
):
    """A ``None`` result must bypass the result formatter (``hex`` would raise)."""
    async_w3.middleware_onion.add(
        FormattingMiddlewareBuilder.build(
            result_formatters={"test_endpoint": lambda x: hex(x)}
        )
    )
    expected = None
    async with request_mocker(async_w3, mock_results={"test_endpoint": expected}):
        actual = await async_w3.manager.coro_request("test_endpoint", [])
    assert actual == expected
@pytest.mark.asyncio
async def test_async_formatting_middleware_error_formatters(async_w3, request_mocker):
    """Error responses surface as ``Web3RPCError`` and skip result formatters."""
    async_w3.middleware_onion.add(
        FormattingMiddlewareBuilder.build(
            result_formatters={"test_endpoint": lambda x: f"STATUS: {x}"}
        )
    )
    expected = "error"
    async with request_mocker(
        async_w3, mock_errors={"test_endpoint": {"message": "error"}}
    ):
        with pytest.raises(Web3RPCError) as err:
            await async_w3.manager.coro_request("test_endpoint", [])
        assert str(err.value) == expected
@pytest.mark.asyncio
async def test_async_formatting_middleware_raises_for_non_dict_responses(async_w3):
    """A non-object provider response must raise ``BadResponseFormat``."""
    async_w3.middleware_onion.add(
        FormattingMiddlewareBuilder.build(
            result_formatters={"test_endpoint": lambda x: x}
        )
    )
    async def _stub_make_request(*_):
        # Simulate a provider that returns a bare string instead of a dict.
        return "a string"
    async_w3.provider.make_request = _stub_make_request
    expected_message = (
        r"Malformed response: expected a valid JSON-RPC response object, "
        r"got: `a string`"
    )
    with pytest.raises(BadResponseFormat, match=expected_message):
        _ = await async_w3.eth.chain_id
| DummyProvider |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-intercom/components.py | {
"start": 845,
"end": 6150
} | class ____:
"""
Define timings for RateLimits. Adjust timings if needed.
:: on_unknown_load = 1.0 sec - Intercom recommended time to hold between each API call.
:: on_low_load = 0.01 sec (10 miliseconds) - ideal ratio between hold time and api call, also the standard hold time between each API call.
:: on_mid_load = 1.5 sec - great timing to retrieve another 15% of request capacity while having mid_load.
:: on_high_load = 8.0 sec - ideally we should wait 5.0 sec while having high_load, but we hold 8 sec to retrieve up to 80% of request capacity.
"""
threshold: float = 0.1
on_unknown_load: float = 1.0
on_low_load: float = 0.01
on_mid_load: float = 1.5
on_high_load: float = 8.0 # max time
@staticmethod
def backoff_time(backoff_time: float):
return sleep(backoff_time)
    @staticmethod
    def _define_values_from_headers(
        current_rate_header_value: Optional[float],
        total_rate_header_value: Optional[float],
        threshold: float = threshold,
    ) -> tuple[float, Union[float, str]]:
        """Compute (cutoff, load) from the rate-limit header values.

        ``load`` is the fraction of quota remaining; ``None`` when headers are
        missing. NOTE(review): ``cutoff`` algebraically simplifies to 0.5 in the
        header branch and to ~1.0 in the fallback branch — presumably intentional
        fixed cutpoints, but worth confirming.
        """
        # define current load and cutoff from rate_limits
        if current_rate_header_value and total_rate_header_value:
            cutoff: float = (total_rate_header_value / 2) / total_rate_header_value
            load: float = current_rate_header_value / total_rate_header_value
        else:
            # to guarantee cutoff value to be exactly 1 sec, based on threshold, if headers are not available
            cutoff: float = threshold * (1 / threshold)
            load = None
        return cutoff, load
@staticmethod
def _convert_load_to_backoff_time(
cutoff: float,
load: Optional[float] = None,
threshold: float = threshold,
) -> float:
# define backoff_time based on load conditions
if not load:
backoff_time = IntercomRateLimiter.on_unknown_load
elif load <= threshold:
backoff_time = IntercomRateLimiter.on_high_load
elif load <= cutoff:
backoff_time = IntercomRateLimiter.on_mid_load
elif load > cutoff:
backoff_time = IntercomRateLimiter.on_low_load
return backoff_time
@staticmethod
def get_backoff_time(
*args,
threshold: float = threshold,
rate_limit_header: str = "X-RateLimit-Limit",
rate_limit_remain_header: str = "X-RateLimit-Remaining",
):
"""
To avoid reaching Intercom API Rate Limits, use the 'X-RateLimit-Limit','X-RateLimit-Remaining' header values,
to determine the current rate limits and load and handle backoff_time based on load %.
Recomended backoff_time between each request is 1 sec, we would handle this dynamicaly.
:: threshold - is the % cutoff for the rate_limits % load, if this cutoff is crossed,
the connector waits `sleep_on_high_load` amount of time, default value = 0.1 (10% left from max capacity)
:: backoff_time - time between each request = 200 miliseconds
:: rate_limit_header - responce header item, contains information with max rate_limits available (max)
:: rate_limit_remain_header - responce header item, contains information with how many requests are still available (current)
Header example:
{
X-RateLimit-Limit: 100
X-RateLimit-Remaining: 51
X-RateLimit-Reset: 1487332510
},
where: 51 - requests remains and goes down, 100 - max requests capacity.
More information: https://developers.intercom.com/intercom-api-reference/reference/rate-limiting
"""
# find the requests.Response inside args list
for arg in args:
if isinstance(arg, requests.models.Response):
headers = arg.headers or {}
# Get the rate_limits from response
total_rate = int(headers.get(rate_limit_header, 0)) if headers else None
current_rate = int(headers.get(rate_limit_remain_header, 0)) if headers else None
cutoff, load = IntercomRateLimiter._define_values_from_headers(
current_rate_header_value=current_rate,
total_rate_header_value=total_rate,
threshold=threshold,
)
backoff_time = IntercomRateLimiter._convert_load_to_backoff_time(cutoff=cutoff, load=load, threshold=threshold)
return backoff_time
    @staticmethod
    def balance_rate_limit(
        threshold: float = threshold,
        rate_limit_header: str = "X-RateLimit-Limit",
        rate_limit_remain_header: str = "X-RateLimit-Remaining",
    ):
        """
        Decorator factory: sleep an adaptive amount of time before calling ``func``.
        Adjust `threshold`, `rate_limit_header`, `rate_limit_remain_header` if needed.
        """
        def decorator(func):
            @wraps(func)
            def wrapper_balance_rate_limit(*args, **kwargs):
                # Inspect the wrapped call's args for a requests.Response carrying
                # rate-limit headers, then pause accordingly before proceeding.
                IntercomRateLimiter.backoff_time(
                    IntercomRateLimiter.get_backoff_time(
                        *args, threshold=threshold, rate_limit_header=rate_limit_header, rate_limit_remain_header=rate_limit_remain_header
                    )
                )
                return func(*args, **kwargs)
            return wrapper_balance_rate_limit
        return decorator
| IntercomRateLimiter |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 33247,
"end": 37649
} | class ____:
def _create_arrays(self):
a = np.arange(5)
nd_a = np.arange(5).repeat(2).reshape(1, 5, 2)
return a, nd_a
    def _check_inverse_of_slicing(self, indices):
        """Assert delete(a, indices) is the set-complement of a[indices]."""
        a, nd_a = self._create_arrays()
        a_del = delete(a, indices)
        nd_a_del = delete(nd_a, indices, axis=1)
        msg = f'Delete failed for obj: {indices!r}'
        # The deleted and kept elements together must reconstruct the original.
        assert_array_equal(setxor1d(a_del, a[indices, ]), a,
                           err_msg=msg)
        # Same invariant along axis 1 of the 3-D array (checked via one lane).
        xor = setxor1d(nd_a_del[0, :, 0], nd_a[0, indices, 0])
        assert_array_equal(xor, nd_a[0, :, 0], err_msg=msg)
def test_slices(self):
lims = [-6, -2, 0, 1, 2, 4, 5]
steps = [-3, -1, 1, 3]
for start in lims:
for stop in lims:
for step in steps:
s = slice(start, stop, step)
self._check_inverse_of_slicing(s)
    def test_fancy(self):
        """Fancy (array/list/boolean) indices: round-trip, bounds, and scalar-bool rejection."""
        a, _ = self._create_arrays()
        self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]]))
        with pytest.raises(IndexError):
            delete(a, [100])
        with pytest.raises(IndexError):
            delete(a, [-100])
        self._check_inverse_of_slicing([0, -1, 2, 2])
        self._check_inverse_of_slicing([True, False, False, True, False])
        # not legal, indexing with these would change the dimension
        with pytest.raises(ValueError):
            delete(a, True)
        with pytest.raises(ValueError):
            delete(a, False)
        # not enough items
        with pytest.raises(ValueError):
            delete(a, [False] * 4)
def test_single(self):
self._check_inverse_of_slicing(0)
self._check_inverse_of_slicing(-4)
    def test_0d(self):
        """0-d arrays have no axis 0: expect AxisError, and TypeError for a bad axis."""
        a = np.array(1)
        with pytest.raises(AxisError):
            delete(a, [], axis=0)
        with pytest.raises(TypeError):
            delete(a, [], axis="nonsense")
    def test_subclass(self):
        """delete() must preserve ndarray subclasses for every index kind."""
        class SubClass(np.ndarray):
            pass
        a_orig, _ = self._create_arrays()
        a = a_orig.view(SubClass)
        assert_(isinstance(delete(a, 0), SubClass))
        assert_(isinstance(delete(a, []), SubClass))
        assert_(isinstance(delete(a, [0, 1]), SubClass))
        assert_(isinstance(delete(a, slice(1, 2)), SubClass))
        assert_(isinstance(delete(a, slice(1, -2)), SubClass))
def test_array_order_preserve(self):
# See gh-7113
k = np.arange(10).reshape(2, 5, order='F')
m = delete(k, slice(60, None), axis=1)
# 'k' is Fortran ordered, and 'm' should have the
# same ordering as 'k' and NOT become C ordered
assert_equal(m.flags.c_contiguous, k.flags.c_contiguous)
assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
    def test_index_floats(self):
        """Float arrays are not valid delete indices, even when empty."""
        with pytest.raises(IndexError):
            np.delete([0, 1, 2], np.array([1.0, 2.0]))
        with pytest.raises(IndexError):
            np.delete([0, 1, 2], np.array([], dtype=float))
    @pytest.mark.parametrize("indexer", [np.array([1]), [1]])
    def test_single_item_array(self, indexer):
        """A one-element array/list index must behave like the scalar index."""
        a, nd_a = self._create_arrays()
        a_del_int = delete(a, 1)
        a_del = delete(a, indexer)
        assert_equal(a_del_int, a_del)
        nd_a_del_int = delete(nd_a, 1, axis=1)
        nd_a_del = delete(nd_a, np.array([1]), axis=1)
        assert_equal(nd_a_del_int, nd_a_del)
    def test_single_item_array_non_int(self):
        """One-element boolean arrays stay boolean; non-integral dtypes are rejected."""
        # Special handling for integer arrays must not affect non-integer ones.
        # If `False` was cast to `0` it would delete the element:
        res = delete(np.ones(1), np.array([False]))
        assert_array_equal(res, np.ones(1))
        # Test the more complicated (with axis) case from gh-21840
        x = np.ones((3, 1))
        false_mask = np.array([False], dtype=bool)
        true_mask = np.array([True], dtype=bool)
        res = delete(x, false_mask, axis=-1)
        assert_array_equal(res, x)
        res = delete(x, true_mask, axis=-1)
        assert_array_equal(res, x[:, :0])
        # Object or e.g. timedeltas should *not* be allowed
        with pytest.raises(IndexError):
            delete(np.ones(2), np.array([0], dtype=object))
        with pytest.raises(IndexError):
            # timedeltas are sometimes "integral, but clearly not allowed:
            delete(np.ones(2), np.array([0], dtype="m8[ns]"))
| TestDelete |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 71737,
"end": 71910
} | class ____:
xlCompactRow = 0 # from enum XlLayoutRowType
xlOutlineRow = 2 # from enum XlLayoutRowType
xlTabularRow = 1 # from enum XlLayoutRowType
| LayoutRowType |
python | sympy__sympy | sympy/polys/domains/mpelements.py | {
"start": 567,
"end": 861
} | class ____(_mpf, DomainElement):
"""An element of a real domain. """
__slots__ = ('__mpf__',)
def _set_mpf(self, val):
self.__mpf__ = val
_mpf_ = property(lambda self: self.__mpf__, _set_mpf)
def parent(self):
return self.context._parent
@public
| RealElement |
python | django__django | tests/sitemaps_tests/models.py | {
"start": 63,
"end": 261
} | class ____(models.Model):
    # Display name used only by the sitemap test fixtures.
    name = models.CharField(max_length=100)
    # Last-modified timestamp surfaced as the sitemap entry's lastmod; nullable.
    lastmod = models.DateTimeField(null=True)
    def get_absolute_url(self):
        """Return the canonical URL the sitemap framework publishes for this row."""
        return "/testmodel/%s/" % self.id
| TestModel |
python | rapidsai__cudf | python/cudf/cudf/core/udf/masked_typing.py | {
"start": 2775,
"end": 6053
} | class ____(types.Type):
"""
A Numba type consisting of a value of some primitive type
and a validity boolean, over which we can define math ops
"""
    def __init__(self, value):
        # MaskedType in Numba shall be parameterized
        # with a value type
        if default_manager[value].has_nrt_meminfo():
            # If the wrapped value type is NRT-managed, record that fact on the
            # active compilation context so NRT gets linked into the kernel.
            ctx = _current_nrt_context.get(None)
            if ctx is not None:
                # we're in a compilation that is determining
                # if NRT must be linked
                ctx.use_nrt = True
        self.value_type = _type_to_masked_type(value)
        super().__init__(name=f"Masked({self.value_type})")
    def __hash__(self):
        """
        Needed so that numba caches type instances with different
        `value_type` separately.
        """
        # repr() embeds the value_type via the name, so distinct parameterizations
        # hash differently.
        return hash(repr(self))
    def unify(self, context, other):
        """
        Often within a UDF an instance arises where a variable could
        be a `MaskedType`, an `NAType`, or a literal based off
        the data at runtime, for example the variable `ret` here:
        def f(x):
            if x == 1:
                ret = x
            elif x > 2:
                ret = 1
            else:
                ret = cudf.NA
            return ret
        When numba analyzes this function it will eventually figure
        out that the variable `ret` could be any of the three types
        from above. This scenario will only work if numba knows how
        to find some kind of common type between the possibilities,
        and this function implements that - the goal is to return a
        common type when comparing `self` to other.
        """
        # If we have Masked and NA, the output should be a
        # MaskedType with the original type as its value_type
        if isinstance(other, NAType):
            return self
        # two MaskedType unify to a new MaskedType whose value_type
        # is the result of unifying `self` and `other` `value_type`
        elif isinstance(other, MaskedType):
            return MaskedType(
                context.unify_pairs(self.value_type, other.value_type)
            )
        # if we have MaskedType and something that results in a
        # scalar, unify between the MaskedType's value_type
        # and that other thing
        unified = context.unify_pairs(self.value_type, other)
        if unified is None:
            # The value types don't unify, so there is no unified masked type
            return None
        return MaskedType(unified)
    def __eq__(self, other):
        # Equality is required for determining whether a cast is required
        # between two different types.
        if not isinstance(other, MaskedType):
            # Require a cast when the other type is not masked
            return False
        # Require a cast for another masked with a different value type
        return self.value_type == other.value_type
# For typing a Masked constant value defined outside a kernel (e.g. captured in
# a closure).
@typeof_impl.register(api.Masked)
def typeof_masked(val, c):
    # The Numba type of a Masked constant is parameterized by its value's type.
    return MaskedType(typeof(val.value))
# Implemented typing for Masked(value, valid) - the construction of a Masked
# type in a kernel.
@cuda_decl_registry.register
| MaskedType |
python | pytorch__pytorch | torch/_subclasses/fake_tensor.py | {
"start": 39589,
"end": 42305
} | class ____:
"""
The Tensor metadata relevant to hashing FakeTensors when caching.
"""
dtype: torch.dtype
shape: tuple[_MetadataIntLike, ...]
stride: tuple[_MetadataIntLike, ...]
device: torch.device
layout: torch.layout
memory_format: Optional[torch.memory_format]
storage_offset: _MetadataIntLike
storage_bytes: Optional[_MetadataIntLike]
requires_grad: bool
is_quantized: bool
is_conj: bool
is_neg: bool
is_inference: bool
is_sparse: bool # read: is sparse COO
is_coalesced: Optional[bool]
dense_dim: Optional[int]
sparse_dim: Optional[int]
    def _flatten_into(
        self,
        result: list[object],
        mode: FakeTensorMode,
        state: _CacheKeyState,
    ) -> None:
        """Append this metadata's fields to ``result`` for cache-key hashing,
        routing every SymInt through ``state.convert_sym_int``."""
        # Flatten the TensorMetadata out into `result`. Make sure to call
        # state.convert_sym_int() on any SymInts.
        for field in dataclasses.fields(self):
            value = getattr(self, field.name)
            if isinstance(value, (tuple, list, torch.Size)):
                # This will recursively flatten the iterable, calling
                # convert_sym_int() as necessary.
                id_hashed_objects: list[object] = []
                mode._prep_args_for_hash(result, value, state, id_hashed_objects)
                id_hashed_objects.clear()
            elif isinstance(value, SymInt):
                state.convert_sym_int(result, value)
            else:
                result.append(value)
def extract_tensor_metadata(t: Tensor) -> TensorMetadata:
    """
    Extract the TensorMetadata of a tensor (the fields relevant to hashing
    FakeTensors for caching).
    """
    memory_format = suggest_memory_format(t)
    # Don't call is_contiguous() on a Tensor which has symbolic sizes or things
    # will go badly (guards will be messed up?)
    if (
        t._has_symbolic_sizes_strides
        or is_sparse_any(t)
        or not t.is_contiguous(memory_format=memory_format)
    ):
        memory_format = None  # type: ignore[assignment]
    storage_offset = t.storage_offset()
    # Positional order must match the TensorMetadata field order exactly.
    return TensorMetadata(
        t.dtype,
        t.shape,
        t.stride() if t.layout == torch.strided else (),
        t.device,
        t.layout,
        memory_format,
        storage_offset,
        # Only set storage_bytes for tensors that have storage (not sparse)
        t.untyped_storage().nbytes() if not is_sparse_any(t) else None,
        t.requires_grad,
        t.is_quantized,
        t.is_conj(),
        t.is_neg(),
        t.is_inference(),
        t.is_sparse,
        t.is_coalesced() if t.is_sparse else None,
        t.dense_dim() if is_sparse_any(t) else None,
        t.sparse_dim() if is_sparse_any(t) else None,
    )
@dataclass(slots=True)
| TensorMetadata |
python | kamyu104__LeetCode-Solutions | Python/design-movie-rental-system.py | {
"start": 202,
"end": 1685
} | class ____(object):
    def __init__(self, n, entries):
        """
        :type n: int
        :type entries: List[List[int]]
        """
        # movie -> SortedList of (price, shop), so the 5 cheapest shops are a slice.
        self.__movie_to_ordered_price_shop = collections.defaultdict(SortedList)
        # (shop, movie) -> price, for O(1) lookup on rent/drop.
        self.__shop_movie_to_price = {}
        # SortedList of (price, shop, movie) for currently-rented copies.
        self.__rented_ordered_price_shop_movie = SortedList()
        for s, m, p in entries:
            self.__movie_to_ordered_price_shop[m].add((p, s))
            self.__shop_movie_to_price[s, m] = p
def search(self, movie):
"""
:type movie: int
:rtype: List[int]
"""
return [s for _, s in self.__movie_to_ordered_price_shop[movie][:5]]
    def rent(self, shop, movie):
        """
        :type shop: int
        :type movie: int
        :rtype: None
        """
        # Move the copy from the available index to the rented index.
        price = self.__shop_movie_to_price[shop, movie]
        self.__movie_to_ordered_price_shop[movie].remove((price, shop))
        self.__rented_ordered_price_shop_movie.add((price, shop, movie))
    def drop(self, shop, movie):
        """
        :type shop: int
        :type movie: int
        :rtype: None
        """
        # Inverse of rent(): move the copy back to the available index.
        price = self.__shop_movie_to_price[shop, movie]
        self.__movie_to_ordered_price_shop[movie].add((price, shop))
        self.__rented_ordered_price_shop_movie.remove((price, shop, movie))
def report(self):
"""
:rtype: List[List[int]]
"""
return [[s, m] for _, s, m in self.__rented_ordered_price_shop_movie[:5]]
| MovieRentingSystem |
python | getsentry__sentry | src/sentry/hybridcloud/outbox/base.py | {
"start": 18095,
"end": 19861
} | class ____(Protocol):
"""
Helps cover the interface of ReplicatedControlModel and User (which cannot subclass) that allows them
to use OutboxCategory.connect_control_model_updates.
"""
@classmethod
def handle_async_deletion(
cls,
identifier: int,
region_name: str,
shard_identifier: int,
payload: Mapping[str, Any] | None,
) -> None:
pass
    def handle_async_replication(self, region_name: str, shard_identifier: int) -> None:
        """Protocol hook: replicate this model's state to the given region shard."""
        pass
@receiver(post_upgrade)
def run_outbox_replications_for_self_hosted(*args: Any, **kwds: Any) -> None:
    """Self-hosted only: after an upgrade, synchronously backfill and drain all outboxes."""
    # Imports are local because this runs as a signal handler during upgrade.
    from django.conf import settings
    from sentry.hybridcloud.models.outbox import OutboxBase
    from sentry.hybridcloud.tasks.backfill_outboxes import backfill_outboxes_for
    if not settings.SENTRY_SELF_HOSTED:
        return
    logger.info("Executing outbox replication backfill")
    # Loop until a backfill pass reports no more work.
    while backfill_outboxes_for(
        SiloMode.get_current_mode(), max_batch_rate=1000, force_synchronous=True
    ):
        pass
    for outbox_name in (name for names in settings.SENTRY_OUTBOX_MODELS.values() for name in names):
        logger.info("Processing %ss...", outbox_name)
        outbox_model: type[OutboxBase] = OutboxBase.from_outbox_name(outbox_name)
        for shard_attrs in outbox_model.find_scheduled_shards():
            next_outbox: OutboxBase | None = outbox_model.prepare_next_from_shard(shard_attrs)
            if next_outbox is None:
                continue
            try:
                next_outbox.drain_shard(flush_all=True)
            except Exception:
                # Best-effort in production; fail loudly only under test.
                capture_exception()
                if in_test_environment():
                    raise
    logger.info("done")
| HasControlReplicationHandlers |
python | pytorch__pytorch | torch/ao/nn/intrinsic/qat/modules/conv_fused.py | {
"start": 692,
"end": 15481
} | class ____(nn.modules.conv._ConvNd, nni._FusedModule):
_version = 2
_FLOAT_MODULE: ClassVar[type[nn.modules.conv._ConvNd]]
    def __init__(
        self,
        # ConvNd args
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        transposed,
        output_padding,
        groups,
        bias,
        padding_mode,
        # BatchNormNd args
        # num_features: out_channels
        eps=1e-05,
        momentum=0.1,
        # affine: True
        # track_running_stats: True
        # Args for this module
        freeze_bn=False,
        qconfig=None,
        dim=2,
    ):
        """Build a QAT-fused Conv+BN module: a _ConvNd with an attached BN of
        matching dimensionality and a fake-quant observer on the weight."""
        # The conv parent is constructed with bias=False; the (optional) bias is
        # owned by this module and applied after the scaled convolution.
        nn.modules.conv._ConvNd.__init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
            False,
            padding_mode,
        )
        assert qconfig, "qconfig must be provided for QAT module"
        self.qconfig = qconfig
        # In eval mode BN stats are always frozen regardless of the argument.
        self.freeze_bn = freeze_bn if self.training else True
        self.bn = _BN_CLASS_MAP[dim](out_channels, eps, momentum, True, True)
        self.weight_fake_quant = self.qconfig.weight()
        if bias:
            self.bias = Parameter(torch.empty(out_channels))
        else:
            self.register_parameter("bias", None)
        self.reset_bn_parameters()
        # this needs to be called after reset_bn_parameters,
        # as they modify the same state
        if self.training:
            if freeze_bn:
                self.freeze_bn_stats()
            else:
                self.update_bn_stats()
        else:
            self.freeze_bn_stats()
        self._enable_slow_path_for_better_numerical_stability = False
    def reset_running_stats(self):
        """Reset the attached BN's running mean/var and batch counter."""
        self.bn.reset_running_stats()
    def reset_bn_parameters(self):
        """Reinitialize BN stats/affine params and the conv bias (not the conv weight)."""
        self.bn.reset_running_stats()
        init.uniform_(self.bn.weight)
        init.zeros_(self.bn.bias)
        # note: below is actually for conv, not BN
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
    def update_bn_stats(self):
        """Re-enable BN running-stat updates; returns self for chaining."""
        self.freeze_bn = False
        self.bn.training = True
        return self
    def freeze_bn_stats(self):
        """Freeze BN running stats (stats stop updating); returns self for chaining."""
        self.freeze_bn = True
        self.bn.training = False
        return self
def _forward(self, input):
if self._enable_slow_path_for_better_numerical_stability:
return self._forward_slow(input)
return self._forward_approximate(input)
    def _forward_approximate(self, input):
        """Approximated method to fuse conv and bn. It requires only one forward pass.
        conv_orig = conv / scale_factor where scale_factor = bn.weight / running_std
        """
        assert self.bn.running_var is not None
        running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
        scale_factor = self.bn.weight / running_std
        # Broadcast shapes: scale along out-channels for the weight, along the
        # channel dim for the activation bias.
        weight_shape = [1] * len(self.weight.shape)
        weight_shape[0] = -1
        bias_shape = [1] * len(self.weight.shape)
        bias_shape[1] = -1
        scaled_weight = self.weight_fake_quant(
            self.weight * scale_factor.reshape(weight_shape)
        )
        # using zero bias here since the bias for original conv
        # will be added later
        if self.bias is not None:
            zero_bias = torch.zeros_like(self.bias, dtype=input.dtype)
        else:
            zero_bias = torch.zeros(
                self.out_channels, device=scaled_weight.device, dtype=input.dtype
            )
        conv = self._conv_forward(input, scaled_weight, zero_bias)
        # Undo the scale so BN sees the unscaled conv output plus the real bias.
        conv_orig = conv / scale_factor.reshape(bias_shape)
        if self.bias is not None:
            conv_orig = conv_orig + self.bias.reshape(bias_shape)
        conv = self.bn(conv_orig)
        return conv
    def _forward_slow(self, input):
        """
        A more accurate but slow method to compute conv bn fusion, following https://arxiv.org/pdf/1806.08342.pdf
        It requires two forward passes but handles the case bn.weight == 0
        Conv: Y = WX + B_c
        Conv without bias: Y0 = WX = Y - B_c, Y = Y0 + B_c
        Batch statistics:
          mean_Y = Y.mean()
                 = Y0.mean() + B_c
          var_Y = (Y - mean_Y)^2.mean()
                = (Y0 - Y0.mean())^2.mean()
        BN (r: bn.weight, beta: bn.bias):
          Z = r * (Y - mean_Y) / sqrt(var_Y + eps) + beta
            = r * (Y0 - Y0.mean()) / sqrt(var_Y + eps) + beta
        Fused Conv BN training (std_Y = sqrt(var_Y + eps)):
          Z = (r * W / std_Y) * X + r * (B_c - mean_Y) / std_Y + beta
            = (r * W / std_Y) * X - r * Y0.mean() / std_Y + beta
        Fused Conv BN inference (running_std = sqrt(running_var + eps)):
          Z = (r * W / running_std) * X - r * (running_mean - B_c) / running_std + beta
        QAT with fused conv bn:
          Z_train = fake_quant(r * W / running_std) * X * (running_std / std_Y) - r * Y0.mean() / std_Y + beta
                  = conv(X, fake_quant(r * W / running_std)) * (running_std / std_Y) - r * Y0.mean() / std_Y + beta
          Z_inference = conv(X, fake_quant(r * W / running_std)) - r * (running_mean - B_c) / running_std + beta
        """
        assert self.bn.running_var is not None
        assert self.bn.running_mean is not None
        # using zero bias here since the bias for original conv
        # will be added later
        zero_bias = torch.zeros(
            self.out_channels, device=self.weight.device, dtype=input.dtype
        )
        weight_shape = [1] * len(self.weight.shape)
        weight_shape[0] = -1
        bias_shape = [1] * len(self.weight.shape)
        bias_shape[1] = -1
        if self.bn.training:
            # needed to compute batch mean/std
            conv_out = self._conv_forward(input, self.weight, zero_bias)
            # update bn statistics
            with torch.no_grad():
                conv_out_bias = (
                    conv_out
                    if self.bias is None
                    else conv_out + self.bias.reshape(bias_shape)
                )
                self.bn(conv_out_bias)
            # fused conv + bn without bias using bn running statistics
            running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
            scale_factor = self.bn.weight / running_std
            scaled_weight = self.weight_fake_quant(
                self.weight * scale_factor.reshape(weight_shape)
            )
            # fused conv without bias for inference: (r * W / running_std) * X
            conv_bn = self._conv_forward(input, scaled_weight, zero_bias)
            avg_dims = [0] + list(range(2, len(self.weight.shape)))
            batch_mean = conv_out.mean(avg_dims)
            batch_var = torch.square(conv_out - batch_mean.reshape(bias_shape)).mean(
                avg_dims
            )
            batch_std = torch.sqrt(batch_var + self.bn.eps)
            # scale to use batch std in training mode
            # conv(X, r * W / std_Y) = conv(X, r * W / running_std) * (running_std / std_Y)
            unscale_factor = running_std / batch_std
            conv_bn *= unscale_factor.reshape(bias_shape)
            fused_mean = batch_mean
            fused_std = batch_std
        else:
            # fused conv + bn without bias using bn running statistics
            running_std = torch.sqrt(self.bn.running_var + self.bn.eps)
            scale_factor = self.bn.weight / running_std
            scaled_weight = self.weight_fake_quant(
                self.weight * scale_factor.reshape(weight_shape)
            )
            # fused conv without bias for inference: (r * W / running_std) * X
            conv_bn = self._conv_forward(input, scaled_weight, zero_bias)
            fused_mean = self.bn.running_mean - (
                self.bias if self.bias is not None else 0
            )
            fused_std = running_std
        # fused bias = beta - r * mean / std
        fused_bias = self.bn.bias - self.bn.weight * fused_mean / fused_std
        conv_bn += fused_bias.reshape(bias_shape)
        # HACK to let conv bias participate in loss to avoid DDP error (parameters
        # were not used in producing loss)
        if self.bias is not None:
            conv_bn += (self.bias - self.bias).reshape(bias_shape)
        return conv_bn
    def forward(self, input):
        """Public entry point; delegates to the configured fusion path."""
        return self._forward(input)
    def train(self, mode=True):
        """
        Batchnorm's training behavior is using the self.training flag. Prevent
        changing it if BN is frozen. This makes sure that calling `model.train()`
        on a model with a frozen BN will behave properly.
        """
        self.training = mode
        # Only propagate the mode to children while BN stats are not frozen.
        if not self.freeze_bn:
            for module in self.children():
                module.train(mode)
        return self
# ===== Serialization version history =====
#
# Version 1/None
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- gamma : Tensor
# |--- beta : Tensor
# |--- running_mean : Tensor
# |--- running_var : Tensor
# |--- num_batches_tracked : Tensor
#
# Version 2
# self
# |--- weight : Tensor
# |--- bias : Tensor
# |--- bn : Module
# |--- weight : Tensor (moved from v1.self.gamma)
# |--- bias : Tensor (moved from v1.self.beta)
# |--- running_mean : Tensor (moved from v1.self.running_mean)
# |--- running_var : Tensor (moved from v1.self.running_var)
# |--- num_batches_tracked : Tensor (moved from v1.self.num_batches_tracked)
    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        """Migrate v1 state_dicts (flat gamma/beta/running_* keys) to the v2
        layout where BN tensors live under the ``bn.`` submodule, then defer to
        the standard loader."""
        version = local_metadata.get("version", None)
        if version is None or version == 1:
            # BN related parameters and buffers were moved into the BN module for v2
            v2_to_v1_names = {
                "bn.weight": "gamma",
                "bn.bias": "beta",
                "bn.running_mean": "running_mean",
                "bn.running_var": "running_var",
                "bn.num_batches_tracked": "num_batches_tracked",
            }
            for v2_name, v1_name in v2_to_v1_names.items():
                if prefix + v1_name in state_dict:
                    state_dict[prefix + v2_name] = state_dict[prefix + v1_name]
                    state_dict.pop(prefix + v1_name)
                elif prefix + v2_name in state_dict:
                    # there was a brief period where forward compatibility
                    # for this module was broken (between
                    # https://github.com/pytorch/pytorch/pull/38478
                    # and https://github.com/pytorch/pytorch/pull/38820)
                    # and modules emitted the v2 state_dict format while
                    # specifying that version == 1. This patches the forward
                    # compatibility issue by allowing the v2 style entries to
                    # be used.
                    pass
                elif strict:
                    missing_keys.append(prefix + v2_name)
        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )
    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Create a qat module from a float module or qparams_dict
        Args: `mod` a float module, either produced by torch.ao.quantization utilities
        or directly from user
        """
        # The ignore is because _FLOAT_MODULE is a TypeVar here where the bound
        # has no __name__ (code is fine though)
        assert type(mod) is cls._FLOAT_MODULE, (
            "qat."
            + cls.__name__
            + ".from_float only works for "
            + cls._FLOAT_MODULE.__name__
        )
        assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
        assert mod.qconfig, "Input float module must have a valid qconfig"
        qconfig = mod.qconfig
        # `mod` is a fused (conv, bn) sequence; pull the two submodules apart.
        conv, bn = mod[0], mod[1]  # type: ignore[index]
        qat_convbn = cls(
            conv.in_channels,
            conv.out_channels,
            conv.kernel_size,
            conv.stride,
            conv.padding,
            conv.dilation,
            conv.groups,
            conv.bias is not None,
            conv.padding_mode,
            bn.eps,
            bn.momentum,
            False,
            qconfig,
        )
        # Carry over the float parameters/buffers so training resumes in place.
        qat_convbn.weight = conv.weight
        qat_convbn.bias = conv.bias
        qat_convbn.bn.weight = bn.weight
        qat_convbn.bn.bias = bn.bias
        qat_convbn.bn.running_mean = bn.running_mean
        qat_convbn.bn.running_var = bn.running_var
        # mypy error: Cannot determine type of 'num_batches_tracked'
        qat_convbn.bn.num_batches_tracked = bn.num_batches_tracked
        return qat_convbn
    def to_float(self):
        """Convert back to a float module, folding the BN statistics into the
        conv weight/bias and re-attaching a ReLU when the fused variant has one."""
        cls = type(self)
        conv = cls._FLOAT_CONV_MODULE(  # type: ignore[attr-defined]
            self.in_channels,
            self.out_channels,
            self.kernel_size,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
            self.bias is not None,
            self.padding_mode,
        )
        conv.weight = torch.nn.Parameter(self.weight.detach())
        if self.bias is not None:
            conv.bias = torch.nn.Parameter(self.bias.detach())
        if cls._FLOAT_BN_MODULE:  # type: ignore[attr-defined]
            # fuse bn into conv
            assert self.bn.running_var is not None and self.bn.running_mean is not None
            conv.weight, conv.bias = fuse_conv_bn_weights(
                conv.weight,
                conv.bias,
                self.bn.running_mean,
                self.bn.running_var,
                self.bn.eps,
                self.bn.weight,
                self.bn.bias,
            )
        if cls._FLOAT_RELU_MODULE:  # type: ignore[attr-defined]
            # Rebuild the fused (conv, relu) container expected by callers.
            modules = []
            modules.append(conv)
            relu = cls._FLOAT_RELU_MODULE()  # type: ignore[attr-defined]
            modules.append(relu)
            conv_relu = cls._FUSED_FLOAT_MODULE(*modules)  # type: ignore[attr-defined]
            conv_relu.train(self.training)
            return conv_relu
        else:
            conv.train(self.training)
            return conv
| _ConvBnNd |
python | kubernetes-client__python | kubernetes/base/config/dateutil.py | {
"start": 628,
"end": 2745
} | class ____(datetime.tzinfo):
def __init__(self, h, m):
self._name = "UTC"
if h != 0 and m != 0:
self._name += "%+03d:%2d" % (h, m)
self._delta = datetime.timedelta(hours=h, minutes=math.copysign(m, h))
    def utcoffset(self, dt):
        # Fixed offset: independent of the supplied datetime.
        return self._delta
    def tzname(self, dt):
        # Name computed once in __init__ (e.g. "UTC" or "UTC+05:30").
        return self._name
    def dst(self, dt):
        # Fixed-offset zones have no daylight-saving adjustment.
        return datetime.timedelta(0)
# Canonical zero-offset timezone instance reused throughout this module.
UTC = TimezoneInfo(0, 0)
# ref https://www.ietf.org/rfc/rfc3339.txt
_re_rfc3339 = re.compile(r"(\d\d\d\d)-(\d\d)-(\d\d)"  # full-date
                         r"[ Tt]"  # Separator
                         r"(\d\d):(\d\d):(\d\d)([.,]\d+)?"  # partial-time
                         r"([zZ ]|[-+]\d\d?:\d\d)?",  # time-offset
                         re.VERBOSE + re.IGNORECASE)
# Matches a numeric UTC offset such as "+05:30", "-08" or "+0530".
_re_timezone = re.compile(r"([-+])(\d\d?):?(\d\d)?")
MICROSEC_PER_SEC = 1000000
def parse_rfc3339(s):
if isinstance(s, datetime.datetime):
# no need to parse it, just make sure it has a timezone.
if not s.tzinfo:
return s.replace(tzinfo=UTC)
return s
groups = _re_rfc3339.search(s).groups()
dt = [0] * 7
for x in range(6):
dt[x] = int(groups[x])
us = 0
if groups[6] is not None:
partial_sec = float(groups[6].replace(",", "."))
us = int(MICROSEC_PER_SEC * partial_sec)
tz = UTC
if groups[7] is not None and groups[7] != 'Z' and groups[7] != 'z':
tz_groups = _re_timezone.search(groups[7]).groups()
hour = int(tz_groups[1])
minute = 0
if tz_groups[0] == "-":
hour *= -1
if tz_groups[2]:
minute = int(tz_groups[2])
tz = TimezoneInfo(hour, minute)
return datetime.datetime(
year=dt[0], month=dt[1], day=dt[2],
hour=dt[3], minute=dt[4], second=dt[5],
microsecond=us, tzinfo=tz)
def format_rfc3339(date_time):
if date_time.tzinfo is None:
date_time = date_time.replace(tzinfo=UTC)
date_time = date_time.astimezone(UTC)
return date_time.strftime('%Y-%m-%dT%H:%M:%SZ')
| TimezoneInfo |
python | getsentry__sentry | src/sentry/audit_log/events.py | {
"start": 5479,
"end": 6477
} | class ____(AuditLogEvent):
def __init__(self) -> None:
super().__init__(event_id=51, name="PROJECTKEY_EDIT", api_name="projectkey.edit")
def render(self, audit_log_entry: AuditLogEntry) -> str:
items_strings = []
if "prev_rate_limit_count" in audit_log_entry.data:
items_strings.append(
" rate limit count from {prev_rate_limit_count} to {rate_limit_count}".format(
**audit_log_entry.data
)
)
if "prev_rate_limit_window" in audit_log_entry.data:
items_strings.append(
" rate limit window from {prev_rate_limit_window} to {rate_limit_window}".format(
**audit_log_entry.data
)
)
item_string = ""
if items_strings:
item_string = ":" + ",".join(items_strings)
return "edited project key {public_key}".format(**audit_log_entry.data) + item_string
| ProjectKeyEditAuditLogEvent |
python | tiangolo__fastapi | fastapi/security/http.py | {
"start": 557,
"end": 968
} | class ____(BaseModel):
"""
The HTTP Basic credentials given as the result of using `HTTPBasic` in a
dependency.
Read more about it in the
[FastAPI docs for HTTP Basic Auth](https://fastapi.tiangolo.com/advanced/security/http-basic-auth/).
"""
username: Annotated[str, Doc("The HTTP Basic username.")]
password: Annotated[str, Doc("The HTTP Basic password.")]
| HTTPBasicCredentials |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/request_builders/api.py | {
"start": 1398,
"end": 1587
} | class ____(AbstractRequestBuilder):
URL = "https://api.hubapi.com/crm/v3/schemas"
def build(self) -> HttpRequest:
return HttpRequest(url=self.URL)
| CustomObjectsRequestBuilder |
python | getsentry__sentry | src/sentry/relay/config/__init__.py | {
"start": 14343,
"end": 14559
} | class ____(TypedDict):
op: Literal["http"]
"""Top scope to match on. Subscopes match all top scopes; for example, the
scope `http` matches `http.client` and `http.server` operations."""
| SpanDescriptionScope |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/eventloop/async_generator.py | {
"start": 1562,
"end": 3933
} | class ____:
pass
async def generator_to_async_generator(
get_iterable: Callable[[], Iterable[_T]],
buffer_size: int = DEFAULT_BUFFER_SIZE,
) -> AsyncGenerator[_T, None]:
"""
Turn a generator or iterable into an async generator.
This works by running the generator in a background thread.
:param get_iterable: Function that returns a generator or iterable when
called.
:param buffer_size: Size of the queue between the async consumer and the
synchronous generator that produces items.
"""
quitting = False
# NOTE: We are limiting the queue size in order to have back-pressure.
q: Queue[_T | _Done] = Queue(maxsize=buffer_size)
loop = get_running_loop()
def runner() -> None:
"""
Consume the generator in background thread.
When items are received, they'll be pushed to the queue.
"""
try:
for item in get_iterable():
# When this async generator was cancelled (closed), stop this
# thread.
if quitting:
return
while True:
try:
q.put(item, timeout=1)
except Full:
if quitting:
return
continue
else:
break
finally:
while True:
try:
q.put(_Done(), timeout=1)
except Full:
if quitting:
return
continue
else:
break
# Start background thread.
runner_f = run_in_executor_with_context(runner)
try:
while True:
try:
item = q.get_nowait()
except Empty:
item = await loop.run_in_executor(None, q.get)
if isinstance(item, _Done):
break
else:
yield item
finally:
# When this async generator is closed (GeneratorExit exception, stop
# the background thread as well. - we don't need that anymore.)
quitting = True
# Wait for the background thread to finish. (should happen right after
# the last item is yielded).
await runner_f
| _Done |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_types.py | {
"start": 10774,
"end": 12200
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
def test_time_microseconds(self):
dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)
eq_(str(dt), "2008-06-27 12:00:00.000125")
sldt = sqlite.DATETIME()
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27 12:00:00.000125")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
def test_truncate_microseconds(self):
dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)
dt_out = datetime.datetime(2008, 6, 27, 12, 0, 0)
eq_(str(dt), "2008-06-27 12:00:00.000125")
sldt = sqlite.DATETIME(truncate_microseconds=True)
bp = sldt.bind_processor(None)
eq_(bp(dt), "2008-06-27 12:00:00")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt_out)
def test_custom_format_compact(self):
dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125)
eq_(str(dt), "2008-06-27 12:00:00.000125")
sldt = sqlite.DATETIME(
storage_format=(
"%(year)04d%(month)02d%(day)02d"
"%(hour)02d%(minute)02d%(second)02d%(microsecond)06d"
),
regexp=r"(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})",
)
bp = sldt.bind_processor(None)
eq_(bp(dt), "20080627120000000125")
rp = sldt.result_processor(None, None)
eq_(rp(bp(dt)), dt)
| DateTimeTest |
python | sqlalchemy__sqlalchemy | test/orm/test_cascade.py | {
"start": 5784,
"end": 7763
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30), nullable=False),
)
Table(
"addresses",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", Integer, ForeignKey("users.id")),
Column("email_address", String(50), nullable=False),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
def test_delete_skips_lazy_raise(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, cascade="all, delete-orphan", lazy="raise"
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.metadata.create_all(testing.db)
sess = fixture_session()
u1 = User(
name="u1",
addresses=[
Address(email_address="e1"),
Address(email_address="e2"),
],
)
sess.add(u1)
sess.commit()
eq_(
sess.scalars(
select(Address).order_by(Address.email_address)
).all(),
[Address(email_address="e1"), Address(email_address="e2")],
)
sess.close()
sess.delete(u1)
sess.commit()
eq_(sess.scalars(select(Address)).all(), [])
| CasadeWithRaiseloadTest |
python | scrapy__scrapy | scrapy/cmdline.py | {
"start": 762,
"end": 7647
} | class ____(argparse.ArgumentParser):
def _parse_optional(
self, arg_string: str
) -> tuple[argparse.Action | None, str, str | None] | None:
# Support something like ‘-o -:json’, where ‘-:json’ is a value for
# ‘-o’, not another parameter.
if arg_string.startswith("-:"):
return None
return super()._parse_optional(arg_string)
def _iter_command_classes(module_name: str) -> Iterable[type[ScrapyCommand]]:
# TODO: add `name` attribute to commands and merge this function with
# scrapy.utils.spider.iter_spider_classes
for module in walk_modules(module_name):
for obj in vars(module).values():
if (
inspect.isclass(obj)
and issubclass(obj, ScrapyCommand)
and obj.__module__ == module.__name__
and obj not in (ScrapyCommand, BaseRunSpiderCommand)
):
yield obj
def _get_commands_from_module(module: str, inproject: bool) -> dict[str, ScrapyCommand]:
d: dict[str, ScrapyCommand] = {}
for cmd in _iter_command_classes(module):
if inproject or not cmd.requires_project:
cmdname = cmd.__module__.split(".")[-1]
d[cmdname] = cmd()
return d
def _get_commands_from_entry_points(
inproject: bool, group: str = "scrapy.commands"
) -> dict[str, ScrapyCommand]:
cmds: dict[str, ScrapyCommand] = {}
for entry_point in entry_points(group=group):
obj = entry_point.load()
if inspect.isclass(obj):
cmds[entry_point.name] = obj()
else:
raise ValueError(f"Invalid entry point {entry_point.name}")
return cmds
def _get_commands_dict(
settings: BaseSettings, inproject: bool
) -> dict[str, ScrapyCommand]:
cmds = _get_commands_from_module("scrapy.commands", inproject)
cmds.update(_get_commands_from_entry_points(inproject))
cmds_module = settings["COMMANDS_MODULE"]
if cmds_module:
cmds.update(_get_commands_from_module(cmds_module, inproject))
return cmds
def _get_project_only_cmds(settings: BaseSettings) -> set[str]:
return set(_get_commands_dict(settings, inproject=True)) - set(
_get_commands_dict(settings, inproject=False)
)
def _pop_command_name(argv: list[str]) -> str | None:
for i in range(1, len(argv)):
if not argv[i].startswith("-"):
return argv.pop(i)
return None
def _print_header(settings: BaseSettings, inproject: bool) -> None:
version = scrapy.__version__
if inproject:
print(f"Scrapy {version} - active project: {settings['BOT_NAME']}\n")
else:
print(f"Scrapy {version} - no active project\n")
def _print_commands(settings: BaseSettings, inproject: bool) -> None:
_print_header(settings, inproject)
print("Usage:")
print(" scrapy <command> [options] [args]\n")
print("Available commands:")
cmds = _get_commands_dict(settings, inproject)
for cmdname, cmdclass in sorted(cmds.items()):
print(f" {cmdname:<13} {cmdclass.short_desc()}")
if not inproject:
print()
print(" [ more ] More commands available when run from project directory")
print()
print('Use "scrapy <command> -h" to see more info about a command')
def _print_unknown_command_msg(
settings: BaseSettings, cmdname: str, inproject: bool
) -> None:
proj_only_cmds = _get_project_only_cmds(settings)
if cmdname in proj_only_cmds and not inproject:
cmd_list = ", ".join(sorted(proj_only_cmds))
print(
f"The {cmdname} command is not available from this location.\n"
f"These commands are only available from within a project: {cmd_list}.\n"
)
else:
print(f"Unknown command: {cmdname}\n")
def _print_unknown_command(
settings: BaseSettings, cmdname: str, inproject: bool
) -> None:
_print_header(settings, inproject)
_print_unknown_command_msg(settings, cmdname, inproject)
print('Use "scrapy" to see available commands')
def _run_print_help(
parser: argparse.ArgumentParser,
func: Callable[_P, None],
*a: _P.args,
**kw: _P.kwargs,
) -> None:
try:
func(*a, **kw)
except UsageError as e:
if str(e):
parser.error(str(e))
if e.print_help:
parser.print_help()
sys.exit(2)
def execute(argv: list[str] | None = None, settings: Settings | None = None) -> None:
if argv is None:
argv = sys.argv
if settings is None:
settings = get_project_settings()
# set EDITOR from environment if available
try:
editor = os.environ["EDITOR"]
except KeyError:
pass
else:
settings["EDITOR"] = editor
inproject = inside_project()
cmds = _get_commands_dict(settings, inproject)
cmdname = _pop_command_name(argv)
if not cmdname:
_print_commands(settings, inproject)
sys.exit(0)
elif cmdname not in cmds:
_print_unknown_command(settings, cmdname, inproject)
sys.exit(2)
cmd = cmds[cmdname]
parser = ScrapyArgumentParser(
formatter_class=ScrapyHelpFormatter,
usage=f"scrapy {cmdname} {cmd.syntax()}",
conflict_handler="resolve",
description=cmd.long_desc(),
)
settings.setdict(cmd.default_settings, priority="command")
cmd.settings = settings
cmd.add_options(parser)
opts, args = parser.parse_known_args(args=argv[1:])
_run_print_help(parser, cmd.process_options, args, opts)
if cmd.requires_crawler_process:
if settings[
"TWISTED_REACTOR"
] == _asyncio_reactor_path and not settings.getbool("FORCE_CRAWLER_PROCESS"):
cmd.crawler_process = AsyncCrawlerProcess(settings)
else:
cmd.crawler_process = CrawlerProcess(settings)
_run_print_help(parser, _run_command, cmd, args, opts)
sys.exit(cmd.exitcode)
def _run_command(cmd: ScrapyCommand, args: list[str], opts: argparse.Namespace) -> None:
if opts.profile:
_run_command_profiled(cmd, args, opts)
else:
cmd.run(args, opts)
def _run_command_profiled(
cmd: ScrapyCommand, args: list[str], opts: argparse.Namespace
) -> None:
if opts.profile:
sys.stderr.write(f"scrapy: writing cProfile stats to {opts.profile!r}\n")
loc = locals()
p = cProfile.Profile()
p.runctx("cmd.run(args, opts)", globals(), loc)
if opts.profile:
p.dump_stats(opts.profile)
if __name__ == "__main__":
try:
execute()
finally:
# Twisted prints errors in DebugInfo.__del__, but PyPy does not run gc.collect() on exit:
# http://doc.pypy.org/en/latest/cpython_differences.html
# ?highlight=gc.collect#differences-related-to-garbage-collection-strategies
garbage_collect()
| ScrapyArgumentParser |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/partial5.py | {
"start": 199,
"end": 721
} | class ____:
def __init__(self, x: int, y: int) -> None: ...
# This should generate an error because "y" has the wrong type.
v1 = partial(A, x=1, y="a")
v2 = partial(A, x=1, y=2)
reveal_type(v2, expected_text="partial[A]")
v2()
v2(x=2)
T = TypeVar("T", bound=A)
def func1(x: type[T]):
# This should generate an error because "z" is not a valid parameter.
v1 = partial(x, x=1, z="a")
v2 = partial(x, y=1)
# This should generate an error because it's missing "x".
v2()
v2(x=1)
@dataclass
| A |
python | walkccc__LeetCode | solutions/3042. Count Prefix and Suffix Pairs I/3042.py | {
"start": 0,
"end": 115
} | class ____:
def __init__(self):
self.children: dict[tuple[str, str], TrieNode] = {}
self.count = 0
| TrieNode |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/vector/vectorize_observation.py | {
"start": 13747,
"end": 14634
} | class ____(VectorizeTransformObservation):
"""Resizes image observations using OpenCV to shape.
Example:
>>> import gymnasium as gym
>>> envs = gym.make_vec("CarRacing-v3", num_envs=3, vectorization_mode="sync")
>>> obs, info = envs.reset(seed=123)
>>> obs.shape
(3, 96, 96, 3)
>>> envs = ResizeObservation(envs, shape=(28, 28))
>>> obs, info = envs.reset(seed=123)
>>> obs.shape
(3, 28, 28, 3)
>>> envs.close()
"""
def __init__(self, env: VectorEnv, shape: tuple[int, ...]):
"""Constructor that requires an image environment observation space with a shape.
Args:
env: The vector environment to wrap
shape: The resized observation shape
"""
super().__init__(env, transform_observation.ResizeObservation, shape=shape)
| ResizeObservation |
python | django__django | tests/inspectdb/models.py | {
"start": 5478,
"end": 5936
} | class ____(models.Model):
fk_do_nothing = models.ForeignKey(UniqueTogether, on_delete=models.DO_NOTHING)
fk_db_cascade = models.ForeignKey(ColumnTypes, on_delete=models.DB_CASCADE)
fk_set_null = models.ForeignKey(
DigitsInColumnName, on_delete=models.DB_SET_NULL, null=True
)
class Meta:
required_db_features = {
"supports_on_delete_db_cascade",
"supports_on_delete_db_null",
}
| DbOnDeleteModel |
python | apache__airflow | providers/celery/tests/unit/celery/cli/test_celery_command.py | {
"start": 16946,
"end": 21959
} | class ____:
@classmethod
def setup_class(cls):
with conf_vars({("core", "executor"): "CeleryExecutor"}):
importlib.reload(executor_loader)
importlib.reload(cli_parser)
cls.parser = cli_parser.get_parser()
@pytest.mark.db_test
@mock.patch("airflow.providers.celery.executors.celery_executor.app.control.inspect")
def test_list_celery_workers(self, mock_inspect):
args = self.parser.parse_args(["celery", "list-workers", "--output", "json"])
mock_instance = MagicMock()
mock_instance.active_queues.return_value = {
"celery@host_1": [{"name": "queue1"}, {"name": "queue2"}],
"celery@host_2": [{"name": "queue3"}],
}
mock_inspect.return_value = mock_instance
with contextlib.redirect_stdout(StringIO()) as temp_stdout:
celery_command.list_workers(args)
out = temp_stdout.getvalue()
celery_workers = json.loads(out)
for key in ["worker_name", "queues"]:
assert key in celery_workers[0]
assert any("celery@host_1" in h["worker_name"] for h in celery_workers)
@pytest.mark.db_test
@mock.patch("airflow.providers.celery.executors.celery_executor.app.control.shutdown")
def test_shutdown_worker(self, mock_shutdown):
args = self.parser.parse_args(["celery", "shutdown-worker", "-H", "celery@host_1"])
with patch(
"airflow.providers.celery.cli.celery_command._check_if_active_celery_worker", return_value=None
):
celery_command.shutdown_worker(args)
mock_shutdown.assert_called_once_with(destination=["celery@host_1"])
@pytest.mark.db_test
@mock.patch("airflow.providers.celery.executors.celery_executor.app.control.broadcast")
def test_shutdown_all_workers(self, mock_broadcast):
args = self.parser.parse_args(["celery", "shutdown-all-workers", "-y"])
with patch(
"airflow.providers.celery.cli.celery_command._check_if_active_celery_worker", return_value=None
):
celery_command.shutdown_all_workers(args)
mock_broadcast.assert_called_once_with("shutdown")
@pytest.mark.db_test
@mock.patch("airflow.providers.celery.executors.celery_executor.app.control.add_consumer")
def test_add_queue(self, mock_add_consumer):
args = self.parser.parse_args(["celery", "add-queue", "-q", "test1", "-H", "celery@host_1"])
with patch(
"airflow.providers.celery.cli.celery_command._check_if_active_celery_worker", return_value=None
):
celery_command.add_queue(args)
mock_add_consumer.assert_called_once_with("test1", destination=["celery@host_1"])
@pytest.mark.db_test
@mock.patch("airflow.providers.celery.executors.celery_executor.app.control.cancel_consumer")
def test_remove_queue(self, mock_cancel_consumer):
args = self.parser.parse_args(["celery", "remove-queue", "-q", "test1", "-H", "celery@host_1"])
with patch(
"airflow.providers.celery.cli.celery_command._check_if_active_celery_worker", return_value=None
):
celery_command.remove_queue(args)
mock_cancel_consumer.assert_called_once_with("test1", destination=["celery@host_1"])
@pytest.mark.db_test
@mock.patch("airflow.providers.celery.executors.celery_executor.app.control.cancel_consumer")
@mock.patch("airflow.providers.celery.executors.celery_executor.app.control.inspect")
def test_remove_all_queues(self, mock_inspect, mock_cancel_consumer):
args = self.parser.parse_args(["celery", "remove-all-queues", "-H", "celery@host_1"])
mock_instance = MagicMock()
mock_instance.active_queues.return_value = {
"celery@host_1": [{"name": "queue1"}, {"name": "queue2"}],
"celery@host_2": [{"name": "queue3"}],
}
mock_inspect.return_value = mock_instance
with patch(
"airflow.providers.celery.cli.celery_command._check_if_active_celery_worker", return_value=None
):
celery_command.remove_all_queues(args)
# Verify cancel_consumer was called for each queue
expected_calls = [
mock.call("queue1", destination=["celery@host_1"]),
mock.call("queue2", destination=["celery@host_1"]),
]
mock_cancel_consumer.assert_has_calls(expected_calls, any_order=True)
assert mock_cancel_consumer.call_count == 2
@patch("airflow.providers.celery.cli.celery_command.Process")
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Doesn't apply to pre-3.0")
def test_stale_bundle_cleanup(mock_process):
mock_process.__bool__.return_value = True
with _run_stale_bundle_cleanup():
...
calls = mock_process.call_args_list
assert len(calls) == 1
actual = [x.kwargs["target"] for x in calls]
assert actual[0].__name__ == "bundle_cleanup_main"
| TestRemoteCeleryControlCommands |
python | pytorch__pytorch | test/fx/quantization.py | {
"start": 3263,
"end": 6103
} | class ____(MinMaxObserver):
def __init__(self, quantizer, node):
super().__init__(quantizer, node)
self.relu_node, self.bn_node = None, None
if isinstance(quantizer.modules[node.target], torch.nn.ReLU):
self.relu_node = node
node = node.args[0]
if isinstance(quantizer.modules[node.target], torch.nn.BatchNorm2d):
self.bn_node = node
self.bn = quantizer.modules[self.bn_node.target]
node = node.args[0]
assert isinstance(quantizer.modules[node.target], torch.nn.modules.Conv2d)
self.conv_node = node
self.conv = quantizer.modules[self.conv_node.target]
def quantize(self, quantizer, node, load_arg):
mod = self.conv
weight, bias = mod.weight, mod.bias
if self.bn_node is not None:
weight, bias = fuse_conv_bn_weights(
weight,
bias,
self.bn.running_mean,
self.bn.running_var,
self.bn.eps,
self.bn.weight,
self.bn.bias,
)
min_val, max_val = float(weight.min()), float(weight.max())
act_scale, act_zp = self.scale_zeropoint()
weight_scale, weight_zp = _minmax_scale_zeropoint(min_val, max_val)
qweight = torch.quantize_per_tensor(
weight, weight_scale, weight_zp, torch.qint8
)
ctor = (
torch.ao.nn.intrinsic.quantized.ConvReLU2d
if self.relu_node is not None
else torch.ao.nn.quantized.Conv2d
)
qconv = ctor(
mod.in_channels,
mod.out_channels,
mod.kernel_size,
mod.stride,
mod.padding,
mod.dilation,
mod.groups,
mod.bias is not None,
mod.padding_mode,
)
qconv.set_weight_bias(qweight, bias)
qconv.scale = float(act_scale)
qconv.zero_point = int(act_zp)
parent_name, name = _parent_name(self.conv_node.target)
setattr(quantizer.modules[parent_name], name, qconv)
if self.bn_node is not None:
_, bn_name = _parent_name(self.bn_node.target)
# we can't just delete this because submodules's forwards (which are not longer use)
# try to call it, so replace with something that does nothing.
setattr(quantizer.modules[parent_name], bn_name, IdentityModule())
return quantizer.quantized_graph.create_node(
"call_module",
self.conv_node.target,
(load_arg(self.conv_node.args[0]),),
{},
)
# turn foo.bar -> ['foo', 'bar']
def _parent_name(target):
r = target.rsplit(".", 1)
if len(r) == 1:
return "", r[0]
else:
return r[0], r[1]
| ConvNormRelu |
python | kamyu104__LeetCode-Solutions | Python/data-stream-as-disjoint-intervals.py | {
"start": 201,
"end": 1353
} | class ____(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.__intervals = []
def addNum(self, val):
"""
:type val: int
:rtype: void
"""
def upper_bound(nums, target):
left, right = 0, len(nums) - 1
while left <= right:
mid = left + (right - left) / 2
if nums[mid].start > target:
right = mid - 1
else:
left = mid + 1
return left
i = upper_bound(self.__intervals, val)
start, end = val, val
if i != 0 and self.__intervals[i-1].end + 1 >= val:
i -= 1
while i != len(self.__intervals) and \
end + 1 >= self.__intervals[i].start:
start = min(start, self.__intervals[i].start)
end = max(end, self.__intervals[i].end)
del self.__intervals[i]
self.__intervals.insert(i, Interval(start, end))
def getIntervals(self):
"""
:rtype: List[Interval]
"""
return self.__intervals
| SummaryRanges |
python | django__django | tests/template_tests/syntax_tests/test_comment.py | {
"start": 68,
"end": 3685
} | class ____(SimpleTestCase):
@setup({"comment-syntax01": "{# this is hidden #}hello"})
def test_comment_syntax01(self):
output = self.engine.render_to_string("comment-syntax01")
self.assertEqual(output, "hello")
@setup({"comment-syntax02": "{# this is hidden #}hello{# foo #}"})
def test_comment_syntax02(self):
output = self.engine.render_to_string("comment-syntax02")
self.assertEqual(output, "hello")
@setup({"comment-syntax03": "foo{# {% if %} #}"})
def test_comment_syntax03(self):
output = self.engine.render_to_string("comment-syntax03")
self.assertEqual(output, "foo")
@setup({"comment-syntax04": "foo{# {% endblock %} #}"})
def test_comment_syntax04(self):
output = self.engine.render_to_string("comment-syntax04")
self.assertEqual(output, "foo")
@setup({"comment-syntax05": "foo{# {% somerandomtag %} #}"})
def test_comment_syntax05(self):
output = self.engine.render_to_string("comment-syntax05")
self.assertEqual(output, "foo")
@setup({"comment-syntax06": "foo{# {% #}"})
def test_comment_syntax06(self):
output = self.engine.render_to_string("comment-syntax06")
self.assertEqual(output, "foo")
@setup({"comment-syntax07": "foo{# %} #}"})
def test_comment_syntax07(self):
output = self.engine.render_to_string("comment-syntax07")
self.assertEqual(output, "foo")
@setup({"comment-syntax08": "foo{# %} #}bar"})
def test_comment_syntax08(self):
output = self.engine.render_to_string("comment-syntax08")
self.assertEqual(output, "foobar")
@setup({"comment-syntax09": "foo{# {{ #}"})
def test_comment_syntax09(self):
output = self.engine.render_to_string("comment-syntax09")
self.assertEqual(output, "foo")
@setup({"comment-syntax10": "foo{# }} #}"})
def test_comment_syntax10(self):
output = self.engine.render_to_string("comment-syntax10")
self.assertEqual(output, "foo")
@setup({"comment-syntax11": "foo{# { #}"})
def test_comment_syntax11(self):
output = self.engine.render_to_string("comment-syntax11")
self.assertEqual(output, "foo")
@setup({"comment-syntax12": "foo{# } #}"})
def test_comment_syntax12(self):
output = self.engine.render_to_string("comment-syntax12")
self.assertEqual(output, "foo")
@setup({"comment-tag01": "{% comment %}this is hidden{% endcomment %}hello"})
def test_comment_tag01(self):
output = self.engine.render_to_string("comment-tag01")
self.assertEqual(output, "hello")
@setup(
{
"comment-tag02": "{% comment %}this is hidden{% endcomment %}"
"hello{% comment %}foo{% endcomment %}"
}
)
def test_comment_tag02(self):
output = self.engine.render_to_string("comment-tag02")
self.assertEqual(output, "hello")
@setup({"comment-tag03": "foo{% comment %} {% if %} {% endcomment %}"})
def test_comment_tag03(self):
output = self.engine.render_to_string("comment-tag03")
self.assertEqual(output, "foo")
@setup({"comment-tag04": "foo{% comment %} {% endblock %} {% endcomment %}"})
def test_comment_tag04(self):
output = self.engine.render_to_string("comment-tag04")
self.assertEqual(output, "foo")
@setup({"comment-tag05": "foo{% comment %} {% somerandomtag %} {% endcomment %}"})
def test_comment_tag05(self):
output = self.engine.render_to_string("comment-tag05")
self.assertEqual(output, "foo")
| CommentSyntaxTests |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.