language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/ir.py | {
"start": 100198,
"end": 102417
} | class ____(IR):
"""Merge sorted operation."""
__slots__ = ("key",)
_non_child = ("schema", "key")
key: str
"""Key that is sorted."""
def __init__(self, schema: Schema, key: str, left: IR, right: IR):
# Children must be Sort or Repartition(Sort).
# The Repartition(Sort) case happens during fallback.
left_sort_child = left if isinstance(left, Sort) else left.children[0]
right_sort_child = right if isinstance(right, Sort) else right.children[0]
assert isinstance(left_sort_child, Sort)
assert isinstance(right_sort_child, Sort)
assert left_sort_child.order == right_sort_child.order
assert len(left.schema.keys()) <= len(right.schema.keys())
self.schema = schema
self.key = key
self.children = (left, right)
self._non_child_args = (key,)
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="MergeSorted")
def do_evaluate(
cls, key: str, *dfs: DataFrame, context: IRExecutionContext
) -> DataFrame:
"""Evaluate and return a dataframe."""
stream = get_joined_cuda_stream(
context.get_cuda_stream, upstreams=[df.stream for df in dfs]
)
left, right = dfs
right = right.discard_columns(right.column_names_set - left.column_names_set)
on_col_left = left.select_columns({key})[0]
on_col_right = right.select_columns({key})[0]
result = DataFrame.from_table(
plc.merge.merge(
[right.table, left.table],
[left.column_names.index(key), right.column_names.index(key)],
[on_col_left.order, on_col_right.order],
[on_col_left.null_order, on_col_right.null_order],
stream=stream,
),
left.column_names,
left.dtypes,
stream=stream,
)
# Join the original streams back into the result stream to ensure that the
# deallocations (on the original streams) happen after the result is ready
join_cuda_streams(
downstreams=[df.stream for df in dfs], upstreams=(result.stream,)
)
return result
| MergeSorted |
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 20344,
"end": 20498
} | class ____(serializers.ModelSerializer):
class Meta:
model = DjangoFilterOrderingModel
fields = '__all__'
| DjangoFilterOrderingSerializer |
python | apache__airflow | helm-tests/tests/helm_tests/security/test_rbac_pod_log_reader.py | {
"start": 914,
"end": 5149
} | class ____:
"""Tests RBAC Pod Reader."""
@pytest.mark.parametrize(
("webserver", "airflow_version", "expected"),
[
(True, "2.9.0", ["release-name-airflow-webserver"]),
(False, "2.9.0", []),
(True, "3.0.0", ["release-name-airflow-api-server"]),
(False, "3.0.0", ["release-name-airflow-api-server"]),
],
)
def test_pod_log_reader_rolebinding(self, webserver, airflow_version, expected):
docs = render_chart(
values={
"webserver": {"allowPodLogReading": webserver},
"apiServer": {"allowPodLogReading": airflow_version >= "3.0.0"},
"airflowVersion": airflow_version,
},
show_only=["templates/rbac/pod-log-reader-rolebinding.yaml"],
)
actual = jmespath.search("subjects[*].name", docs[0]) if docs else []
assert actual == expected
@pytest.mark.parametrize(
("webserver", "expected"),
[
(True, "release-name-pod-log-reader-role"),
(False, None),
],
)
def test_pod_log_reader_role(self, webserver, expected):
docs = render_chart(
values={
"webserver": {"allowPodLogReading": webserver},
},
show_only=["templates/rbac/pod-log-reader-role.yaml"],
)
actual = jmespath.search("metadata.name", docs[0]) if docs else None
assert actual == expected
@pytest.mark.parametrize(
("multiNamespaceMode", "namespace", "expectedRole", "expectedRoleBinding"),
[
(
True,
"namespace",
"namespace-release-name-pod-log-reader-role",
"namespace-release-name-pod-log-reader-rolebinding",
),
(
True,
"other-ns",
"other-ns-release-name-pod-log-reader-role",
"other-ns-release-name-pod-log-reader-rolebinding",
),
(
False,
"namespace",
"release-name-pod-log-reader-role",
"release-name-pod-log-reader-rolebinding",
),
],
)
def test_pod_log_reader_rolebinding_multi_namespace(
self, multiNamespaceMode, namespace, expectedRole, expectedRoleBinding
):
docs = render_chart(
namespace=namespace,
values={"webserver": {"allowPodLogReading": True}, "multiNamespaceMode": multiNamespaceMode},
show_only=["templates/rbac/pod-log-reader-rolebinding.yaml"],
)
actualRoleBinding = jmespath.search("metadata.name", docs[0])
assert actualRoleBinding == expectedRoleBinding
actualRoleRef = jmespath.search("roleRef.name", docs[0])
assert actualRoleRef == expectedRole
actualKind = jmespath.search("kind", docs[0])
actualRoleRefKind = jmespath.search("roleRef.kind", docs[0])
if multiNamespaceMode:
assert actualKind == "ClusterRoleBinding"
assert actualRoleRefKind == "ClusterRole"
else:
assert actualKind == "RoleBinding"
assert actualRoleRefKind == "Role"
@pytest.mark.parametrize(
("multiNamespaceMode", "namespace", "expectedRole"),
[
(True, "namespace", "namespace-release-name-pod-log-reader-role"),
(True, "other-ns", "other-ns-release-name-pod-log-reader-role"),
(False, "namespace", "release-name-pod-log-reader-role"),
],
)
def test_pod_log_reader_role_multi_namespace(self, multiNamespaceMode, namespace, expectedRole):
docs = render_chart(
namespace=namespace,
values={"webserver": {"allowPodLogReading": True}, "multiNamespaceMode": multiNamespaceMode},
show_only=["templates/rbac/pod-log-reader-role.yaml"],
)
actualRole = jmespath.search("metadata.name", docs[0])
assert actualRole == expectedRole
actualKind = jmespath.search("kind", docs[0])
if multiNamespaceMode:
assert actualKind == "ClusterRole"
else:
assert actualKind == "Role"
| TestPodReader |
python | PrefectHQ__prefect | tests/utilities/test_visualization.py | {
"start": 6484,
"end": 9817
} | class ____:
@pytest.mark.parametrize(
"test_flow",
[
simple_sync_flow,
simple_async_flow_with_async_tasks,
simple_async_flow_with_sync_tasks,
async_flow_with_subflow,
flow_with_task_interaction,
flow_with_mixed_tasks,
flow_with_untrackable_task_result,
flow_with_flow_params,
],
)
def test_visualize_does_not_raise(self, test_flow, monkeypatch):
monkeypatch.setattr(
"prefect.utilities.visualization.visualize_task_dependencies",
MagicMock(return_value=None),
)
test_flow.visualize()
@pytest.mark.parametrize(
"test_flow, expected_nodes",
[
(
simple_sync_flow,
{
'\t"sync_task_b-0"\n',
'\t"sync_task_a-0"\n',
'\t"sync_task_a-0" -> "sync_task_b-0"\n',
},
),
(
simple_async_flow_with_async_tasks,
{
'\t"async_task_a-0"\n',
'\t"async_task_b-0"\n',
'\t"async_task_a-0" -> "async_task_b-0"\n',
},
),
(
simple_async_flow_with_sync_tasks,
{
'\t"sync_task_a-0"\n',
'\t"sync_task_b-0"\n',
'\t"sync_task_a-0" -> "sync_task_b-0"\n',
},
),
(
async_flow_with_subflow,
{
'\t"sync_task_a-0" -> "sync_task_b-0"\n',
'\t"sync_task_b-0"\n',
'\t"simple-async-flow-with-sync-tasks-0"\n',
'\t"sync_task_a-0"\n',
},
),
(
flow_with_task_interaction,
{
'\t"sync_task_a-0"\n',
'\t"sync_task_b-0"\n',
},
),
(
flow_with_mixed_tasks,
{
'\t"sync_task_a-0"\n',
'\t"async_task_b-0"\n',
'\t"sync_task_a-1"\n',
'\t"sync_task_a-0" -> "async_task_b-0"\n',
},
),
(
flow_with_untrackable_task_result,
{
'\t"untrackable_task_result-0"\n',
'\t"sync_task_b-0"\n',
},
),
(
flow_with_flow_params,
{
'\t"sync_task_a-0"\n',
'\t"sync_task_b-0"\n',
},
),
],
)
async def test_visualize_graph_contents(
self, test_flow, expected_nodes, monkeypatch
):
mock_visualize = MagicMock(return_value=None)
monkeypatch.setattr(
"prefect.utilities.visualization.visualize_task_dependencies",
mock_visualize,
)
await test_flow.visualize()
graph = mock_visualize.call_args[0][0]
actual_nodes = set(graph.body)
assert actual_nodes == expected_nodes, (
f"Expected nodes {expected_nodes} but found {actual_nodes}"
)
| TestFlowVisualise |
python | getsentry__sentry | src/sentry/flags/providers.py | {
"start": 12487,
"end": 13047
} | class ____(serializers.Serializer):
eventName = serializers.CharField(required=True)
timestamp = serializers.CharField(required=True)
metadata = serializers.DictField(required=True)
user = serializers.DictField(required=False, child=serializers.CharField())
userID = serializers.CharField(required=False)
value = serializers.CharField(required=False)
statsigMetadata = serializers.DictField(required=False)
timeUUID = serializers.UUIDField(required=False)
unitID = serializers.CharField(required=False)
| StatsigEventSerializer |
python | readthedocs__readthedocs.org | readthedocs/storage/s3_storage.py | {
"start": 3181,
"end": 3685
} | class ____(S3StaticStorageMixin, OverrideHostnameMixin, S3Boto3Storage):
"""
Storage backend for static files used outside Django's static files.
This is the same as S3StaticStorage, but without inheriting from S3ManifestStaticStorage,
this way we can get the URL of any file in that bucket, even hashed ones.
"""
# Root path of the nginx internal redirect
# that will serve files from this storage.
internal_redirect_root_path = "proxito-static"
| NoManifestS3StaticStorage |
python | pandas-dev__pandas | pandas/tests/frame/constructors/test_from_dict.py | {
"start": 195,
"end": 7988
} | class ____:
# Note: these tests are specific to the from_dict method, not for
# passing dictionaries to DataFrame.__init__
def test_constructor_list_of_odicts(self):
data = [
OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
OrderedDict([["a", 1.5], ["d", 6]]),
OrderedDict(),
OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
]
result = DataFrame(data)
expected = DataFrame.from_dict(
dict(zip(range(len(data)), data)), orient="index"
)
tm.assert_frame_equal(result, expected.reindex(result.index))
def test_constructor_single_row(self):
data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex(
result.index
)
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
]
sdict = OrderedDict(zip(["x", "y"], data))
idx = Index(["a", "b", "c"])
# all named
data2 = [
Series([1.5, 3, 4], idx, dtype="O", name="x"),
Series([1.5, 3, 6], idx, name="y"),
]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [
Series([1.5, 3, 4], idx, dtype="O", name="x"),
Series([1.5, 3, 6], idx),
]
result = DataFrame(data2)
sdict = OrderedDict(zip(["x", "Unnamed 0"], data))
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
# none named
data = [
OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
OrderedDict([["a", 1.5], ["d", 6]]),
OrderedDict(),
OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6, dtype=np.int64))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series(dtype=object)])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(["a", "b", "c"])
data2 = [Series([1.5, 3, 4], idx, dtype="O"), Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient="index")
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self, float_string_frame):
data_dict = float_string_frame.T._series
recons = DataFrame.from_dict(data_dict, orient="index")
expected = float_string_frame.reindex(index=recons.index)
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {"hi": [32, 3, 3], "there": [3, 5, 3]}
rs = DataFrame.from_dict(a, orient="index")
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_constructor_from_ordered_dict(self):
# GH#8425
a = OrderedDict(
[
("one", OrderedDict([("col_a", "foo1"), ("col_b", "bar1")])),
("two", OrderedDict([("col_a", "foo2"), ("col_b", "bar2")])),
("three", OrderedDict([("col_a", "foo3"), ("col_b", "bar3")])),
]
)
expected = DataFrame.from_dict(a, orient="columns").T
result = DataFrame.from_dict(a, orient="index")
tm.assert_frame_equal(result, expected)
def test_from_dict_columns_parameter(self):
# GH#18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(
OrderedDict([("A", [1, 2]), ("B", [4, 5])]),
orient="index",
columns=["one", "two"],
)
expected = DataFrame([[1, 2], [4, 5]], index=["A", "B"], columns=["one", "two"])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(
{"A": [1, 2], "B": [4, 5]},
orient="columns",
columns=["one", "two"],
)
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict({"A": [1, 2], "B": [4, 5]}, columns=["one", "two"])
@pytest.mark.parametrize(
"data_dict, orient, expected",
[
({}, "index", RangeIndex(0)),
(
[{("a",): 1}, {("a",): 2}],
"columns",
Index([("a",)], tupleize_cols=False),
),
(
[OrderedDict([(("a",), 1), (("b",), 2)])],
"columns",
Index([("a",), ("b",)], tupleize_cols=False),
),
([{("a", "b"): 1}], "columns", Index([("a", "b")], tupleize_cols=False)),
],
)
def test_constructor_from_dict_tuples(self, data_dict, orient, expected):
# GH#16769
df = DataFrame.from_dict(data_dict, orient)
result = df.columns
tm.assert_index_equal(result, expected)
def test_frame_dict_constructor_empty_series(self):
s1 = Series(
[1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)])
)
s2 = Series(
[1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)])
)
s3 = Series(dtype=object)
# it works!
DataFrame({"foo": s1, "bar": s2, "baz": s3})
DataFrame.from_dict({"foo": s1, "baz": s3, "bar": s2})
def test_from_dict_scalars_requires_index(self):
msg = "If using all scalar values, you must pass an index"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))
def test_from_dict_orient_invalid(self):
msg = (
"Expected 'index', 'columns' or 'tight' for orient parameter. "
"Got 'abc' instead"
)
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict({"foo": 1, "baz": 3, "bar": 2}, orient="abc")
def test_from_dict_order_with_single_column(self):
data = {
"alpha": {
"value2": 123,
"value1": 532,
"animal": 222,
"plant": False,
"name": "test",
}
}
result = DataFrame.from_dict(
data,
orient="columns",
)
expected = DataFrame(
[[123], [532], [222], [False], ["test"]],
index=["value2", "value1", "animal", "plant", "name"],
columns=["alpha"],
)
tm.assert_frame_equal(result, expected)
| TestFromDict |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/external-systems/apis/minimal_resource.py | {
"start": 108,
"end": 581
} | class ____(dg.ConfigurableResource):
# highlight-end
@property
def query_string(self) -> str:
latittude = "37.615223"
longitude = "-122.389977"
time_zone = "America/Los_Angeles"
return f"https://api.sunrise-sunset.org/json?lat={latittude}&lng={longitude}&date=today&tzid={time_zone}"
def sunrise(self) -> str:
data = requests.get(self.query_string, timeout=5).json()
return data["results"]["sunrise"]
| SunResource |
python | huggingface__transformers | tests/models/prophetnet/test_modeling_prophetnet.py | {
"start": 17190,
"end": 26010
} | class ____:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
hidden_size=16,
encoder_seq_length=7,
decoder_seq_length=7,
# For common tests
is_training=True,
is_decoder=True,
use_attention_mask=True,
add_cross_attention=False,
use_cache=False,
use_labels=True,
decoder_start_token_id=0,
encoder_ffn_dim=32,
num_encoder_layers=2,
num_encoder_attention_heads=4,
decoder_ffn_dim=32,
num_decoder_layers=2,
num_decoder_attention_heads=4,
max_position_embeddings=30,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
ngram=2,
num_buckets=32,
relative_max_distance=128,
disable_ngram_loss=False,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_decoder_layers
self.num_encoder_layers = num_encoder_layers
self.num_decoder_layers = num_decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.num_attention_heads = num_decoder_attention_heads
self.num_encoder_attention_heads = num_encoder_attention_heads
self.num_decoder_attention_heads = num_decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.ngram = ngram
self.num_buckets = num_buckets
self.relative_max_distance = relative_max_distance
self.use_cache = use_cache
self.disable_ngram_loss = disable_ngram_loss
self.max_position_embeddings = max_position_embeddings
self.add_cross_attention = add_cross_attention
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.num_hidden_states_types = 2 # decoder_main, decoder_ngram
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
config = ProphetNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_encoder_layers=self.num_encoder_layers,
num_decoder_layers=self.num_decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_ffn_dim=self.encoder_ffn_dim,
num_encoder_attention_heads=self.num_encoder_attention_heads,
num_decoder_attention_heads=self.num_decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
ngram=self.ngram,
num_buckets=self.num_buckets,
relative_max_distance=self.relative_max_distance,
disable_ngram_loss=self.disable_ngram_loss,
max_position_embeddings=self.max_position_embeddings,
add_cross_attention=self.add_cross_attention,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
attention_mask,
lm_labels,
) = self.prepare_config_and_inputs()
encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = ProphetNetDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = ProphetNetDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values, use_cache=True)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
| ProphetNetStandaloneDecoderModelTester |
python | rapidsai__cudf | python/cudf/cudf/core/column/decimal.py | {
"start": 18809,
"end": 24752
} | class ____(DecimalBaseColumn):
_VALID_PLC_TYPES = {plc.TypeId.DECIMAL64}
def __init__(
self,
plc_column: plc.Column,
size: int,
dtype: Decimal64Dtype,
offset: int,
null_count: int,
exposed: bool,
) -> None:
if not isinstance(dtype, Decimal64Dtype):
raise ValueError(f"{dtype=} must be a Decimal64Dtype instance")
super().__init__(
plc_column=plc_column,
size=size,
dtype=dtype,
offset=offset,
null_count=null_count,
exposed=exposed,
)
@classmethod
def from_arrow(cls, data: pa.Array | pa.ChunkedArray) -> Self:
return cls._from_32_64_arrow(
data, view_type="int64", plc_type=plc.TypeId.DECIMAL64, step=2
)
def to_arrow(self) -> pa.Array:
data_buf_64 = np.array(self.base_data.memoryview()).view("int64") # type: ignore[union-attr]
data_buf_128: np.ndarray = np.empty(
len(data_buf_64) * 2, dtype="int64"
)
# use striding to set the first 64 bits of each 128-bit chunk:
data_buf_128[::2] = data_buf_64
# use striding again to set the remaining bits of each 128-bit chunk:
# 0 for non-negative values, -1 for negative values:
data_buf_128[1::2] = np.piecewise(
data_buf_64, [data_buf_64 < 0], [-1, 0]
)
data_buf = pa.py_buffer(data_buf_128)
mask_buf = (
self.base_mask
if self.base_mask is None
else pa.py_buffer(self.base_mask.memoryview())
)
return pa.Array.from_buffers(
type=self.dtype.to_arrow(), # type: ignore[union-attr]
offset=self._offset,
length=self.size,
# PyArrow stubs are too strict - from_buffers should accept None for missing buffers
buffers=[mask_buf, data_buf], # type: ignore[list-item]
)
def _with_type_metadata(self: Self, dtype: DtypeObj) -> Self:
if isinstance(dtype, Decimal64Dtype):
self.dtype.precision = dtype.precision # type: ignore[union-attr]
if cudf.get_option("mode.pandas_compatible"):
self._dtype = get_dtype_of_same_type(dtype, self.dtype)
return self
def _get_decimal_type(
lhs_dtype: DecimalDtype,
rhs_dtype: DecimalDtype,
op: str,
) -> DecimalDtype:
"""
Returns the resulting decimal type after calculating
precision & scale when performing the binary operation
`op` for the given dtypes.
For precision & scale calculations see : https://docs.microsoft.com/en-us/sql/t-sql/data-types/precision-scale-and-length-transact-sql
"""
# This should at some point be hooked up to libcudf's
# binary_operation_fixed_point_scale
# Note: libcudf decimal types don't have a concept of precision
p1, p2 = lhs_dtype.precision, rhs_dtype.precision
s1, s2 = lhs_dtype.scale, rhs_dtype.scale
if op in {"__add__", "__sub__"}:
scale = max(s1, s2)
precision = scale + max(p1 - s1, p2 - s2) + 1
if precision > Decimal128Dtype.MAX_PRECISION:
precision = Decimal128Dtype.MAX_PRECISION
scale = Decimal128Dtype.MAX_PRECISION - max(p1 - s1, p2 - s2)
elif op in {"__mul__", "__div__"}:
if op == "__mul__":
scale = s1 + s2
precision = p1 + p2 + 1
else:
scale = max(6, s1 + p2 + 1)
precision = p1 - s1 + s2 + scale
if precision > Decimal128Dtype.MAX_PRECISION:
integral = precision - scale
if integral < 32:
scale = min(scale, Decimal128Dtype.MAX_PRECISION - integral)
elif scale > 6 and integral > 32:
scale = 6
precision = Decimal128Dtype.MAX_PRECISION
else:
raise NotImplementedError()
try:
if isinstance(lhs_dtype, type(rhs_dtype)):
# SCENARIO 1: If `lhs_dtype` & `rhs_dtype` are same, then try to
# see if `precision` & `scale` can be fit into this type.
return lhs_dtype.__class__(precision=precision, scale=scale)
else:
# SCENARIO 2: If `lhs_dtype` & `rhs_dtype` are of different dtypes,
# then try to see if `precision` & `scale` can be fit into the type
# with greater MAX_PRECISION (i.e., the bigger dtype).
if lhs_dtype.MAX_PRECISION >= rhs_dtype.MAX_PRECISION:
return lhs_dtype.__class__(precision=precision, scale=scale)
else:
return rhs_dtype.__class__(precision=precision, scale=scale)
except ValueError:
# Call to _validate fails, which means we need
# to goto SCENARIO 3.
pass
# SCENARIO 3: If either of the above two scenarios fail, then get the
# MAX_PRECISION of `lhs_dtype` & `rhs_dtype` so that we can only check
# and return a dtype that is greater than or equal to input dtype that
# can fit `precision` & `scale`.
max_precision = max(lhs_dtype.MAX_PRECISION, rhs_dtype.MAX_PRECISION)
for decimal_type in (
Decimal32Dtype,
Decimal64Dtype,
Decimal128Dtype,
):
if decimal_type.MAX_PRECISION >= max_precision:
try:
return decimal_type(precision=precision, scale=scale)
except ValueError:
# Call to _validate fails, which means we need
# to try the next dtype
continue
# if we've reached this point, we cannot create a decimal type without
# overflow; raise an informative error
raise ValueError(
f"Performing {op} between columns of type {lhs_dtype!r} and "
f"{rhs_dtype!r} would result in overflow"
)
def _same_precision_and_scale(lhs: DecimalDtype, rhs: DecimalDtype) -> bool:
return lhs.precision == rhs.precision and lhs.scale == rhs.scale
| Decimal64Column |
python | openai__openai-python | src/openai/resources/beta/threads/messages.py | {
"start": 29640,
"end": 30794
} | class ____:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
self.create = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
messages.create, # pyright: ignore[reportDeprecated],
)
)
self.retrieve = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
messages.retrieve, # pyright: ignore[reportDeprecated],
)
)
self.update = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
messages.update, # pyright: ignore[reportDeprecated],
)
)
self.list = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
messages.list, # pyright: ignore[reportDeprecated],
)
)
self.delete = ( # pyright: ignore[reportDeprecated]
async_to_streamed_response_wrapper(
messages.delete, # pyright: ignore[reportDeprecated],
)
)
| AsyncMessagesWithStreamingResponse |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/string_conversion.py | {
"start": 320,
"end": 403
} | class ____:
def __str__(self):
return request.GET["tainted"]
| StrIsTainted |
python | streamlit__streamlit | lib/tests/streamlit/form_test.py | {
"start": 17556,
"end": 18145
} | class ____(DeltaGeneratorTestCase):
def test_exception_for_callbacks_on_widgets(self):
with pytest.raises(StreamlitAPIException):
with st.form("form"):
st.radio("radio", ["a", "b", "c"], 0, on_change=lambda x: x)
st.form_submit_button()
def test_no_exception_for_callbacks_on_submit_button(self):
with st.form("form"):
st.radio("radio", ["a", "b", "c"], 0)
st.form_submit_button(on_click=lambda x: x)
@patch("streamlit.runtime.Runtime.exists", MagicMock(return_value=True))
| FormStateInteractionTest |
python | aimacode__aima-python | text.py | {
"start": 11838,
"end": 14286
} | class ____:
"""This is a much harder problem than the shift decoder. There are 26!
permutations, so we can't try them all. Instead we have to search.
We want to search well, but there are many things to consider:
Unigram probabilities (E is the most common letter); Bigram probabilities
(TH is the most common bigram); word probabilities (I and A are the most
common one-letter words, etc.); etc.
We could represent a search state as a permutation of the 26 letters,
and alter the solution through hill climbing. With an initial guess
based on unigram probabilities, this would probably fare well. However,
I chose instead to have an incremental representation. A state is
represented as a letter-to-letter map; for example {'z': 'e'} to
represent that 'z' will be translated to 'e'."""
def __init__(self, training_text, ciphertext=None):
self.Pwords = UnigramWordModel(words(training_text))
self.P1 = UnigramWordModel(training_text) # By letter
self.P2 = NgramWordModel(2, words(training_text)) # By letter pair
def decode(self, ciphertext):
"""Search for a decoding of the ciphertext."""
self.ciphertext = canonicalize(ciphertext)
# reduce domain to speed up search
self.chardomain = {c for c in self.ciphertext if c != ' '}
problem = PermutationDecoderProblem(decoder=self)
solution = search.best_first_graph_search(
problem, lambda node: self.score(node.state))
solution.state[' '] = ' '
return translate(self.ciphertext, lambda c: solution.state[c])
def score(self, code):
"""Score is product of word scores, unigram scores, and bigram scores.
This can get very small, so we use logs and exp."""
# remake code dictionary to contain translation for all characters
full_code = code.copy()
full_code.update({x: x for x in self.chardomain if x not in code})
full_code[' '] = ' '
text = translate(self.ciphertext, lambda c: full_code[c])
# add small positive value to prevent computing log(0)
# TODO: Modify the values to make score more accurate
logP = (sum(np.log(self.Pwords[word] + 1e-20) for word in words(text)) +
sum(np.log(self.P1[c] + 1e-5) for c in text) +
sum(np.log(self.P2[b] + 1e-10) for b in bigrams(text)))
return -np.exp(logP)
| PermutationDecoder |
python | pandas-dev__pandas | pandas/tests/io/formats/test_to_html.py | {
"start": 14372,
"end": 26547
} | class ____:
@pytest.fixture
def df(self):
index = ["foo", "bar", "baz"]
df = DataFrame(
{"A": [1, 2, 3], "B": [1.2, 3.4, 5.6], "C": ["one", "two", np.nan]},
columns=["A", "B", "C"],
index=index,
)
return df
@pytest.fixture
def expected_without_index(self, datapath):
return expected_html(datapath, "index_2")
def test_to_html_flat_index_without_name(
self, datapath, df, expected_without_index
):
expected_with_index = expected_html(datapath, "index_1")
assert df.to_html() == expected_with_index
result = df.to_html(index=False)
for i in df.index:
assert i not in result
assert result == expected_without_index
def test_to_html_flat_index_with_name(self, datapath, df, expected_without_index):
df.index = Index(["foo", "bar", "baz"], name="idx")
expected_with_index = expected_html(datapath, "index_3")
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
def test_to_html_multiindex_without_names(
self, datapath, df, expected_without_index
):
tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
df.index = MultiIndex.from_tuples(tuples)
expected_with_index = expected_html(datapath, "index_4")
assert df.to_html() == expected_with_index
result = df.to_html(index=False)
for i in ["foo", "bar", "car", "bike"]:
assert i not in result
# must be the same result as normal index
assert result == expected_without_index
def test_to_html_multiindex_with_names(self, datapath, df, expected_without_index):
tuples = [("foo", "car"), ("foo", "bike"), ("bar", "car")]
df.index = MultiIndex.from_tuples(tuples, names=["idx1", "idx2"])
expected_with_index = expected_html(datapath, "index_5")
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
@pytest.mark.parametrize("classes", ["sortable draggable", ["sortable", "draggable"]])
def test_to_html_with_classes(classes, datapath):
df = DataFrame()
expected = expected_html(datapath, "with_classes")
result = df.to_html(classes=classes)
assert result == expected
def test_to_html_no_index_max_rows(datapath):
# GH 14998
df = DataFrame({"A": [1, 2, 3, 4]})
result = df.to_html(index=False, max_rows=1)
expected = expected_html(datapath, "gh14998_expected_output")
assert result == expected
def test_to_html_multiindex_max_cols(datapath):
# GH 6131
index = MultiIndex(
levels=[["ba", "bb", "bc"], ["ca", "cb", "cc"]],
codes=[[0, 1, 2], [0, 1, 2]],
names=["b", "c"],
)
columns = MultiIndex(
levels=[["d"], ["aa", "ab", "ac"]],
codes=[[0, 0, 0], [0, 1, 2]],
names=[None, "a"],
)
data = np.array(
[[1.0, np.nan, np.nan], [np.nan, 2.0, np.nan], [np.nan, np.nan, 3.0]]
)
df = DataFrame(data, index, columns)
result = df.to_html(max_cols=2)
expected = expected_html(datapath, "gh6131_expected_output")
assert result == expected
def test_to_html_multi_indexes_index_false(datapath):
# GH 22579
df = DataFrame(
{"a": range(10), "b": range(10, 20), "c": range(10, 20), "d": range(10, 20)}
)
df.columns = MultiIndex.from_product([["a", "b"], ["c", "d"]])
df.index = MultiIndex.from_product([["a", "b"], ["c", "d", "e", "f", "g"]])
result = df.to_html(index=False)
expected = expected_html(datapath, "gh22579_expected_output")
assert result == expected
@pytest.mark.parametrize("index_names", [True, False])
@pytest.mark.parametrize("header", [True, False])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize(
"column_index, column_type",
[
(Index([0, 1]), "unnamed_standard"),
(Index([0, 1], name="columns.name"), "named_standard"),
(MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
(
MultiIndex.from_product(
[["a"], ["b", "c"]], names=["columns.name.0", "columns.name.1"]
),
"named_multi",
),
],
)
@pytest.mark.parametrize(
"row_index, row_type",
[
(Index([0, 1]), "unnamed_standard"),
(Index([0, 1], name="index.name"), "named_standard"),
(MultiIndex.from_product([["a"], ["b", "c"]]), "unnamed_multi"),
(
MultiIndex.from_product(
[["a"], ["b", "c"]], names=["index.name.0", "index.name.1"]
),
"named_multi",
),
],
)
def test_to_html_basic_alignment(
datapath, row_index, row_type, column_index, column_type, index, header, index_names
):
# GH 22747, GH 22579
df = DataFrame(np.zeros((2, 2), dtype=int), index=row_index, columns=column_index)
result = df.to_html(index=index, header=header, index_names=index_names)
if not index:
row_type = "none"
elif not index_names and row_type.startswith("named"):
row_type = "un" + row_type
if not header:
column_type = "none"
elif not index_names and column_type.startswith("named"):
column_type = "un" + column_type
filename = "index_" + row_type + "_columns_" + column_type
expected = expected_html(datapath, filename)
assert result == expected
@pytest.mark.parametrize("index_names", [True, False])
@pytest.mark.parametrize("header", [True, False])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize(
"column_index, column_type",
[
(Index(np.arange(8)), "unnamed_standard"),
(Index(np.arange(8), name="columns.name"), "named_standard"),
(
MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
"unnamed_multi",
),
(
MultiIndex.from_product(
[["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
),
"named_multi",
),
],
)
@pytest.mark.parametrize(
"row_index, row_type",
[
(Index(np.arange(8)), "unnamed_standard"),
(Index(np.arange(8), name="index.name"), "named_standard"),
(
MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
"unnamed_multi",
),
(
MultiIndex.from_product(
[["a", "b"], ["c", "d"], ["e", "f"]], names=["foo", None, "baz"]
),
"named_multi",
),
],
)
def test_to_html_alignment_with_truncation(
datapath, row_index, row_type, column_index, column_type, index, header, index_names
):
# GH 22747, GH 22579
df = DataFrame(np.arange(64).reshape(8, 8), index=row_index, columns=column_index)
result = df.to_html(
max_rows=4, max_cols=4, index=index, header=header, index_names=index_names
)
if not index:
row_type = "none"
elif not index_names and row_type.startswith("named"):
row_type = "un" + row_type
if not header:
column_type = "none"
elif not index_names and column_type.startswith("named"):
column_type = "un" + column_type
filename = "trunc_df_index_" + row_type + "_columns_" + column_type
expected = expected_html(datapath, filename)
assert result == expected
@pytest.mark.parametrize("index", [False, 0])
def test_to_html_truncation_index_false_max_rows(datapath, index):
# GH 15019
data = [
[1.764052, 0.400157],
[0.978738, 2.240893],
[1.867558, -0.977278],
[0.950088, -0.151357],
[-0.103219, 0.410599],
]
df = DataFrame(data)
result = df.to_html(max_rows=4, index=index)
expected = expected_html(datapath, "gh15019_expected_output")
assert result == expected
@pytest.mark.parametrize("index", [False, 0])
@pytest.mark.parametrize(
"col_index_named, expected_output",
[(False, "gh22783_expected_output"), (True, "gh22783_named_columns_index")],
)
def test_to_html_truncation_index_false_max_cols(
datapath, index, col_index_named, expected_output
):
# GH 22783
data = [
[1.764052, 0.400157, 0.978738, 2.240893, 1.867558],
[-0.977278, 0.950088, -0.151357, -0.103219, 0.410599],
]
df = DataFrame(data)
if col_index_named:
df.columns.rename("columns.name", inplace=True)
result = df.to_html(max_cols=4, index=index)
expected = expected_html(datapath, expected_output)
assert result == expected
@pytest.mark.parametrize("notebook", [True, False])
def test_to_html_notebook_has_style(notebook):
df = DataFrame({"A": [1, 2, 3]})
result = df.to_html(notebook=notebook)
if notebook:
assert "tbody tr th:only-of-type" in result
assert "vertical-align: middle;" in result
assert "thead th" in result
else:
assert "tbody tr th:only-of-type" not in result
assert "vertical-align: middle;" not in result
assert "thead th" not in result
def test_to_html_with_index_names_false():
# GH 16493
df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
result = df.to_html(index_names=False)
assert "myindexname" not in result
def test_to_html_with_id():
# GH 8496
df = DataFrame({"A": [1, 2]}, index=Index(["a", "b"], name="myindexname"))
result = df.to_html(index_names=False, table_id="TEST_ID")
assert ' id="TEST_ID"' in result
@pytest.mark.parametrize(
"value,float_format,expected",
[
(0.19999, "%.3f", "gh21625_expected_output"),
(100.0, "%.0f", "gh22270_expected_output"),
],
)
def test_to_html_float_format_no_fixed_width(value, float_format, expected, datapath):
# GH 21625, GH 22270
df = DataFrame({"x": [value]})
expected = expected_html(datapath, expected)
result = df.to_html(float_format=float_format)
assert result == expected
@pytest.mark.parametrize(
"render_links,expected",
[(True, "render_links_true"), (False, "render_links_false")],
)
def test_to_html_render_links(render_links, expected, datapath):
# GH 2679
data = [
[0, "https://pandas.pydata.org/?q1=a&q2=b", "pydata.org"],
[0, "www.pydata.org", "pydata.org"],
]
df = DataFrame(data, columns=Index(["foo", "bar", None], dtype=object))
result = df.to_html(render_links=render_links)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize(
"method,expected",
[
("to_html", lambda x: lorem_ipsum),
("_repr_html_", lambda x: lorem_ipsum[: x - 4] + "..."), # regression case
],
)
@pytest.mark.parametrize("max_colwidth", [10, 20, 50, 100])
def test_ignore_display_max_colwidth(method, expected, max_colwidth):
# see gh-17004
df = DataFrame([lorem_ipsum])
with option_context("display.max_colwidth", max_colwidth):
result = getattr(df, method)()
expected = expected(max_colwidth)
assert expected in result
@pytest.mark.parametrize("classes", [True, 0])
def test_to_html_invalid_classes_type(classes):
# GH 25608
df = DataFrame()
msg = "classes must be a string, list, or tuple"
with pytest.raises(TypeError, match=msg):
df.to_html(classes=classes)
def test_to_html_round_column_headers():
# GH 17280
df = DataFrame([1], columns=[0.55555])
with option_context("display.precision", 3):
html = df.to_html(notebook=False)
notebook = df.to_html(notebook=True)
assert "0.55555" in html
assert "0.556" in notebook
@pytest.mark.parametrize("unit", ["100px", "10%", "5em", 150])
def test_to_html_with_col_space_units(unit):
# GH 25941
df = DataFrame(np.random.default_rng(2).random(size=(1, 3)))
result = df.to_html(col_space=unit)
result = result.split("tbody")[0]
hdrs = [x for x in result.split("\n") if re.search(r"<th[>\s]", x)]
if isinstance(unit, int):
unit = str(unit) + "px"
for h in hdrs:
expected = f'<th style="min-width: {unit};">'
assert expected in h
| TestHTMLIndex |
python | tensorflow__tensorflow | tensorflow/python/trackable/python_state_test.py | {
"start": 1065,
"end": 3845
} | class ____(module.Module):
"""A checkpointable object whose NumPy array attributes are saved/restored.
Example usage:
```python
arrays = _NumpyState()
checkpoint = tf.train.Checkpoint(numpy_arrays=arrays)
arrays.x = numpy.zeros([3, 4])
save_path = checkpoint.save("/tmp/ckpt")
arrays.x[1, 1] = 4.
checkpoint.restore(save_path)
assert (arrays.x == numpy.zeros([3, 4])).all()
second_checkpoint = tf.train.Checkpoint(
numpy_arrays=_NumpyState())
# Attributes of NumpyState objects are created automatically by restore()
second_checkpoint.restore(save_path)
assert (second_checkpoint.numpy_arrays.x == numpy.zeros([3, 4])).all()
```
Note that `NumpyState` objects re-create the attributes of the previously
saved object on `restore()`. This is in contrast to TensorFlow variables, for
which a `Variable` object must be created and assigned to an attribute.
This snippet works both when graph building and when executing eagerly. On
save, the NumPy array(s) are fed as strings to be saved in the checkpoint (via
a placeholder when graph building, or as a string constant when executing
eagerly). When restoring they skip the TensorFlow graph entirely, and so no
restore ops need be run. This means that restoration always happens eagerly,
rather than waiting for `checkpoint.restore(...).run_restore_ops()` like
TensorFlow variables when graph building.
"""
def __init__(self):
super(_NumpyState, self).__setattr__("_arrays", module.Module())
def __getattribute__(self, name):
"""Un-wrap `_NumpyWrapper` objects when accessing attributes."""
try:
arrays = super(_NumpyState, self).__getattribute__("_arrays")
except AttributeError:
# _arrays hasn't been assigned yet
return super(_NumpyState, self).__getattribute__(name)
try:
value = getattr(arrays, name)
except AttributeError:
dummy_array = numpy.array([])
setattr(arrays, name, _NumpyWrapper(dummy_array))
value = getattr(arrays, name)
if value.array is dummy_array:
# No set or restored attribute with this name
delattr(arrays, name)
return super(_NumpyState, self).__getattribute__(name)
if isinstance(value, _NumpyWrapper):
return value.array
return super(_NumpyState, self).__getattribute__(name)
def __setattr__(self, name, value):
"""Automatically wrap NumPy arrays assigned to attributes."""
if isinstance(value, (numpy.ndarray, numpy.generic)):
try:
existing = getattr(self._arrays, name)
existing.array = value
return
except AttributeError:
value = _NumpyWrapper(value)
setattr(self._arrays, name, value)
return
super(_NumpyState, self).__setattr__(name, value)
| _NumpyState |
python | getsentry__sentry | tests/sentry/models/test_recentsearch.py | {
"start": 651,
"end": 1561
} | class ____(TestCase):
def test(self) -> None:
with patch("sentry.models.recentsearch.MAX_RECENT_SEARCHES", new=1):
RecentSearch.objects.create(
organization=self.organization,
user_id=self.user.id,
type=0,
query="hello",
last_seen=timezone.now() - timedelta(minutes=10),
)
remove_excess_recent_searches(self.organization, self.user, 0)
assert list(RecentSearch.objects.all().values_list("query", flat=True)) == ["hello"]
RecentSearch.objects.create(
organization=self.organization, user_id=self.user.id, type=0, query="goodbye"
)
remove_excess_recent_searches(self.organization, self.user, 0)
assert list(RecentSearch.objects.all().values_list("query", flat=True)) == ["goodbye"]
| RemoveExcessRecentSearchesTest |
python | ansible__ansible | lib/ansible/_internal/_yaml/_loader.py | {
"start": 1886,
"end": 2436
} | class ____(_YamlParser, AnsibleConstructor, Resolver):
"""Ansible loader which supports Ansible custom behavior such as `Origin` tagging, as well as Ansible-specific YAML tags."""
def __init__(self, stream: str | bytes | _io.IOBase) -> None:
_YamlParser.__init__(self, stream)
AnsibleConstructor.__init__(
self,
origin=_tags.Origin.get_or_create_tag(stream, self.name),
trusted_as_template=_tags.TrustedAsTemplate.is_tagged_on(stream),
)
Resolver.__init__(self)
| AnsibleLoader |
python | doocs__leetcode | solution/2700-2799/2751.Robot Collisions/Solution.py | {
"start": 0,
"end": 1100
} | class ____:
def survivedRobotsHealths(
self, positions: List[int], healths: List[int], directions: str
) -> List[int]:
n = len(positions)
indices = list(range(n))
stack = []
indices.sort(key=lambda i: positions[i])
for currentIndex in indices:
if directions[currentIndex] == "R":
stack.append(currentIndex)
else:
while stack and healths[currentIndex] > 0:
topIndex = stack.pop()
if healths[topIndex] > healths[currentIndex]:
healths[topIndex] -= 1
healths[currentIndex] = 0
stack.append(topIndex)
elif healths[topIndex] < healths[currentIndex]:
healths[currentIndex] -= 1
healths[topIndex] = 0
else:
healths[currentIndex] = 0
healths[topIndex] = 0
result = [health for health in healths if health > 0]
return result
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_13/organization.py | {
"start": 2971,
"end": 5225
} | class ____(Response):
"""
Response of organization.get_tags endpoint.
:param tags: The list of unique tag values
:type tags: Sequence[str]
:param system_tags: The list of unique system tag values. Returned only if
'include_system' is set to 'true' in the request
:type system_tags: Sequence[str]
"""
_service = "organization"
_action = "get_tags"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"system_tags": {
"description": "The list of unique system tag values. Returned only if 'include_system' is set to 'true' in the request",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "The list of unique tag values",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self, tags: Optional[List[str]] = None, system_tags: Optional[List[str]] = None, **kwargs: Any
) -> None:
super(GetTagsResponse, self).__init__(**kwargs)
self.tags = tags
self.system_tags = system_tags
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
response_mapping = {GetTagsRequest: GetTagsResponse}
| GetTagsResponse |
python | kamyu104__LeetCode-Solutions | Python/maximize-value-of-function-in-a-ball-passing-game.py | {
"start": 100,
"end": 2694
} | class ____(object):
def getMaxFunctionValue(self, receiver, k):
"""
:type receiver: List[int]
:type k: int
:rtype: int
"""
def find_cycles(adj):
result = []
lookup = [0]*len(adj)
idx = 0
for u in xrange(len(adj)):
prev = idx
while not lookup[u]:
idx += 1
lookup[u] = idx
u = adj[u]
if lookup[u] > prev:
result.append((u, idx-lookup[u]+1))
return result
def find_prefixes():
lookup = [(-1, -1)]*len(receiver)
prefixes = [[0] for _ in xrange(len(cycles))]
for idx, (u, l) in enumerate(cycles):
for i in xrange(l):
lookup[u] = (idx, i)
prefixes[idx].append(prefixes[idx][i]+u)
u = receiver[u]
return lookup, prefixes
def get_sum(prefix, i, cnt):
l = len(prefix)-1
q, r = divmod(cnt, l)
return (q*prefix[-1]+
(prefix[min(i+r, l)]-prefix[i])+
(prefix[max(((i+r)-l, 0))]-prefix[0]))
def start_inside_cycle():
result = 0
for u, l in cycles:
for _ in xrange(l):
idx, i = lookup[u]
result = max(result, get_sum(prefixes[idx], i, k+1))
u = receiver[u]
return result
def start_outside_cycle():
result = 0
degree = [0]*len(receiver)
for x in receiver:
degree[x] += 1
for u in xrange(len(receiver)):
if degree[u]:
continue
curr = 0
dq = collections.deque()
while lookup[u][0] == -1:
curr += u
dq.append(u)
if len(dq) == k+1:
result = max(result, curr)
curr -= dq.popleft()
u = receiver[u]
idx, i = lookup[u]
while dq:
result = max(result, curr+get_sum(prefixes[idx], i, (k+1)-len(dq)))
curr -= dq.popleft()
return result
cycles = find_cycles(receiver)
lookup, prefixes = find_prefixes()
return max(start_inside_cycle(), start_outside_cycle())
# Time: O(nlogk)
# Space: O(nlogk)
# binary lifting
| Solution |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_fine_tune.py | {
"start": 1324,
"end": 1975
} | class ____(nn.Module):
def __init__(self, freeze: bool):
super().__init__()
self.layer0 = LinearUnusedInput(4, 4)
self.layer1_frozen = LinearUnusedInput(4, 4)
if freeze:
for param in self.layer1_frozen.parameters():
param.requires_grad = False
self.layer2 = LinearUnusedInput(4, 4)
def forward(self, frozen_input, learnable_input):
x = self.layer0(frozen_input, learnable_input)
y = self.layer1_frozen(frozen_input, learnable_input)
z = self.layer2(frozen_input, learnable_input)
return torch.concat([x, y, z, learnable_input])
| ModelUnusedInput |
python | networkx__networkx | networkx/classes/tests/test_special.py | {
"start": 3739,
"end": 3893
} | class ____(_TestMultiGraph):
def setup_method(self):
_TestMultiGraph.setup_method(self)
self.Graph = nx.MultiGraph
| TestSpecialMultiGraph |
python | spack__spack | lib/spack/spack/traverse.py | {
"start": 3683,
"end": 26643
} | class ____:
"""Visits all unique edges of the sub-DAG induced by direct dependencies of type ``direct``
and transitive dependencies of type ``transitive``. An example use for this is traversing build
type dependencies non-recursively, and link dependencies recursively."""
def __init__(
self,
*,
direct: dt.DepFlag,
transitive: dt.DepFlag,
key: Callable[["spack.spec.Spec"], Any] = id,
) -> None:
self.direct_type = direct
self.transitive_type = transitive
self.key = key
self.seen: Set[Any] = set()
self.seen_roots: Set[Any] = set()
def accept(self, item: EdgeAndDepth) -> bool:
# Do not accept duplicate root nodes. This only happens if the user starts iterating from
# multiple roots and lists one of the roots multiple times.
if item.edge.parent is None:
node_id = self.key(item.edge.spec)
if node_id in self.seen_roots:
return False
self.seen_roots.add(node_id)
return True
def neighbors(self, item: EdgeAndDepth) -> List[EdgeAndDepth]:
# If we're here through an artificial source node, it's a root, and we return all
# direct_type and transitive_type edges. If we're here through a transitive_type edge, we
# return all transitive_type edges. To avoid returning the same edge twice:
# 1. If we had already encountered the current node through a transitive_type edge, we
# don't need to return transitive_type edges again.
# 2. If we encounter the current node through a direct_type edge, and we had already seen
# it through a transitive_type edge, only return the non-transitive_type, direct_type
# edges.
node_id = self.key(item.edge.spec)
seen = node_id in self.seen
is_root = item.edge.parent is None
follow_transitive = is_root or bool(item.edge.depflag & self.transitive_type)
follow = self.direct_type if is_root else dt.NONE
if follow_transitive and not seen:
follow |= self.transitive_type
self.seen.add(node_id)
elif follow == dt.NONE:
return []
edges = item.edge.spec.edges_to_dependencies(depflag=follow)
# filter direct_type edges already followed before becuase they were also transitive_type.
if seen:
edges = [edge for edge in edges if not edge.depflag & self.transitive_type]
return sort_edges(edges)
def get_visitor_from_args(
cover, direction, depflag: Union[dt.DepFlag, dt.DepTypes], key=id, visited=None, visitor=None
):
"""
Create a visitor object from common keyword arguments.
Arguments:
cover (str): Determines how extensively to cover the dag. Possible values:
``nodes`` -- Visit each unique node in the dag only once.
``edges`` -- If a node has been visited once but is reached along a
new path, it's accepted, but not recurisvely followed. This traverses
each 'edge' in the DAG once.
``paths`` -- Explore every unique path reachable from the root.
This descends into visited subtrees and will accept nodes multiple
times if they're reachable by multiple paths.
direction (str): ``children`` or ``parents``. If ``children``, does a traversal
of this spec's children. If ``parents``, traverses upwards in the DAG
towards the root.
deptype: allowed dependency types
key: function that takes a spec and outputs a key for uniqueness test.
visited (set or None): a set of nodes not to follow (when using cover=nodes/edges)
visitor: An initial visitor that is used for composition.
Returns:
A visitor
"""
if not isinstance(depflag, dt.DepFlag):
depflag = dt.canonicalize(depflag)
visitor = visitor or BaseVisitor(depflag)
if cover == "nodes":
visitor = CoverNodesVisitor(visitor, key, visited)
elif cover == "edges":
visitor = CoverEdgesVisitor(visitor, key, visited)
if direction == "parents":
visitor = ReverseVisitor(visitor, depflag)
return visitor
def with_artificial_edges(specs):
"""Initialize a deque of edges from an artificial root node to the root specs."""
from spack.spec import DependencySpec
return deque(
EdgeAndDepth(edge=DependencySpec(parent=None, spec=s, depflag=0, virtuals=()), depth=0)
for s in specs
)
def traverse_depth_first_edges_generator(edges, visitor, post_order=False, root=True, depth=False):
"""Generator that takes explores a DAG in depth-first fashion starting from
a list of edges. Note that typically DFS would take a vertex not a list of edges,
but the API is like this so we don't have to create an artificial root node when
traversing from multiple roots in a DAG.
Arguments:
edges (list): List of EdgeAndDepth instances
visitor: class instance implementing accept() and neigbors()
post_order (bool): Whether to yield nodes when backtracking
root (bool): whether to yield at depth 0
depth (bool): when ``True`` yield a tuple of depth and edge, otherwise only the
edge.
"""
for edge in edges:
if not visitor.accept(edge):
continue
yield_me = root or edge.depth > 0
# Pre
if yield_me and not post_order:
yield (edge.depth, edge.edge) if depth else edge.edge
neighbors = [EdgeAndDepth(edge=n, depth=edge.depth + 1) for n in visitor.neighbors(edge)]
# This extra branch is just for efficiency.
if len(neighbors) > 0:
for item in traverse_depth_first_edges_generator(
neighbors, visitor, post_order, root, depth
):
yield item
# Post
if yield_me and post_order:
yield (edge.depth, edge.edge) if depth else edge.edge
def traverse_breadth_first_edges_generator(queue: deque, visitor, root=True, depth=False):
while len(queue) > 0:
edge = queue.popleft()
# If the visitor doesn't accept the node, we don't yield it nor follow its edges.
if not visitor.accept(edge):
continue
if root or edge.depth > 0:
yield (edge.depth, edge.edge) if depth else edge.edge
for e in visitor.neighbors(edge):
queue.append(EdgeAndDepth(e, edge.depth + 1))
def traverse_breadth_first_with_visitor(specs, visitor):
"""Performs breadth first traversal for a list of specs (not a generator).
Arguments:
specs (list): List of Spec instances.
visitor: object that implements accept and neighbors interface, see
for example BaseVisitor.
"""
queue = with_artificial_edges(specs)
while len(queue) > 0:
edge = queue.popleft()
# If the visitor doesn't accept the node, we don't traverse it further.
if not visitor.accept(edge):
continue
for e in visitor.neighbors(edge):
queue.append(EdgeAndDepth(e, edge.depth + 1))
def traverse_depth_first_with_visitor(edges, visitor):
"""Traverse a DAG in depth-first fashion using a visitor, starting from
a list of edges. Note that typically DFS would take a vertex not a list of edges,
but the API is like this so we don't have to create an artificial root node when
traversing from multiple roots in a DAG.
Arguments:
edges (list): List of EdgeAndDepth instances
visitor: class instance implementing accept(), pre(), post() and neighbors()
"""
for edge in edges:
if not visitor.accept(edge):
continue
visitor.pre(edge)
neighbors = [EdgeAndDepth(edge=e, depth=edge.depth + 1) for e in visitor.neighbors(edge)]
traverse_depth_first_with_visitor(neighbors, visitor)
visitor.post(edge)
# Helper functions for generating a tree using breadth-first traversal
def breadth_first_to_tree_edges(roots, deptype="all", key=id):
"""This produces an adjacency list (with edges) and a map of parents.
There may be nodes that are reached through multiple edges. To print as
a tree, one should use the parents dict to verify if the path leading to
the node is through the correct parent. If not, the branch should be
truncated."""
edges = defaultdict(list)
parents = dict()
for edge in traverse_edges(roots, order="breadth", cover="edges", deptype=deptype, key=key):
parent_id = None if edge.parent is None else key(edge.parent)
child_id = key(edge.spec)
edges[parent_id].append(edge)
if child_id not in parents:
parents[child_id] = parent_id
return edges, parents
def breadth_first_to_tree_nodes(roots, deptype="all", key=id):
"""This produces a list of edges that forms a tree; every node has no more
that one incoming edge."""
edges = defaultdict(list)
for edge in traverse_edges(roots, order="breadth", cover="nodes", deptype=deptype, key=key):
parent_id = None if edge.parent is None else key(edge.parent)
edges[parent_id].append(edge)
return edges
def traverse_breadth_first_tree_edges(parent_id, edges, parents, key=id, depth=0):
"""Do a depth-first search on edges generated by bread-first traversal,
which can be used to produce a tree."""
for edge in edges[parent_id]:
yield (depth, edge)
child_id = key(edge.spec)
# Don't follow further if we're not the parent
if parents[child_id] != parent_id:
continue
yield from traverse_breadth_first_tree_edges(child_id, edges, parents, key, depth + 1)
def traverse_breadth_first_tree_nodes(parent_id, edges, key=id, depth=0):
for edge in edges[parent_id]:
yield (depth, edge)
for item in traverse_breadth_first_tree_nodes(key(edge.spec), edges, key, depth + 1):
yield item
def traverse_topo_edges_generator(edges, visitor, key=id, root=True, all_edges=False):
"""
Returns a list of edges in topological order, in the sense that all in-edges of a vertex appear
before all out-edges.
Arguments:
edges (list): List of EdgeAndDepth instances
visitor: visitor that produces unique edges defining the (sub)DAG of interest.
key: function that takes a spec and outputs a key for uniqueness test.
root (bool): Yield the root nodes themselves
all_edges (bool): When ``False`` only one in-edge per node is returned, when
``True`` all reachable edges are returned.
"""
# Topo order used to be implemented using a DFS visitor, which was relatively efficient in that
# it would visit nodes only once, and it was composable. In practice however it would yield a
# DFS order on DAGs that are trees, which is undesirable in many cases. For example, a list of
# search paths for trees is better in BFS order, so that direct dependencies are listed first.
# That way a transitive dependency cannot shadow a direct one. So, here we collect the sub-DAG
# of interest and then compute a topological order that is the most breadth-first possible.
# maps node identifier to the number of remaining in-edges
in_edge_count = defaultdict(int)
# maps parent identifier to a list of edges, where None is a special identifier
# for the artificial root/source.
node_to_edges = defaultdict(list)
for edge in traverse_breadth_first_edges_generator(edges, visitor, root=True, depth=False):
in_edge_count[key(edge.spec)] += 1
parent_id = key(edge.parent) if edge.parent is not None else None
node_to_edges[parent_id].append(edge)
queue = deque((None,))
while queue:
for edge in node_to_edges[queue.popleft()]:
child_id = key(edge.spec)
in_edge_count[child_id] -= 1
should_yield = root or edge.parent is not None
if all_edges and should_yield:
yield edge
if in_edge_count[child_id] == 0:
if not all_edges and should_yield:
yield edge
queue.append(key(edge.spec))
# High-level API: traverse_edges, traverse_nodes, traverse_tree.
OrderType = Literal["pre", "post", "breadth", "topo"]
CoverType = Literal["nodes", "edges", "paths"]
DirectionType = Literal["children", "parents"]
@overload
def traverse_edges(
specs: Sequence["spack.spec.Spec"],
*,
root: bool = ...,
order: OrderType = ...,
cover: CoverType = ...,
direction: DirectionType = ...,
deptype: Union[dt.DepFlag, dt.DepTypes] = ...,
depth: Literal[False] = False,
key: Callable[["spack.spec.Spec"], Any] = ...,
visited: Optional[Set[Any]] = ...,
) -> Iterable["spack.spec.DependencySpec"]: ...
@overload
def traverse_edges(
specs: Sequence["spack.spec.Spec"],
*,
root: bool = ...,
order: OrderType = ...,
cover: CoverType = ...,
direction: DirectionType = ...,
deptype: Union[dt.DepFlag, dt.DepTypes] = ...,
depth: Literal[True],
key: Callable[["spack.spec.Spec"], Any] = ...,
visited: Optional[Set[Any]] = ...,
) -> Iterable[Tuple[int, "spack.spec.DependencySpec"]]: ...
@overload
def traverse_edges(
specs: Sequence["spack.spec.Spec"],
*,
root: bool = ...,
order: OrderType = ...,
cover: CoverType = ...,
direction: DirectionType = ...,
deptype: Union[dt.DepFlag, dt.DepTypes] = ...,
depth: bool,
key: Callable[["spack.spec.Spec"], Any] = ...,
visited: Optional[Set[Any]] = ...,
) -> Iterable[Union["spack.spec.DependencySpec", Tuple[int, "spack.spec.DependencySpec"]]]: ...
def traverse_edges(
specs: Sequence["spack.spec.Spec"],
root: bool = True,
order: OrderType = "pre",
cover: CoverType = "nodes",
direction: DirectionType = "children",
deptype: Union[dt.DepFlag, dt.DepTypes] = "all",
depth: bool = False,
key: Callable[["spack.spec.Spec"], Any] = id,
visited: Optional[Set[Any]] = None,
) -> Iterable[Union["spack.spec.DependencySpec", Tuple[int, "spack.spec.DependencySpec"]]]:
"""
Iterable of edges from the DAG, starting from a list of root specs.
Arguments:
specs: List of root specs (considered to be depth 0)
root: Yield the root nodes themselves
order: What order of traversal to use in the DAG. For depth-first search this can be
``pre`` or ``post``. For BFS this should be ``breadth``. For topological order use
``topo``
cover: Determines how extensively to cover the dag. Possible values:
``nodes`` -- Visit each unique node in the dag only once.
``edges`` -- If a node has been visited once but is reached along a new path, it's
accepted, but not recurisvely followed. This traverses each 'edge' in the DAG once.
``paths`` -- Explore every unique path reachable from the root. This descends into
visited subtrees and will accept nodes multiple times if they're reachable by multiple
paths.
direction: ``children`` or ``parents``. If ``children``, does a traversal of this spec's
children. If ``parents``, traverses upwards in the DAG towards the root.
deptype: allowed dependency types
depth: When ``False``, yield just edges. When ``True`` yield the tuple (depth, edge), where
depth corresponds to the depth at which edge.spec was discovered.
key: function that takes a spec and outputs a key for uniqueness test.
visited: a set of nodes not to follow
Returns:
An iterable of ``DependencySpec`` if depth is ``False`` or a tuple of
``(depth, DependencySpec)`` if depth is ``True``.
"""
# validate input
if order == "topo":
if cover == "paths":
raise ValueError("cover=paths not supported for order=topo")
if visited is not None:
raise ValueError("visited set not implemented for order=topo")
elif order not in ("post", "pre", "breadth"):
raise ValueError(f"Unknown order {order}")
# In topo traversal we need to construct a sub-DAG including all unique edges even if we are
# yielding a subset of them, hence "edges".
_cover = "edges" if order == "topo" else cover
visitor = get_visitor_from_args(_cover, direction, deptype, key, visited)
root_edges = with_artificial_edges(specs)
# Depth-first
if order == "pre" or order == "post":
return traverse_depth_first_edges_generator(
root_edges, visitor, order == "post", root, depth
)
elif order == "breadth":
return traverse_breadth_first_edges_generator(root_edges, visitor, root, depth)
elif order == "topo":
return traverse_topo_edges_generator(
root_edges, visitor, key, root, all_edges=cover == "edges"
)
@overload
def traverse_nodes(
specs: Sequence["spack.spec.Spec"],
*,
root: bool = ...,
order: OrderType = ...,
cover: CoverType = ...,
direction: DirectionType = ...,
deptype: Union[dt.DepFlag, dt.DepTypes] = ...,
depth: Literal[False] = False,
key: Callable[["spack.spec.Spec"], Any] = ...,
visited: Optional[Set[Any]] = ...,
) -> Iterable["spack.spec.Spec"]: ...
@overload
def traverse_nodes(
specs: Sequence["spack.spec.Spec"],
*,
root: bool = ...,
order: OrderType = ...,
cover: CoverType = ...,
direction: DirectionType = ...,
deptype: Union[dt.DepFlag, dt.DepTypes] = ...,
depth: Literal[True],
key: Callable[["spack.spec.Spec"], Any] = ...,
visited: Optional[Set[Any]] = ...,
) -> Iterable[Tuple[int, "spack.spec.Spec"]]: ...
@overload
def traverse_nodes(
specs: Sequence["spack.spec.Spec"],
*,
root: bool = ...,
order: OrderType = ...,
cover: CoverType = ...,
direction: DirectionType = ...,
deptype: Union[dt.DepFlag, dt.DepTypes] = ...,
depth: bool,
key: Callable[["spack.spec.Spec"], Any] = ...,
visited: Optional[Set[Any]] = ...,
) -> Iterable[Union["spack.spec.Spec", Tuple[int, "spack.spec.Spec"]]]: ...
def traverse_nodes(
specs: Sequence["spack.spec.Spec"],
*,
root: bool = True,
order: OrderType = "pre",
cover: CoverType = "nodes",
direction: DirectionType = "children",
deptype: Union[dt.DepFlag, dt.DepTypes] = "all",
depth: bool = False,
key: Callable[["spack.spec.Spec"], Any] = id,
visited: Optional[Set[Any]] = None,
) -> Iterable[Union["spack.spec.Spec", Tuple[int, "spack.spec.Spec"]]]:
"""
Iterable of specs from the DAG, starting from a list of root specs.
Arguments:
specs: List of root specs (considered to be depth 0)
root: Yield the root nodes themselves
order: What order of traversal to use in the DAG. For depth-first search this can be
``pre`` or ``post``. For BFS this should be ``breadth``.
cover: Determines how extensively to cover the dag. Possible values:
``nodes`` -- Visit each unique node in the dag only once.
``edges`` -- If a node has been visited once but is reached along a new path, it's
accepted, but not recurisvely followed. This traverses each 'edge' in the DAG once.
``paths`` -- Explore every unique path reachable from the root. This descends into
visited subtrees and will accept nodes multiple times if they're reachable by multiple
paths.
direction: ``children`` or ``parents``. If ``children``, does a traversal of this spec's
children. If ``parents``, traverses upwards in the DAG towards the root.
deptype: allowed dependency types
depth: When ``False``, yield just edges. When ``True`` yield the tuple ``(depth, edge)``,
where depth corresponds to the depth at which ``edge.spec`` was discovered.
key: function that takes a spec and outputs a key for uniqueness test.
visited: a set of nodes not to follow
Yields:
By default :class:`~spack.spec.Spec`, or a tuple ``(depth, Spec)`` if depth is
set to ``True``.
"""
for item in traverse_edges(
specs,
root=root,
order=order,
cover=cover,
direction=direction,
deptype=deptype,
depth=depth,
key=key,
visited=visited,
):
yield (item[0], item[1].spec) if depth else item.spec # type: ignore
def traverse_tree(
specs: Sequence["spack.spec.Spec"],
cover: CoverType = "nodes",
deptype: Union[dt.DepFlag, dt.DepTypes] = "all",
key: Callable[["spack.spec.Spec"], Any] = id,
depth_first: bool = True,
) -> Iterable[Tuple[int, "spack.spec.DependencySpec"]]:
"""
Generator that yields ``(depth, DependencySpec)`` tuples in the depth-first
pre-order, so that a tree can be printed from it.
Arguments:
specs: List of root specs (considered to be depth 0)
cover: Determines how extensively to cover the dag. Possible values:
``nodes`` -- Visit each unique node in the dag only once.
``edges`` -- If a node has been visited once but is reached along a
new path, it's accepted, but not recurisvely followed. This traverses each 'edge' in
the DAG once.
``paths`` -- Explore every unique path reachable from the root. This descends into
visited subtrees and will accept nodes multiple times if they're reachable by multiple
paths.
deptype: allowed dependency types
key: function that takes a spec and outputs a key for uniqueness test.
depth_first: Explore the tree in depth-first or breadth-first order. When setting
``depth_first=True`` and ``cover=nodes``, each spec only occurs once at the shallowest
level, which is useful when rendering the tree in a terminal.
Returns:
A generator that yields ``(depth, DependencySpec)`` tuples in such an order that a tree can
be printed.
"""
# BFS only makes sense when going over edges and nodes, for paths the tree is
# identical to DFS, which is much more efficient then.
if not depth_first and cover == "edges":
edges, parents = breadth_first_to_tree_edges(specs, deptype, key)
return traverse_breadth_first_tree_edges(None, edges, parents, key)
elif not depth_first and cover == "nodes":
edges = breadth_first_to_tree_nodes(specs, deptype, key)
return traverse_breadth_first_tree_nodes(None, edges, key)
return traverse_edges(specs, order="pre", cover=cover, deptype=deptype, key=key, depth=True)
def by_dag_hash(s: "spack.spec.Spec") -> str:
"""Used very often as a key function for traversals."""
return s.dag_hash()
| MixedDepthVisitor |
python | ansible__ansible | test/lib/ansible_test/_internal/ci/local.py | {
"start": 601,
"end": 4196
} | class ____(CIProvider):
"""CI provider implementation when not using CI."""
priority = 1000
@staticmethod
def is_supported() -> bool:
"""Return True if this provider is supported in the current running environment."""
return True
@property
def code(self) -> str:
"""Return a unique code representing this provider."""
return CODE
@property
def name(self) -> str:
"""Return descriptive name for this provider."""
return 'Local'
def generate_resource_prefix(self) -> str:
"""Return a resource prefix specific to this CI provider."""
prefix = 'ansible-test-%d-%s' % (
random.randint(10000000, 99999999),
platform.node().split('.')[0],
)
return prefix
def get_base_commit(self, args: CommonConfig) -> str:
"""Return the base commit or an empty string."""
return ''
def detect_changes(self, args: TestConfig) -> t.Optional[list[str]]:
"""Initialize change detection."""
result = LocalChanges(args)
display.info('Detected branch %s forked from %s at commit %s' % (
result.current_branch, result.fork_branch, result.fork_point))
if result.untracked and not args.untracked:
display.warning('Ignored %s untracked file(s). Use --untracked to include them.' %
len(result.untracked))
if result.committed and not args.committed:
display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' %
len(result.committed))
if result.staged and not args.staged:
display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' %
len(result.staged))
if result.unstaged and not args.unstaged:
display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' %
len(result.unstaged))
names = set()
if args.tracked:
names |= set(result.tracked)
if args.untracked:
names |= set(result.untracked)
if args.committed:
names |= set(result.committed)
if args.staged:
names |= set(result.staged)
if args.unstaged:
names |= set(result.unstaged)
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
for path in result.untracked:
if is_binary_file(path):
args.metadata.changes[path] = ((0, 0),)
continue
line_count = len(read_text_file(path).splitlines())
args.metadata.changes[path] = ((1, line_count),)
return sorted(names)
def supports_core_ci_auth(self) -> bool:
"""Return True if Ansible Core CI is supported."""
return Authenticator.available()
def prepare_core_ci_request(self, config: dict[str, object], context: AuthContext) -> dict[str, object]:
if not (authenticator := Authenticator.load()):
raise ApplicationError('Ansible Core CI authentication has not been configured.')
display.info(f'Using {authenticator} for Ansible Core CI.', verbosity=1)
return authenticator.prepare_auth_request(config, context)
def get_git_details(self, args: CommonConfig) -> t.Optional[dict[str, t.Any]]:
"""Return details about git in the current environment."""
return None # not yet implemented for local
| Local |
python | matplotlib__matplotlib | lib/matplotlib/patches.py | {
"start": 52868,
"end": 53670
} | class ____(RegularPolygon):
"""A polygon-approximation of a circle patch."""
def __str__(self):
s = "CirclePolygon((%g, %g), radius=%g, resolution=%d)"
return s % (self.xy[0], self.xy[1], self.radius, self.numvertices)
@_docstring.interpd
def __init__(self, xy, radius=5, *,
resolution=20, # the number of vertices
** kwargs):
"""
Create a circle at *xy* = (*x*, *y*) with given *radius*.
This circle is approximated by a regular polygon with *resolution*
sides. For a smoother circle drawn with splines, see `Circle`.
Valid keyword arguments are:
%(Patch:kwdoc)s
"""
super().__init__(
xy, resolution, radius=radius, orientation=0, **kwargs)
| CirclePolygon |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 8525,
"end": 8877
} | class ____(WeaviateBaseError):
"""Is raised when adding an invalid new property."""
def __init__(self, message: str):
msg = f"""Could not add the property {message}. Only optional properties or properties with default
value are valid"""
super().__init__(msg)
self.message = message
| WeaviateAddInvalidPropertyError |
python | getsentry__sentry | src/sentry/integrations/discord/webhooks/command.py | {
"start": 1914,
"end": 2712
} | class ____(DiscordInteractionHandler):
"""
Handles logic for Discord Command interactions.
Request passed in constructor must be command interaction.
"""
def handle(self) -> Response:
command_name = self.request.get_command_name()
cmd_input = CommandInput(command_name)
dispatcher = DiscordCommandDispatcher(self.request)
try:
message = dispatcher.dispatch(cmd_input)
except CommandNotMatchedError:
logger.warning(
"discord.interaction.command.unknown",
extra={"command": command_name, **self.request.logging_data},
)
message = dispatcher.dispatch(CommandInput("help"))
return self.send_message(message)
@dataclass(frozen=True)
| DiscordCommandHandler |
python | joke2k__faker | faker/providers/job/hr_HR/__init__.py | {
"start": 42,
"end": 10150
} | class ____(BaseProvider):
jobs = [
"Agent posredovanja u prometu nekretnina",
"Alatničar",
"Arhivist",
"Arhivski savjetnik",
"Arhivski tehničar",
"Autoelektričar",
"Autolakirer",
"Autolimar",
"Automehaničar",
"Autoserviser",
"Bravar",
"Brodostrojarski inspektor",
"Brodski mehaničar",
"Dentalni asistent",
"Dentalni tehničar",
"Dimnjačar",
"Diplomirana medicinska sestra/medicinski tehničar",
"Diplomirani fizioterapeut",
"Diplomirani knjižničar",
"Diplomirani sanitarni inženjer",
"Direktor predstavništva",
"Direktor turističkog ureda turističke zajednice",
"Dispečer leta / operativni kontrolor prometa",
"Djelatnik koji obavlja poslove izvođenja glasnog pucnja",
"Djelatnik koji obavlja poslove izvođenja javnog vatrometa",
"Djelatnik koji obavlja poslove miniranja",
"Djelatnik koji obavlja poslove miniranja pri razminiranju",
"Djelatnik koji obavlja poslove podzemnih miniranja",
"Djelatnik koji obavlja poslove pripreme miniranja",
"Djelatnik koji obavlja poslove specijalnih miniranja",
"Djelatnik službe za zaštitu od požara",
"Djelatnik za obavljanje poslova zaštite od požara i unapređenje stanja zaštite od požara",
"Doktor dentalne medicine",
"Doktor dentalne medicine specijalist",
"Doktor medicine",
"Doktor medicine specijalist",
"Dokumentarist",
"Dokumentarist savjetnik",
"Dočasnik",
"Edukacijski rehabilitator",
"Elektroinstalater",
"Elektromehaničar",
"Elektroničar – mehaničar",
"Farmaceutski tehničar",
"Fasader",
"Fizioterapeutski tehničar",
"Fotograf",
"Frizer",
"Galanterist",
"Geološki tehničar",
"Glavni inspektor zaštite okoliša",
"Glavni nadzornik",
"Glazbalar",
"Graditelj brodova",
"Graditelj orgulja",
"Hidrograđevinski inspektor",
"Informatičar",
"Informatičar savjetnik",
"Inspektor sigurnosti unutarnje plovidbe",
"Inspektor zaštite okoliša",
"Inspektor zaštite prirode",
"Instalater grijanja i klimatizacije",
"Instruktor vožnje",
"Inženjer geologije",
"Inženjer gradilišta",
"Inženjer medicinske radiologije",
"Inženjer medicinsko- laboratorijske dijagnostike",
"Inženjer naftnog rudarstva",
"Inženjer rudarstva",
"Kemijski čistač",
"Klesar",
"Klobučar",
"Knjižničar",
"Knjižničarski savjetnik",
"Kondukter",
"Kontrolor zračnog prometa",
"Konzervator",
"Konzervator savjetnik",
"Konzervator tehničar",
"Konzervator – restaurator savjetnik",
"Konzervator – restaurator",
"Koordinator zaštite na radu",
"Kormilar",
"Kotlar",
"Kovač",
"Kozmetičar",
"Kriminalist",
"Krojač muške odjeće",
"Krojač ženske odjeće",
"Krovopokrivač",
"Krznar",
"Kuhar",
"Kustos",
"Lektor hrvatskog jezika na stranim visokoškolskim ustanovama",
"Limar",
"Ljekarnik",
"Ljekarnik specijalist",
"Ljevač",
"Lovac",
"Lovočuvar",
"Magistar sanitarnog inženjerstva",
"Magistar sestrinstva",
"Maser – kupeljar",
"Medicinska sestra opće njege",
"Medicinska sestra-medicinski tehničar",
"Medicinska sestra/medicinski tehničar specijalist",
"Medicinski biokemičar",
"Medicinski biokemičar specijalist",
"Mehaničar poljoprivredne mehanizacije",
"Mesar",
"Mlinar",
"Mljekar",
"Mornar",
"Muzejski pedagog",
"Muzejski pedagog savjetnik",
"Muzejski savjetnik",
"Muzejski tehničar",
"Nadzornik",
"Nadzornik tehničke ispravnosti vozila",
"Nadzornik za okoliš",
"Nastavnik predmetne nastave u srednjoj školi",
"Natkonobar",
"Nautički inspektor",
"Obućar",
"Ocjenjivač lovačkih trofeja",
"Odgajatelj u učeničkom domu",
"Odgojitelj djece predškolske dobi",
"Odgovorna osoba za ocjenjivanje sukladnosti građevinskih proizvoda",
"Odvjetnik",
"Ortopedski obućar",
"Osoba koja provodi energetske preglede građevina i/ili energetsko certificiranje zgrada",
"Osoba stručno osposobljena za gospodarski ribolov",
"Osoba stručno osposobljena za obavljanje akvakulture",
"Osoba stručno osposobljena za uzgoj riba i drugih morskih organizama",
"Osoblje za održavanje zrakoplova (mehaničari, tehničari i inženjeri)",
"Ovlaštena osoba za izradu elaborata zaštite od požara",
"Ovlašteni arhitekt",
"Ovlašteni arhitekt – urbanist",
"Ovlašteni carinski službenik",
"Ovlašteni carinski zastupnik",
"Ovlašteni inženjer drvne tehnologije",
"Ovlašteni inženjer elektrotehnike",
"Ovlašteni inženjer geodezije",
"Ovlašteni inženjer građevinarstva",
"Ovlašteni inženjer strojarstva",
"Ovlašteni inženjer tehnologije prometa i transporta",
"Ovlašteni inženjer šumarstva",
"Ovlašteni ispitivač",
"Ovlašteni revident iz zaštite od požara",
"Ovlašteni revizor",
"Ovlašteni unutarnji revizor za javni sektor",
"Očni optičar",
"Pediker",
"Pekar",
"Pećar",
"Pirotehničar",
"Pirotehnički nadglednik",
"Pirotehnički nadzornik",
"Pismoslikar",
"Plinoinstalater",
"Policajac",
"Pomoćni djelatnik",
"Pomoćni knjižničar",
"Pomoćnik strojovođe",
"Porezni savjetnik",
"Precizni mehaničar",
"Predavač",
"Pregledač vagona",
"Preparator",
"Primalja",
"Primalja – asistentica",
"Privatni detektiv",
"Profesionalni diplomat",
"Profesionalni pilot aviona",
"Profesionalni pilot helikoptera",
"Prometni pilot aviona",
"Prometni pilot helikoptera",
"Prometnik vlakova",
"Prvostupnik fizioterapije",
"Prvostupnik medicinske radiologije",
"Prvostupnik medicinsko- laboratorijske dijagnostike",
"Prvostupnik primaljstva",
"Prvostupnik radiološke tehnologije",
"Prvostupnik radne terapije",
"Prvostupnik sanitarnog inženjerstva",
"Prvostupnik sestrinstva",
"Prvostupnik sestrinstva specijalist",
"Psiholog",
"Puškar",
"Radio inspektor sigurnosti plovidbe",
"Radiološki tehničar",
"Radnik na stručnim poslovima na izvršenju zadaća turističke zajednice",
"Radnik zaposlen na rukovodećim poslovima",
"Ravnatelj ustanove za predškolski odgoj i naobrazbu",
"Ravnatelj školske ustanove",
"Referent za poslove registracije vozila",
"Restaurator",
"Restaurator majstor",
"Restaurator savjetnik",
"Restaurator tehničar",
"Revident",
"Rudarski tehničar",
"Rukovatelj eksplozivnim tvarima",
"Rukovoditelj tehničkog plovila",
"Sanitarni tehničar",
"Sigurnosni savjetnik",
"Slastičar",
"Soboslikar – ličilac",
"Socijalni radnik",
"Sportski trener",
"Sportski učitelj / instruktor",
"Sportski voditelj",
"Staklar",
"Stalni sudski procjenitelj",
"Stalni sudski tumač",
"Stalni sudski vještak",
"Stolar",
"Strojar unutarnje plovidbe",
"Strojobravar",
"Strojovođa",
"Strukovni učitelj u srednjoj školi",
"Stručni radnik u djelatnosti socijalne skrbi",
"Stručni radnik u ustanovi socijalne skrbi",
"Stručni suradnik ovlaštenog inženjera geodezije",
"Stručni suradnik u predškolskoj ustanovi",
"Stručni suradnik u školskoj ustanovi",
"Stručni voditelj autoškole",
"Stručnjak zaštite na radu",
"Suradnik ovlaštenog inženjera geodezije",
"Suradnik u nastavi",
"Tapetar",
"Tesar",
"Tokar",
"Turistički pratitelj",
"Turistički vodič",
"Upravitelj prijevoza (osoba odgovorna za prijevoz)",
"Urar",
"Učitelj predmetne nastave u osnovnoj školi",
"Učitelj razredne nastave u osnovnoj školi",
"Učitelj stručnih predmeta",
"Veterinar",
"Veterinarski tehničar",
"Viši arhivist",
"Viši arhivski tehničar",
"Viši dokumentarist",
"Viši fotograf",
"Viši informatičar",
"Viši inspektor",
"Viši inspektor cestovnog prometa i cesta",
"Viši inspektor zaštite okoliša",
"Viši inspektor zaštite prirode",
"Viši knjižničar",
"Viši konzervator",
"Viši konzervator tehničar",
"Viši konzervator tehničar II stupnja",
"Viši konzervator – restaurator",
"Viši kustos",
"Viši muzejski pedagog",
"Viši muzejski tehničar",
"Viši preparator",
"Viši restaurator",
"Viši restaurator tehničar",
"Vlakovođa",
"Voditelj ispostave",
"Voditelj poslovnice turističke agencije",
"Voditelj projekta",
"Voditelj radilišta",
"Voditelj radova",
"Voditelj skele u nacionalnoj plovidbi",
"Vodoinstalater",
"Vodoinstalater, instalater grijanja i klimatizacije",
"Vojnik",
"Vozač teretnog motornog vozila i autobusa",
"Vođa palube",
"Zapovjednik plovila (vrsta A, vrsta B, vrsta C)",
"Zaštitar",
"Zaštitar-tehničar",
"Zdravstveno laboratorijski tehničar",
"Zidar",
"Zlatar",
"Časnik",
"Član kabinske posade zrakoplova",
"Čuvar",
"Čuvar prirode",
]
| Provider |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/api.py | {
"start": 2226,
"end": 2499
} | class ____:
SpecialConsoles = 'special_consoles_submenu'
Documentation = 'documentation_submenu'
EnvironmentConsoles = 'environment_consoles_submenu'
ClientContextMenu = 'client_context_menu'
TabsContextMenu = 'tabs_context_menu'
| IPythonConsoleWidgetMenus |
python | huggingface__transformers | src/transformers/models/llama4/convert_llama4_weights_to_hf.py | {
"start": 29268,
"end": 38893
} | class ____(TikTokenConverter):
def __init__(
self,
vocab_file,
special_tokens: list[str],
pattern: str,
model_max_length: int = 0,
chat_template: Optional[str] = None,
**kwargs,
):
super().__init__(vocab_file, pattern=pattern)
self.additional_special_tokens = special_tokens
tokenizer = self.converted()
if chat_template is not None:
kwargs["chat_template"] = chat_template
self.converted_tokenizer = PreTrainedTokenizerFast(
tokenizer_object=tokenizer,
model_input_names=["input_ids", "attention_mask"],
model_max_length=model_max_length,
**kwargs,
)
instruct = chat_template is not None
self.update_post_processor(self.converted_tokenizer)
# finer special_tokens_map.json
self.converted_tokenizer._bos_token = BOS_ADDED_TOKEN
self.converted_tokenizer._eos_token = EOT_ADDED_TOKEN if instruct else EOS_ADDED_TOKEN
# We can't do this while building the tokenizer because we have no easy access to the bos token id
def update_post_processor(self, tokenizer):
tokenizer._tokenizer.post_processor = processors.Sequence(
[
processors.ByteLevel(trim_offsets=False),
processors.TemplateProcessing(
single="<|begin_of_text|> $A",
pair="<|begin_of_text|>:0 $A:0 <|begin_of_text|>:1 $B:1",
special_tokens=[
("<|begin_of_text|>", tokenizer.convert_tokens_to_ids("<|begin_of_text|>")),
],
),
]
)
O200K_PATTERN = r"""[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]*[\p{Ll}\p{Lm}\p{Lo}\p{M}]+(?i:'s|'t|'re|'ve|'m|'ll|'d)?|[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]+[\p{Ll}\p{Lm}\p{Lo}\p{M}]*(?i:'s|'t|'re|'ve|'m|'ll|'d)?|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n/]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
def write_tokenizer(args):
tokenizer_path = os.path.join(args.input_dir, "tokenizer.model")
chat_template = "{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %} \n {%- if messages[0]['content'] is string %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- else %}\n {#- FIXME: The processor requires an array, always. #}\n {%- set system_message = messages[0]['content'][0]['text']|trim %}\n {%- endif %}\n {%- set messages = messages[1:] %}\n {%- set user_supplied_system_message = true %}\n{%- else %}\n {%- set system_message = \"\" %}\n {%- set user_supplied_system_message = false %}\n{%- endif %}\n\n{#- System message if the user supplied one #}\n{%- if user_supplied_system_message %}\n {{- \"<|header_start|>system<|header_end|>\n\n\" }}\n {%- if tools is not none %}\n {{- \"Environment: ipython\n\" }}\n {%- endif %}\n {%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\n\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\n\n\" }}\n {%- endfor %}\n {%- endif %}\n {{- system_message }}\n {{- \"<|eot|>\" }}\n{%- endif %}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|header_start|>user<|header_end|>\n\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\n\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\n\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\n\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|header_start|>' + message['role'] + '<|header_end|>\n\n' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- \"<|eot|>\" }}\n {%- elif 'tool_calls' in message and message.tool_calls|length > 0 %}\n {{- '<|header_start|>assistant<|header_end|>\n\n' -}}\n {{- '<|python_start|>' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '<|python_end|>' }}\n {%- for tool_call in message.tool_calls %}\n {{- '{\"name\": \"' + tool_call.function.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.function.arguments | tojson }}\n {{- \"}\" }}\n {%- endfor %}\n {{- \"<|eot|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|header_start|>ipython<|header_end|>\n\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|header_start|>assistant<|header_end|>\n\n' }}\n{%- endif %}\n"
special_tokens = BASIC_SPECIAL_TOKENS + LLAMA4_SPECIAL_TOKENS
converter = Llama4Converter(
vocab_file=tokenizer_path,
pattern=O200K_PATTERN,
special_tokens=special_tokens,
chat_template=chat_template if args.instruct else None,
bos_token="<|begin_of_text|>",
eos_token="<|end_of_text|>" if not args.instruct else "<|eot|>",
pad_token="<|finetune_right_pad_id|>",
model_max_length=max_context_length(args.input_dir, args.instruct),
)
tokenizer = converter.converted_tokenizer
image_processor = Llama4ImageProcessorFast()
processor = Llama4Processor(
image_processor=image_processor,
tokenizer=tokenizer,
chat_template=tokenizer.chat_template,
)
processor.save_pretrained(args.output_dir)
del processor
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
type=str,
help="Location of the local folder copied from the Hub.",
)
parser.add_argument(
"--output_dir",
type=str,
help="Location to write HF model and tokenizer",
)
parser.add_argument(
"--safe_serialization", default=True, type=bool, help="Whether or not to save using `safetensors`."
)
parser.add_argument(
"--special_tokens",
default=None,
type=list[str],
help="The list of special tokens that should be added to the model.",
)
parser.add_argument(
"--num_shards",
default=8,
type=int,
help="The number of individual shards used for the model. Does not have to be the same as the number of consolidated_xx.pth",
)
parser.add_argument(
"--instruct",
action="store_true",
help="Whether the model is an instruct model",
)
parser.add_argument(
"--convert_checkpoints",
action="store_true",
help="Whether to convert the original weights (or skip if previously converted)",
)
args = parser.parse_args()
write_tokenizer(args)
write_model(
model_path=args.output_dir,
input_base_path=args.input_dir,
safe_serialization=args.safe_serialization,
num_shards=args.num_shards,
instruct=args.instruct,
convert_checkpoints=args.convert_checkpoints,
)
| Llama4Converter |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/container_instance.py | {
"start": 1465,
"end": 7201
} | class ____(AzureBaseHook):
"""
A hook to communicate with Azure Container Instances.
This hook requires a service principal in order to work.
After creating this service principal
(Azure Active Directory/App Registrations), you need to fill in the
client_id (Application ID) as login, the generated password as password,
and tenantId and subscriptionId in the extra's field as a json.
:param azure_conn_id: :ref:`Azure connection id<howto/connection:azure>` of
a service principal which will be used to start the container instance.
"""
conn_name_attr = "azure_conn_id"
default_conn_name = "azure_default"
conn_type = "azure_container_instance"
hook_name = "Azure Container Instance"
def __init__(self, azure_conn_id: str = default_conn_name) -> None:
super().__init__(sdk_client=ContainerInstanceManagementClient, conn_id=azure_conn_id)
@cached_property
def connection(self):
return self.get_conn()
def get_conn(self) -> Any:
"""
Authenticate the resource using the connection id passed during init.
:return: the authenticated client.
"""
conn = self.get_connection(self.conn_id)
tenant = conn.extra_dejson.get("tenantId")
key_path = conn.extra_dejson.get("key_path")
if key_path:
if not key_path.endswith(".json"):
raise AirflowException("Unrecognised extension for key file.")
self.log.info("Getting connection using a JSON key file.")
return get_client_from_auth_file(client_class=self.sdk_client, auth_path=key_path)
key_json = conn.extra_dejson.get("key_json")
if key_json:
self.log.info("Getting connection using a JSON config.")
return get_client_from_json_dict(client_class=self.sdk_client, config_dict=key_json)
credential: ClientSecretCredential | DefaultAzureCredential
if all([conn.login, conn.password, tenant]):
self.log.info("Getting connection using specific credentials and subscription_id.")
credential = ClientSecretCredential(
client_id=cast("str", conn.login),
client_secret=cast("str", conn.password),
tenant_id=cast("str", tenant),
)
else:
self.log.info("Using DefaultAzureCredential as credential")
managed_identity_client_id = conn.extra_dejson.get("managed_identity_client_id")
workload_identity_tenant_id = conn.extra_dejson.get("workload_identity_tenant_id")
credential = get_sync_default_azure_credential(
managed_identity_client_id=managed_identity_client_id,
workload_identity_tenant_id=workload_identity_tenant_id,
)
subscription_id = cast("str", conn.extra_dejson.get("subscriptionId"))
return ContainerInstanceManagementClient(
credential=credential,
subscription_id=subscription_id,
)
def create_or_update(self, resource_group: str, name: str, container_group: ContainerGroup) -> None:
"""
Create a new container group.
:param resource_group: the name of the resource group
:param name: the name of the container group
:param container_group: the properties of the container group
"""
self.connection.container_groups.begin_create_or_update(resource_group, name, container_group)
def get_state(self, resource_group: str, name: str) -> ContainerGroup:
"""
Get the state of a container group.
:param resource_group: the name of the resource group
:param name: the name of the container group
:return: ContainerGroup
"""
return self.connection.container_groups.get(resource_group, name)
def get_logs(self, resource_group: str, name: str, tail: int = 1000) -> list:
"""
Get the tail from logs of a container group.
:param resource_group: the name of the resource group
:param name: the name of the container group
:param tail: the size of the tail
:return: A list of log messages
"""
logs = self.connection.containers.list_logs(resource_group, name, name, tail=tail)
if logs.content is None:
return [None]
return logs.content.splitlines(True)
def delete(self, resource_group: str, name: str) -> None:
"""
Delete a container group.
:param resource_group: the name of the resource group
:param name: the name of the container group
"""
self.connection.container_groups.begin_delete(resource_group, name)
def exists(self, resource_group: str, name: str) -> bool:
"""
Test if a container group exists.
:param resource_group: the name of the resource group
:param name: the name of the container group
"""
for container in self.connection.container_groups.list_by_resource_group(resource_group):
if container.name == name:
return True
return False
def test_connection(self):
"""Test a configured Azure Container Instance connection."""
try:
# Attempt to list existing container groups under the configured subscription and retrieve the
# first in the returned iterator. We need to _actually_ try to retrieve an object to properly
# test the connection.
next(self.connection.container_groups.list(), None)
except Exception as e:
return False, str(e)
return True, "Successfully connected to Azure Container Instance."
| AzureContainerInstanceHook |
python | django__django | tests/migrations/test_migrations_noop/0001_initial.py | {
"start": 35,
"end": 170
} | class ____(migrations.Migration):
initial = True
operations = [
migrations.RunSQL(sql="", reverse_sql=""),
]
| Migration |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-inorder-traversal.py | {
"start": 29,
"end": 182
} | class ____(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# Morris Traversal Solution
| TreeNode |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/named_types.py | {
"start": 13378,
"end": 13629
} | class ____(NamedTypeGenerator):
def visit_DOMAIN(self, domain):
if not self._can_create_type(domain):
return
with self.with_ddl_events(domain):
self.connection.execute(CreateDomainType(domain))
| DomainGenerator |
python | huggingface__transformers | src/transformers/models/videomae/modeling_videomae.py | {
"start": 3406,
"end": 4523
} | class ____(nn.Module):
"""
Construct the patch and position embeddings.
"""
def __init__(self, config):
super().__init__()
self.patch_embeddings = VideoMAEPatchEmbeddings(config)
self.num_patches = self.patch_embeddings.num_patches
# fixed sin-cos embedding
self.position_embeddings = get_sinusoid_encoding_table(self.num_patches, config.hidden_size)
self.config = config
def forward(self, pixel_values, bool_masked_pos):
# create patch embeddings
embeddings = self.patch_embeddings(pixel_values)
# add position embeddings
embeddings = embeddings + self.position_embeddings.detach().type_as(embeddings).to(
device=embeddings.device, copy=True
)
# only keep visible patches
# ~bool_masked_pos means visible
if bool_masked_pos is not None:
batch_size, _, num_channels = embeddings.shape
embeddings = embeddings[~bool_masked_pos]
embeddings = embeddings.reshape(batch_size, -1, num_channels)
return embeddings
| VideoMAEEmbeddings |
python | numpy__numpy | numpy/polynomial/tests/test_hermite_e.py | {
"start": 16726,
"end": 17324
} | class ____:
def test_100(self):
x, w = herme.hermegauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = herme.hermevander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1 / np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.sqrt(2 * np.pi)
assert_almost_equal(w.sum(), tgt)
| TestGauss |
python | joke2k__faker | faker/providers/lorem/th_TH/__init__.py | {
"start": 68,
"end": 7475
} | class ____(LoremProvider):
"""Implement lorem provider for ``th_TH`` locale.
Word list is randomly drawn from the Thailand's Ministry of Education,
removing compound words and long words, adding common words (like
prepositions) and few of regional words.
Sources:
- http://www.arts.chula.ac.th/~ling/TTC/id-4.html
- https://www.sanook.com/campus/1390689/
- https://www.sanook.com/campus/1397677/
- https://www.sanook.com/campus/1392241/
"""
word_connector = "" # Thai writing has no word divider
sentence_punctuation = " " # single space
word_list = (
"กตัญญู",
"กบ",
"กรดไหลย้อน",
"กรรมการ",
"กระจาย",
"กระถาง",
"กล",
"กล่อง",
"กล้า",
"กลาง",
"กลางคืน",
"กล่าว",
"กว้าง",
"กะเพรา",
"กะละมัง",
"กับ",
"ก้าง",
"กาม",
"การ",
"กำ",
"กำไร",
"กิ่งไม้",
"กิจกรรม",
"กิน",
"กิโลเมตร",
"กีฬา",
"กู",
"กูเกิล",
"เกม",
"เกาหลี",
"แก้ว",
"แกะ",
"แก",
"แก่",
"แก้",
"โก๋แก่",
"โกง",
"ขนม",
"ขนมชั้น",
"ของหวาน",
"ขัด",
"ขันน้ำ",
"ข้าง",
"ขาดเคิ่ง",
"ข้าว",
"ข้าวเจ้า",
"ข้าวหมูแดง",
"ขี่",
"ขี้ไคล",
"ขี้ดิน",
"ขุด",
"เขยิบ",
"เขยื้อน",
"เข้ารหัส",
"แข่งขัน",
"แข็ง",
"แข้ง",
"ไข่",
"คนไข้",
"คนตาย",
"คบ",
"คมนาคม",
"ครอง",
"ครู",
"คลาน",
"ควร",
"ความ",
"คอก",
"คอมมิวนิสต์",
"ค่อย",
"คะแนน",
"คั่ว",
"คาว",
"คำถาม",
"คำสั่ง",
"คู่",
"เคย",
"เครื่องบิน",
"เคเอฟซี",
"เคารพ",
"แคะ",
"โควิด",
"ไค้หัน",
"งม",
"ง่วง",
"เงา",
"โง่",
"จะไปพั่ง",
"จัด",
"จาก",
"จ๋า",
"เจ็บไข้",
"แจ่มใส",
"ใจ",
"ฉีด",
"เฉย",
"ชนิด",
"ชะนี",
"ช้า",
"ชาว",
"ชาวนา",
"ชิง",
"ชุดนอน",
"ชุมนุม",
"ชู",
"เช้า",
"เชื่อม",
"เชื้อโรค",
"เชื่อ",
"ไชโย",
"ซ่อน",
"ซ่อมเบิ่ง",
"ซอย",
"ซี่",
"แซง",
"ด้วย",
"ดอกไม้",
"ดอง",
"ดังนี้",
"ด้าย",
"ดาวเทียม",
"ดำ",
"ดี",
"ดึก",
"ดู",
"เดี่ยว",
"โดย",
"ได้แก่",
"ตกลง",
"ต้น",
"ตรวจ",
"ตลอด",
"ตอก",
"ตอใด",
"ต่อ",
"ตะแกรง",
"ตะปู",
"ตั้งแต่",
"ตับ",
"ตัวเมีย",
"ตัวอย่าง",
"ตำลึง",
"ติด",
"ตีน",
"ตื่น",
"ตู้",
"ตู่",
"เตา",
"เตียน",
"แต่ง",
"แตะ",
"แต่",
"โตย",
"โต",
"ไต้หวัน",
"ไต้",
"ถกเถียง",
"ถาง",
"ถีบ",
"ถึง",
"แถบ",
"ทด",
"ทดลอง",
"ทรัพย์สิน",
"ทวด",
"ทวิตเตอร์",
"ทหาร",
"ท้องฟ้า",
"ทอด",
"ทอดมัน",
"ทั่ว",
"ทาน",
"ทำสวน",
"ที่ดิน",
"ที่",
"ทุกข์",
"ทุ่ม",
"เทเลแกรม",
"แท็กซี่",
"แท็บลอยด์",
"ธนาคาร",
"ธาตุ",
"น้อง",
"นักเรียน",
"นั่ง",
"น้า",
"น้ำเย็น",
"น้ำหวาน",
"นิ่ม",
"นุ่น",
"เนื่องจาก",
"เนื้อ",
"โน่น",
"ใน",
"บริโภค",
"บริษัท",
"บอก",
"บอกใบ้",
"บัดนี้",
"บันได",
"บาด",
"บูชา",
"บูด",
"เบียร์",
"ใบไม้",
"ปกครอง",
"ประชาธิปไตย",
"ประพฤติ",
"ประสบการณ์",
"ปาก",
"ปิ่นโต",
"ปี",
"ปี่",
"ปู",
"เป็น",
"เปลือง",
"เป้า",
"แปรง",
"ผล",
"ผลัด",
"ผลิต",
"ผสม",
"ผ่อ",
"ผัก",
"ผิด",
"ผีก",
"ผู้ร้าย",
"เผื่อ",
"แผนที่",
"โผล่",
"ฝาก",
"พนมมือ",
"พยาธิ",
"พ่อ",
"พักผ่อน",
"พับ",
"พิการ",
"พิพักพิพ่วน",
"เพดาน",
"เพราะ",
"เพลง",
"เพียง",
"แพ้",
"ฟาก",
"เฟซบุ๊ก",
"มลายู",
"มอบ",
"มะเขือเทศ",
"มัสยิด",
"มิตร",
"เมตตา",
"เมล็ด",
"เมาะ",
"แมค",
"แม่มด",
"แมลง",
"แม่",
"แม้",
"ย่อ",
"ยัน",
"ยา",
"ย้ำ",
"ยีราฟ",
"ยึด",
"ยูทูบ",
"เย็น",
"เย็บ",
"เยอะ",
"เยาวชน",
"รถโดยสาร",
"รถถัง",
"รถทัวร์",
"รถบัส",
"ร่มรื่น",
"รสชาติ",
"ร้อน",
"รอ",
"ระเบียง",
"ระยำ",
"รังแก",
"รัฐบาล",
"รัฐประหาร",
"ราก",
"ร่างกาย",
"ร่าง",
"ริม",
"รู้จัก",
"เริ่ม",
"เรียง",
"เรื่อย",
"แรก",
"แรงงาน",
"โรงสี",
"ฤดู",
"ลงมือ",
"ล่อ",
"ลืมคาว",
"ลูกชิ้น",
"ลูกตา",
"ลูก",
"เล่ม",
"เลี้ยว",
"เลือก",
"แลก",
"และ",
"วัง",
"วัฒนธรรม",
"วาด",
"วิกิพีเดีย",
"วิ่ง",
"วิชาชีพ",
"วินโดวส์",
"ศาลากลาง",
"ศาสตร์",
"ศิษย์",
"เศรษฐกิจ",
"เศษอาหาร",
"เศษ",
"สดชื่น",
"สด",
"สถานี",
"สนอง",
"สบาย",
"สมอง",
"สมาคม",
"สม่ำเสมอ",
"สลับ",
"สหกรณ์",
"สหภาพ",
"สัญญา",
"สาธารณรัฐ",
"สารวัตร",
"สำนักงาน",
"สำหรับ",
"สีแดง",
"สีเทา",
"สี",
"สุขภาพ",
"สุดท้าย",
"เสรีนิยม",
"เสรีภาพ",
"เสียบ",
"แสง",
"หน้ากาก",
"หน้าต่าง",
"หน้าที่",
"หนุน",
"หนู",
"หมด",
"ห่มผ้า",
"หมอก",
"หม้อ",
"หมัด",
"หมี",
"หมุน",
"หยอก",
"หยัก",
"หรือ",
"หลง",
"หล่น",
"หลบ",
"หลังคา",
"ห่วงใย",
"หว่าน",
"ห่อข้าว",
"ห้องเรียน",
"หอย",
"ห้าง",
"หาบ",
"หาม้าย",
"หาย",
"หึงสา",
"หุ้ม",
"เหตุ",
"เห็น",
"แหย่",
"ใหม่",
"ไหน",
"องค์",
"อด",
"อธิษฐาน",
"อนุบาล",
"อบอุ่น",
"อวัยวะ",
"ออนซอนเด๊",
"อ่อนหวาน",
"อัศจรรย์",
"อายุ",
"อาสา",
"อาหาร",
"อิฐ",
"อินเทอร์เน็ต",
"อินสตาแกรม",
"อิสลาม",
"อุปโภค",
"เอสซีบี",
"เอิด",
"แอนดรอยด์",
"ไอศกรีม",
"ไอโอเอส",
)
parts_of_speech: Dict[str, tuple] = {}
| Provider |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 9981,
"end": 10365
} | class ____(BaseModel):
"""
Schema for writing the response part of a Human-in-the-loop detail for a specific task instance.
"""
ti_id: Annotated[UUID, Field(title="Ti Id")]
chosen_options: Annotated[list[str], Field(min_length=1, title="Chosen Options")]
params_input: Annotated[dict[str, Any] | None, Field(title="Params Input")] = None
| UpdateHITLDetailPayload |
python | spack__spack | lib/spack/spack/directives.py | {
"start": 33370,
"end": 33472
} | class ____(DirectiveError):
"""Raised for errors with patching dependencies."""
| DependencyPatchError |
python | bokeh__bokeh | src/bokeh/models/tickers.py | {
"start": 2564,
"end": 4896
} | class ____(Ticker):
''' Generate tick locations that are computed by a user-defined function.
A ``CustomJSTicker`` may be used with either a continuous (numeric) axis,
or a categorical axis. However, only basic, non-hierarchical categorical
axes (i.e. with a single level of factors) are supported.
.. warning::
The explicit purpose of this Bokeh Model is to embed *raw JavaScript
code* for a browser to execute. If any part of the code is derived
from untrusted user inputs, then you must take appropriate care to
sanitize the user input prior to passing to Bokeh.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
args = Dict(String, AnyRef, help="""
A mapping of names to Python objects. In particular those can be bokeh's models.
These objects are made available to the ticker's code snippet as the values of
named parameters to the callback.
""")
major_code = String(default="", help="""
Callback code to run in the browser to compute minor tick locations for the
current viewport.
The ``cb_data`` parameter that is available to the callback code will contain
four specific fields:
``start``
the computed start coordinate of the axis
``end``
the computed end of the axis
``range``
the Range model for this axis
``cross_loc``
the coordinate that this axis intersects the orthogonal axis
""")
minor_code = String(default="", help="""
Callback code to run in the browser to compute minor tick locations for the
current viewport.
.. note::
Minor ticks are not used for categorical axes. This property will be
ignored when the range is a ``FactorRange``.
The ``cb_data`` parameter that is available to the callback code will contain
five specific fields:
``major_ticks``
the list of the current computed major tick locations
``start``
the computed start coordinate of the axis
``end``
the computed end of the axis
``range``
the Range model for this axis
``cross_loc``
the coordinate that this axis intersects the orthogonal axis
""")
@abstract
| CustomJSTicker |
python | django__django | tests/model_forms/models.py | {
"start": 2126,
"end": 2190
} | class ____(Writer):
score = models.IntegerField()
| BetterWriter |
python | has2k1__plotnine | plotnine/geoms/geom_polygon.py | {
"start": 500,
"end": 4149
} | class ____(geom):
"""
Polygon, a filled path
{usage}
Parameters
----------
{common_parameters}
Notes
-----
All paths in the same `group` aesthetic value make up a polygon.
"""
DEFAULT_AES = {
"alpha": 1,
"color": None,
"fill": "#333333",
"linetype": "solid",
"size": 0.5,
}
DEFAULT_PARAMS = {
"stat": "identity",
"position": "identity",
"na_rm": False,
}
REQUIRED_AES = {"x", "y"}
legend_key_size = staticmethod(geom_path.legend_key_size)
def handle_na(self, data: pd.DataFrame) -> pd.DataFrame:
return data
def draw_panel(
self,
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
):
"""
Plot all groups
"""
self.draw_group(data, panel_params, coord, ax, self.params)
@staticmethod
def draw_group(
data: pd.DataFrame,
panel_params: panel_view,
coord: coord,
ax: Axes,
params: dict[str, Any],
):
from matplotlib.collections import PolyCollection
data = coord.transform(data, panel_params, munch=True)
data["linewidth"] = data["size"] * SIZE_FACTOR
# Each group is a polygon with a single facecolor
# with potentially an edgecolor for every edge.
verts = []
facecolor = []
edgecolor = []
linestyle = []
linewidth = []
# Some stats may order the data in ways that prevent
# objects from occluding other objects. We do not want
# to undo that order.
grouper = data.groupby("group", sort=False)
for group, df in grouper:
fill = to_rgba(df["fill"].iloc[0], df["alpha"].iloc[0])
verts.append(tuple(zip(df["x"], df["y"])))
facecolor.append("none" if fill is None else fill)
edgecolor.append(df["color"].iloc[0] or "none")
linestyle.append(df["linetype"].iloc[0])
linewidth.append(df["linewidth"].iloc[0])
col = PolyCollection(
verts,
facecolors=facecolor,
edgecolors=edgecolor,
linestyles=linestyle,
linewidths=linewidth,
zorder=params["zorder"],
rasterized=params["raster"],
)
ax.add_collection(col)
@staticmethod
def draw_legend(
data: pd.Series[Any], da: DrawingArea, lyr: layer
) -> DrawingArea:
"""
Draw a rectangle in the box
Parameters
----------
data : Series
Data Row
da : DrawingArea
Canvas
lyr : layer
Layer
Returns
-------
out : DrawingArea
"""
from matplotlib.patches import Rectangle
# We take into account that the linewidth
# bestrides the boundary of the rectangle
linewidth = data["size"] * SIZE_FACTOR
linewidth = np.min([linewidth, da.width / 4, da.height / 4])
if data["color"] is None:
linewidth = 0
facecolor = to_rgba(data["fill"], data["alpha"])
if facecolor is None:
facecolor = "none"
rect = Rectangle(
(0 + linewidth / 2, 0 + linewidth / 2),
width=da.width - linewidth,
height=da.height - linewidth,
linewidth=linewidth,
linestyle=data["linetype"],
facecolor=facecolor,
edgecolor=data["color"],
capstyle="projecting",
)
da.add_artist(rect)
return da
| geom_polygon |
python | apache__airflow | providers/common/sql/src/airflow/providers/common/sql/operators/sql.py | {
"start": 16266,
"end": 26689
} | class ____(BaseSQLOperator):
"""
Performs one or more of the templated checks in the column_checks dictionary.
Checks are performed on a per-column basis specified by the column_mapping.
Each check can take one or more of the following options:
* ``equal_to``: an exact value to equal, cannot be used with other comparison options
* ``greater_than``: value that result should be strictly greater than
* ``less_than``: value that results should be strictly less than
* ``geq_to``: value that results should be greater than or equal to
* ``leq_to``: value that results should be less than or equal to
* ``tolerance``: the percentage that the result may be off from the expected value
* ``partition_clause``: an extra clause passed into a WHERE statement to partition data
:param table: the table to run checks on
:param column_mapping: the dictionary of columns and their associated checks, e.g.
.. code-block:: python
{
"col_name": {
"null_check": {
"equal_to": 0,
"partition_clause": "foreign_key IS NOT NULL",
},
"min": {
"greater_than": 5,
"leq_to": 10,
"tolerance": 0.2,
},
"max": {"less_than": 1000, "geq_to": 10, "tolerance": 0.01},
}
}
:param partition_clause: a partial SQL statement that is added to a WHERE clause in the query built by
the operator that creates partition_clauses for the checks to run on, e.g.
.. code-block:: python
"date = '1970-01-01'"
:param conn_id: the connection ID used to connect to the database
:param database: name of database which overwrite the defined one in connection
:param accept_none: whether or not to accept None values returned by the query. If true, converts None
to 0.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SQLColumnCheckOperator`
"""
template_fields: Sequence[str] = ("table", "partition_clause", "sql", *BaseSQLOperator.template_fields)
template_fields_renderers: ClassVar[dict] = {"sql": "sql"}
sql_check_template = """
SELECT '{column}' AS col_name, '{check}' AS check_type, {column}_{check} AS check_result
FROM (SELECT {check_statement} AS {column}_{check} FROM {table} {partition_clause}) AS sq
"""
column_checks = {
"null_check": "SUM(CASE WHEN {column} IS NULL THEN 1 ELSE 0 END)",
"distinct_check": "COUNT(DISTINCT({column}))",
"unique_check": "COUNT({column}) - COUNT(DISTINCT({column}))",
"min": "MIN({column})",
"max": "MAX({column})",
}
def __init__(
self,
*,
table: str,
column_mapping: dict[str, dict[str, Any]],
partition_clause: str | None = None,
conn_id: str | None = None,
database: str | None = None,
accept_none: bool = True,
**kwargs,
):
super().__init__(conn_id=conn_id, database=database, **kwargs)
self.table = table
self.column_mapping = column_mapping
self.partition_clause = _initialize_partition_clause(partition_clause)
self.accept_none = accept_none
def _build_checks_sql():
for column, checks in self.column_mapping.items():
for check, check_values in checks.items():
self._column_mapping_validation(check, check_values)
yield self._generate_sql_query(column, checks)
checks_sql = "UNION ALL".join(_build_checks_sql())
self.sql = f"SELECT col_name, check_type, check_result FROM ({checks_sql}) AS check_columns"
def execute(self, context: Context):
hook = self.get_db_hook()
records = hook.get_records(self.sql)
if not records:
self._raise_exception(f"The following query returned zero rows: {self.sql}")
self.log.info("Record: %s", records)
for column, check, result in records:
tolerance = self.column_mapping[column][check].get("tolerance")
self.column_mapping[column][check]["result"] = result
self.column_mapping[column][check]["success"] = self._get_match(
self.column_mapping[column][check], result, tolerance
)
failed_tests = [
f"Column: {col}\n\tCheck: {check},\n\tCheck Values: {check_values}\n"
for col, checks in self.column_mapping.items()
for check, check_values in checks.items()
if not check_values["success"]
]
if failed_tests:
exception_string = (
f"Test failed.\nResults:\n{records!s}\n"
f"The following tests have failed:\n{''.join(failed_tests)}"
)
self._raise_exception(exception_string)
self.log.info("All tests have passed")
def _generate_sql_query(self, column, checks):
def _generate_partition_clause(check):
if self.partition_clause and "partition_clause" not in checks[check]:
return f"WHERE {self.partition_clause}"
if not self.partition_clause and "partition_clause" in checks[check]:
return f"WHERE {checks[check]['partition_clause']}"
if self.partition_clause and "partition_clause" in checks[check]:
return f"WHERE {self.partition_clause} AND {checks[check]['partition_clause']}"
return ""
checks_sql = "UNION ALL".join(
self.sql_check_template.format(
check_statement=self.column_checks[check].format(column=column),
check=check,
table=self.table,
column=column,
partition_clause=_generate_partition_clause(check),
)
for check in checks
)
return checks_sql
def _get_match(self, check_values, record, tolerance=None) -> bool:
if record is None and self.accept_none:
record = 0
match_boolean = True
if "geq_to" in check_values:
if tolerance is not None:
match_boolean = record >= check_values["geq_to"] * (1 - tolerance)
else:
match_boolean = record >= check_values["geq_to"]
elif "greater_than" in check_values:
if tolerance is not None:
match_boolean = record > check_values["greater_than"] * (1 - tolerance)
else:
match_boolean = record > check_values["greater_than"]
if "leq_to" in check_values:
if tolerance is not None:
match_boolean = record <= check_values["leq_to"] * (1 + tolerance) and match_boolean
else:
match_boolean = record <= check_values["leq_to"] and match_boolean
elif "less_than" in check_values:
if tolerance is not None:
match_boolean = record < check_values["less_than"] * (1 + tolerance) and match_boolean
else:
match_boolean = record < check_values["less_than"] and match_boolean
if "equal_to" in check_values:
if tolerance is not None:
match_boolean = (
check_values["equal_to"] * (1 - tolerance)
<= record
<= check_values["equal_to"] * (1 + tolerance)
) and match_boolean
else:
match_boolean = record == check_values["equal_to"] and match_boolean
return match_boolean
def _column_mapping_validation(self, check, check_values):
if check not in self.column_checks:
raise AirflowException(f"Invalid column check: {check}.")
if (
"greater_than" not in check_values
and "geq_to" not in check_values
and "less_than" not in check_values
and "leq_to" not in check_values
and "equal_to" not in check_values
):
raise ValueError(
"Please provide one or more of: less_than, leq_to, "
"greater_than, geq_to, or equal_to in the check's dict."
)
if "greater_than" in check_values and "less_than" in check_values:
if check_values["greater_than"] >= check_values["less_than"]:
raise ValueError(
"greater_than should be strictly less than "
"less_than. Use geq_to or leq_to for "
"overlapping equality."
)
if "greater_than" in check_values and "leq_to" in check_values:
if check_values["greater_than"] >= check_values["leq_to"]:
raise ValueError(
"greater_than must be strictly less than leq_to. "
"Use geq_to with leq_to for overlapping equality."
)
if "geq_to" in check_values and "less_than" in check_values:
if check_values["geq_to"] >= check_values["less_than"]:
raise ValueError(
"geq_to should be strictly less than less_than. "
"Use leq_to with geq_to for overlapping equality."
)
if "geq_to" in check_values and "leq_to" in check_values:
if check_values["geq_to"] > check_values["leq_to"]:
raise ValueError("geq_to should be less than or equal to leq_to.")
if "greater_than" in check_values and "geq_to" in check_values:
raise ValueError("Only supply one of greater_than or geq_to.")
if "less_than" in check_values and "leq_to" in check_values:
raise ValueError("Only supply one of less_than or leq_to.")
if (
"greater_than" in check_values
or "geq_to" in check_values
or "less_than" in check_values
or "leq_to" in check_values
) and "equal_to" in check_values:
raise ValueError(
"equal_to cannot be passed with a greater or less than "
"function. To specify 'greater than or equal to' or "
"'less than or equal to', use geq_to or leq_to."
)
| SQLColumnCheckOperator |
python | jazzband__django-redis | tests/test_backend.py | {
"start": 952,
"end": 35733
} | class ____:
def test_setnx(self, cache: RedisCache):
# we should ensure there is no test_key_nx in redis
cache.delete("test_key_nx")
res = cache.get("test_key_nx")
assert res is None
res = cache.set("test_key_nx", 1, nx=True)
assert bool(res) is True
# test that second set will have
res = cache.set("test_key_nx", 2, nx=True)
assert res is False
res = cache.get("test_key_nx")
assert res == 1
cache.delete("test_key_nx")
res = cache.get("test_key_nx")
assert res is None
def test_setnx_timeout(self, cache: RedisCache):
# test that timeout still works for nx=True
res = cache.set("test_key_nx", 1, timeout=2, nx=True)
assert res is True
time.sleep(3)
res = cache.get("test_key_nx")
assert res is None
# test that timeout will not affect key, if it was there
cache.set("test_key_nx", 1)
res = cache.set("test_key_nx", 2, timeout=2, nx=True)
assert res is False
time.sleep(3)
res = cache.get("test_key_nx")
assert res == 1
cache.delete("test_key_nx")
res = cache.get("test_key_nx")
assert res is None
def test_unicode_keys(self, cache: RedisCache):
cache.set("ключ", "value")
res = cache.get("ключ")
assert res == "value"
def test_save_and_integer(self, cache: RedisCache):
cache.set("test_key", 2)
res = cache.get("test_key", "Foo")
assert isinstance(res, int)
assert res == 2
def test_save_string(self, cache: RedisCache):
cache.set("test_key", "hello" * 1000)
res = cache.get("test_key")
assert isinstance(res, str)
assert res == "hello" * 1000
cache.set("test_key", "2")
res = cache.get("test_key")
assert isinstance(res, str)
assert res == "2"
def test_save_unicode(self, cache: RedisCache):
cache.set("test_key", "heló")
res = cache.get("test_key")
assert isinstance(res, str)
assert res == "heló"
def test_save_dict(self, cache: RedisCache):
if isinstance(cache.client._serializer, (JSONSerializer, MSGPackSerializer)):
# JSONSerializer and MSGPackSerializer use the isoformat for
# datetimes.
now_dt: Union[str, datetime.datetime] = datetime.datetime.now().isoformat()
else:
now_dt = datetime.datetime.now()
test_dict = {"id": 1, "date": now_dt, "name": "Foo"}
cache.set("test_key", test_dict)
res = cache.get("test_key")
assert isinstance(res, dict)
assert res["id"] == 1
assert res["name"] == "Foo"
assert res["date"] == now_dt
def test_save_float(self, cache: RedisCache):
float_val = 1.345620002
cache.set("test_key", float_val)
res = cache.get("test_key")
assert isinstance(res, float)
assert res == float_val
def test_timeout(self, cache: RedisCache):
cache.set("test_key", 222, timeout=3)
time.sleep(4)
res = cache.get("test_key")
assert res is None
def test_timeout_0(self, cache: RedisCache):
cache.set("test_key", 222, timeout=0)
res = cache.get("test_key")
assert res is None
def test_timeout_parameter_as_positional_argument(self, cache: RedisCache):
cache.set("test_key", 222, -1)
res = cache.get("test_key")
assert res is None
cache.set("test_key", 222, 1)
res1 = cache.get("test_key")
time.sleep(2)
res2 = cache.get("test_key")
assert res1 == 222
assert res2 is None
# nx=True should not overwrite expire of key already in db
cache.set("test_key", 222, None)
cache.set("test_key", 222, -1, nx=True)
res = cache.get("test_key")
assert res == 222
def test_timeout_negative(self, cache: RedisCache):
cache.set("test_key", 222, timeout=-1)
res = cache.get("test_key")
assert res is None
cache.set("test_key", 222, timeout=None)
cache.set("test_key", 222, timeout=-1)
res = cache.get("test_key")
assert res is None
# nx=True should not overwrite expire of key already in db
cache.set("test_key", 222, timeout=None)
cache.set("test_key", 222, timeout=-1, nx=True)
res = cache.get("test_key")
assert res == 222
def test_timeout_tiny(self, cache: RedisCache):
cache.set("test_key", 222, timeout=0.00001)
res = cache.get("test_key")
assert res in (None, 222)
def test_set_add(self, cache: RedisCache):
cache.set("add_key", "Initial value")
res = cache.add("add_key", "New value")
assert res is False
res = cache.get("add_key")
assert res == "Initial value"
res = cache.add("other_key", "New value")
assert res is True
def test_get_many(self, cache: RedisCache):
cache.set("a", 1)
cache.set("b", 2)
cache.set("c", 3)
res = cache.get_many(["a", "b", "c"])
assert res == {"a": 1, "b": 2, "c": 3}
def test_get_many_unicode(self, cache: RedisCache):
cache.set("a", "1")
cache.set("b", "2")
cache.set("c", "3")
res = cache.get_many(["a", "b", "c"])
assert res == {"a": "1", "b": "2", "c": "3"}
def test_set_many(self, cache: RedisCache):
cache.set_many({"a": 1, "b": 2, "c": 3})
res = cache.get_many(["a", "b", "c"])
assert res == {"a": 1, "b": 2, "c": 3}
def test_set_call_empty_pipeline(
self,
cache: RedisCache,
mocker: MockerFixture,
settings: SettingsWrapper,
):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
pipeline = cache.client.get_client(write=True).pipeline()
key = "key"
value = "value"
mocked_set = mocker.patch.object(pipeline, "set")
cache.set(key, value, client=pipeline)
if isinstance(cache.client, herd.HerdClient):
default_timeout = cache.client._backend.default_timeout
herd_timeout = (default_timeout + settings.CACHE_HERD_TIMEOUT) * 1000
herd_pack_value = cache.client._pack(value, default_timeout)
mocked_set.assert_called_once_with(
cache.client.make_key(key, version=None),
cache.client.encode(herd_pack_value),
nx=False,
px=herd_timeout,
xx=False,
)
else:
mocked_set.assert_called_once_with(
cache.client.make_key(key, version=None),
cache.client.encode(value),
nx=False,
px=cache.client._backend.default_timeout * 1000,
xx=False,
)
def test_delete(self, cache: RedisCache):
cache.set_many({"a": 1, "b": 2, "c": 3})
res = cache.delete("a")
assert bool(res) is True
res = cache.get_many(["a", "b", "c"])
assert res == {"b": 2, "c": 3}
res = cache.delete("a")
assert bool(res) is False
@patch("django_redis.cache.DJANGO_VERSION", (3, 1, 0, "final", 0))
def test_delete_return_value_type_new31(self, cache: RedisCache):
"""delete() returns a boolean instead of int since django version 3.1"""
cache.set("a", 1)
res = cache.delete("a")
assert isinstance(res, bool)
assert res is True
res = cache.delete("b")
assert isinstance(res, bool)
assert res is False
@patch("django_redis.cache.DJANGO_VERSION", new=(3, 0, 1, "final", 0))
def test_delete_return_value_type_before31(self, cache: RedisCache):
"""delete() returns a int before django version 3.1"""
cache.set("a", 1)
res = cache.delete("a")
assert isinstance(res, int)
assert res == 1
res = cache.delete("b")
assert isinstance(res, int)
assert res == 0
def test_delete_many(self, cache: RedisCache):
cache.set_many({"a": 1, "b": 2, "c": 3})
res = cache.delete_many(["a", "b"])
assert bool(res) is True
res = cache.get_many(["a", "b", "c"])
assert res == {"c": 3}
res = cache.delete_many(["a", "b"])
assert bool(res) is False
def test_delete_many_generator(self, cache: RedisCache):
cache.set_many({"a": 1, "b": 2, "c": 3})
res = cache.delete_many(key for key in ["a", "b"])
assert bool(res) is True
res = cache.get_many(["a", "b", "c"])
assert res == {"c": 3}
res = cache.delete_many(["a", "b"])
assert bool(res) is False
def test_delete_many_empty_generator(self, cache: RedisCache):
res = cache.delete_many(key for key in cast("list[str]", []))
assert bool(res) is False
def test_incr(self, cache: RedisCache):
if isinstance(cache.client, herd.HerdClient):
pytest.skip("HerdClient doesn't support incr")
cache.set("num", 1)
cache.incr("num")
res = cache.get("num")
assert res == 2
cache.incr("num", 10)
res = cache.get("num")
assert res == 12
# max 64 bit signed int
cache.set("num", 9223372036854775807)
cache.incr("num")
res = cache.get("num")
assert res == 9223372036854775808
cache.incr("num", 2)
res = cache.get("num")
assert res == 9223372036854775810
cache.set("num", 3)
cache.incr("num", 2)
res = cache.get("num")
assert res == 5
def test_incr_no_timeout(self, cache: RedisCache):
if isinstance(cache.client, herd.HerdClient):
pytest.skip("HerdClient doesn't support incr")
cache.set("num", 1, timeout=None)
cache.incr("num")
res = cache.get("num")
assert res == 2
cache.incr("num", 10)
res = cache.get("num")
assert res == 12
# max 64 bit signed int
cache.set("num", 9223372036854775807, timeout=None)
cache.incr("num")
res = cache.get("num")
assert res == 9223372036854775808
cache.incr("num", 2)
res = cache.get("num")
assert res == 9223372036854775810
cache.set("num", 3, timeout=None)
cache.incr("num", 2)
res = cache.get("num")
assert res == 5
def test_incr_error(self, cache: RedisCache):
if isinstance(cache.client, herd.HerdClient):
pytest.skip("HerdClient doesn't support incr")
with pytest.raises(ValueError):
# key does not exist
cache.incr("numnum")
def test_incr_ignore_check(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support argument ignore_key_check to incr")
if isinstance(cache.client, herd.HerdClient):
pytest.skip("HerdClient doesn't support incr")
# key exists check will be skipped and the value will be incremented by
# '1' which is the default delta
cache.incr("num", ignore_key_check=True)
res = cache.get("num")
assert res == 1
cache.delete("num")
# since key doesnt exist it is set to the delta value, 10 in this case
cache.incr("num", 10, ignore_key_check=True)
res = cache.get("num")
assert res == 10
cache.delete("num")
# following are just regression checks to make sure it still works as
# expected with incr max 64 bit signed int
cache.set("num", 9223372036854775807)
cache.incr("num", ignore_key_check=True)
res = cache.get("num")
assert res == 9223372036854775808
cache.incr("num", 2, ignore_key_check=True)
res = cache.get("num")
assert res == 9223372036854775810
cache.set("num", 3)
cache.incr("num", 2, ignore_key_check=True)
res = cache.get("num")
assert res == 5
def test_get_set_bool(self, cache: RedisCache):
cache.set("bool", True)
res = cache.get("bool")
assert isinstance(res, bool)
assert res is True
cache.set("bool", False)
res = cache.get("bool")
assert isinstance(res, bool)
assert res is False
def test_decr(self, cache: RedisCache):
if isinstance(cache.client, herd.HerdClient):
pytest.skip("HerdClient doesn't support decr")
cache.set("num", 20)
cache.decr("num")
res = cache.get("num")
assert res == 19
cache.decr("num", 20)
res = cache.get("num")
assert res == -1
cache.decr("num", 2)
res = cache.get("num")
assert res == -3
cache.set("num", 20)
cache.decr("num")
res = cache.get("num")
assert res == 19
# max 64 bit signed int + 1
cache.set("num", 9223372036854775808)
cache.decr("num")
res = cache.get("num")
assert res == 9223372036854775807
cache.decr("num", 2)
res = cache.get("num")
assert res == 9223372036854775805
def test_version(self, cache: RedisCache):
cache.set("keytest", 2, version=2)
res = cache.get("keytest")
assert res is None
res = cache.get("keytest", version=2)
assert res == 2
def test_incr_version(self, cache: RedisCache):
cache.set("keytest", 2)
cache.incr_version("keytest")
res = cache.get("keytest")
assert res is None
res = cache.get("keytest", version=2)
assert res == 2
def test_ttl_incr_version_no_timeout(self, cache: RedisCache):
cache.set("my_key", "hello world!", timeout=None)
cache.incr_version("my_key")
my_value = cache.get("my_key", version=2)
assert my_value == "hello world!"
def test_delete_pattern(self, cache: RedisCache):
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
cache.set(key, "foo")
res = cache.delete_pattern("*foo-a*")
assert bool(res) is True
keys = cache.keys("foo*")
assert set(keys) == {"foo-bb", "foo-bc"}
res = cache.delete_pattern("*foo-a*")
assert bool(res) is False
@patch("django_redis.cache.RedisCache.client")
def test_delete_pattern_with_custom_count(self, client_mock, cache: RedisCache):
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
cache.set(key, "foo")
cache.delete_pattern("*foo-a*", itersize=2)
client_mock.delete_pattern.assert_called_once_with("*foo-a*", itersize=2)
@patch("django_redis.cache.RedisCache.client")
def test_delete_pattern_with_settings_default_scan_count(
self,
client_mock,
patch_itersize_setting,
cache: RedisCache,
settings: SettingsWrapper,
):
for key in ["foo-aa", "foo-ab", "foo-bb", "foo-bc"]:
cache.set(key, "foo")
expected_count = settings.DJANGO_REDIS_SCAN_ITERSIZE
cache.delete_pattern("*foo-a*")
client_mock.delete_pattern.assert_called_once_with(
"*foo-a*",
itersize=expected_count,
)
def test_close(self, cache: RedisCache, settings: SettingsWrapper):
settings.DJANGO_REDIS_CLOSE_CONNECTION = True
cache.set("f", "1")
cache.close()
def test_close_client(self, cache: RedisCache, mocker: MockerFixture):
mock = mocker.patch.object(cache.client, "close")
cache.close()
assert mock.called
def test_ttl(self, cache: RedisCache):
cache.set("foo", "bar", 10)
ttl = cache.ttl("foo")
if isinstance(cache.client, herd.HerdClient):
assert pytest.approx(ttl) == 12
else:
assert pytest.approx(ttl) == 10
# Test ttl None
cache.set("foo", "foo", timeout=None)
ttl = cache.ttl("foo")
assert ttl is None
# Test ttl with expired key
cache.set("foo", "foo", timeout=-1)
ttl = cache.ttl("foo")
assert ttl == 0
# Test ttl with not existent key
ttl = cache.ttl("not-existent-key")
assert ttl == 0
def test_pttl(self, cache: RedisCache):
# Test pttl
cache.set("foo", "bar", 10)
ttl = cache.pttl("foo")
# delta is set to 10 as precision error causes tests to fail
if isinstance(cache.client, herd.HerdClient):
assert pytest.approx(ttl, 10) == 12000
else:
assert pytest.approx(ttl, 10) == 10000
# Test pttl with float value
cache.set("foo", "bar", 5.5)
ttl = cache.pttl("foo")
if isinstance(cache.client, herd.HerdClient):
assert pytest.approx(ttl, 10) == 7500
else:
assert pytest.approx(ttl, 10) == 5500
# Test pttl None
cache.set("foo", "foo", timeout=None)
ttl = cache.pttl("foo")
assert ttl is None
# Test pttl with expired key
cache.set("foo", "foo", timeout=-1)
ttl = cache.pttl("foo")
assert ttl == 0
# Test pttl with not existent key
ttl = cache.pttl("not-existent-key")
assert ttl == 0
def test_persist(self, cache: RedisCache):
cache.set("foo", "bar", timeout=20)
assert cache.persist("foo") is True
ttl = cache.ttl("foo")
assert ttl is None
assert cache.persist("not-existent-key") is False
def test_expire(self, cache: RedisCache):
cache.set("foo", "bar", timeout=None)
assert cache.expire("foo", 20) is True
ttl = cache.ttl("foo")
assert pytest.approx(ttl) == 20
assert cache.expire("not-existent-key", 20) is False
def test_expire_with_default_timeout(self, cache: RedisCache):
cache.set("foo", "bar", timeout=None)
assert cache.expire("foo", DEFAULT_TIMEOUT) is True
assert cache.expire("not-existent-key", DEFAULT_TIMEOUT) is False
def test_pexpire(self, cache: RedisCache):
cache.set("foo", "bar", timeout=None)
assert cache.pexpire("foo", 20500) is True
ttl = cache.pttl("foo")
# delta is set to 10 as precision error causes tests to fail
assert pytest.approx(ttl, 10) == 20500
assert cache.pexpire("not-existent-key", 20500) is False
def test_pexpire_with_default_timeout(self, cache: RedisCache):
cache.set("foo", "bar", timeout=None)
assert cache.pexpire("foo", DEFAULT_TIMEOUT) is True
assert cache.pexpire("not-existent-key", DEFAULT_TIMEOUT) is False
def test_pexpire_at(self, cache: RedisCache):
# Test settings expiration time 1 hour ahead by datetime.
cache.set("foo", "bar", timeout=None)
expiration_time = datetime.datetime.now() + timedelta(hours=1)
assert cache.pexpire_at("foo", expiration_time) is True
ttl = cache.pttl("foo")
assert pytest.approx(ttl, 10) == timedelta(hours=1).total_seconds()
# Test settings expiration time 1 hour ahead by Unix timestamp.
cache.set("foo", "bar", timeout=None)
expiration_time = datetime.datetime.now() + timedelta(hours=2)
assert cache.pexpire_at("foo", int(expiration_time.timestamp() * 1000)) is True
ttl = cache.pttl("foo")
assert pytest.approx(ttl, 10) == timedelta(hours=2).total_seconds() * 1000
# Test settings expiration time 1 hour in past, which effectively
# deletes the key.
expiration_time = datetime.datetime.now() - timedelta(hours=2)
assert cache.pexpire_at("foo", expiration_time) is True
value = cache.get("foo")
assert value is None
expiration_time = datetime.datetime.now() + timedelta(hours=2)
assert cache.pexpire_at("not-existent-key", expiration_time) is False
def test_expire_at(self, cache: RedisCache):
# Test settings expiration time 1 hour ahead by datetime.
cache.set("foo", "bar", timeout=None)
expiration_time = datetime.datetime.now() + timedelta(hours=1)
assert cache.expire_at("foo", expiration_time) is True
ttl = cache.ttl("foo")
assert pytest.approx(ttl, 1) == timedelta(hours=1).total_seconds()
# Test settings expiration time 1 hour ahead by Unix timestamp.
cache.set("foo", "bar", timeout=None)
expiration_time = datetime.datetime.now() + timedelta(hours=2)
assert cache.expire_at("foo", int(expiration_time.timestamp())) is True
ttl = cache.ttl("foo")
assert pytest.approx(ttl, 1) == timedelta(hours=1).total_seconds() * 2
# Test settings expiration time 1 hour in past, which effectively
# deletes the key.
expiration_time = datetime.datetime.now() - timedelta(hours=2)
assert cache.expire_at("foo", expiration_time) is True
value = cache.get("foo")
assert value is None
expiration_time = datetime.datetime.now() + timedelta(hours=2)
assert cache.expire_at("not-existent-key", expiration_time) is False
def test_lock(self, cache: RedisCache):
lock = cache.lock("foobar")
assert lock.acquire(blocking=True)
assert cache.has_key("foobar")
lock.release()
assert not cache.has_key("foobar")
def test_lock_not_blocking(self, cache: RedisCache):
lock = cache.lock("foobar")
assert lock.acquire(blocking=False)
lock2 = cache.lock("foobar")
assert not lock2.acquire(blocking=False)
assert cache.has_key("foobar")
lock.release()
assert not cache.has_key("foobar")
def test_lock_released_by_thread(self, cache: RedisCache):
lock = cache.lock("foobar", thread_local=False)
assert lock.acquire(blocking=True)
def release_lock(lock_):
lock_.release()
t = threading.Thread(target=release_lock, args=[lock])
t.start()
t.join()
assert not cache.has_key("foobar")
def test_iter_keys(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support iter_keys")
cache.set("foo1", 1)
cache.set("foo2", 1)
cache.set("foo3", 1)
# Test simple result
result = set(cache.iter_keys("foo*"))
assert result == {"foo1", "foo2", "foo3"}
def test_iter_keys_itersize(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support iter_keys")
cache.set("foo1", 1)
cache.set("foo2", 1)
cache.set("foo3", 1)
# Test limited result
result = list(cache.iter_keys("foo*", itersize=2))
assert len(result) == 3
def test_iter_keys_generator(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support iter_keys")
cache.set("foo1", 1)
cache.set("foo2", 1)
cache.set("foo3", 1)
# Test generator object
result = cache.iter_keys("foo*")
next_value = next(result)
assert next_value is not None
def test_primary_replica_switching(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache = cast("RedisCache", caches["sample"])
client = cache.client
client._server = ["foo", "bar"]
client._clients = ["Foo", "Bar"]
assert client.get_client(write=True) == "Foo"
assert client.get_client(write=False) == "Bar"
def test_primary_replica_switching_with_index(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache = cast("RedisCache", caches["sample"])
client = cache.client
client._server = ["foo", "bar"]
client._clients = ["Foo", "Bar"]
assert client.get_client_with_index(write=True) == ("Foo", 0)
assert client.get_client_with_index(write=False) == ("Bar", 1)
def test_touch_zero_timeout(self, cache: RedisCache):
cache.set("test_key", 222, timeout=10)
assert cache.touch("test_key", 0) is True
res = cache.get("test_key")
assert res is None
def test_touch_positive_timeout(self, cache: RedisCache):
cache.set("test_key", 222, timeout=10)
assert cache.touch("test_key", 2) is True
assert cache.get("test_key") == 222
time.sleep(3)
assert cache.get("test_key") is None
def test_touch_negative_timeout(self, cache: RedisCache):
cache.set("test_key", 222, timeout=10)
assert cache.touch("test_key", -1) is True
res = cache.get("test_key")
assert res is None
def test_touch_missed_key(self, cache: RedisCache):
assert cache.touch("test_key_does_not_exist", 1) is False
def test_touch_forever(self, cache: RedisCache):
cache.set("test_key", "foo", timeout=1)
result = cache.touch("test_key", None)
assert result is True
assert cache.ttl("test_key") is None
time.sleep(2)
assert cache.get("test_key") == "foo"
def test_touch_forever_nonexistent(self, cache: RedisCache):
result = cache.touch("test_key_does_not_exist", None)
assert result is False
def test_touch_default_timeout(self, cache: RedisCache):
cache.set("test_key", "foo", timeout=1)
result = cache.touch("test_key")
assert result is True
time.sleep(2)
assert cache.get("test_key") == "foo"
def test_clear(self, cache: RedisCache):
cache.set("foo", "bar")
value_from_cache = cache.get("foo")
assert value_from_cache == "bar"
cache.clear()
value_from_cache_after_clear = cache.get("foo")
assert value_from_cache_after_clear is None
def test_hset(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.hset("foo_hash1", "foo1", "bar1")
cache.hset("foo_hash1", "foo2", "bar2")
assert cache.hlen("foo_hash1") == 2
assert cache.hexists("foo_hash1", "foo1")
assert cache.hexists("foo_hash1", "foo2")
def test_hdel(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.hset("foo_hash2", "foo1", "bar1")
cache.hset("foo_hash2", "foo2", "bar2")
assert cache.hlen("foo_hash2") == 2
deleted_count = cache.hdel("foo_hash2", "foo1")
assert deleted_count == 1
assert cache.hlen("foo_hash2") == 1
assert not cache.hexists("foo_hash2", "foo1")
assert cache.hexists("foo_hash2", "foo2")
def test_hlen(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
assert cache.hlen("foo_hash3") == 0
cache.hset("foo_hash3", "foo1", "bar1")
assert cache.hlen("foo_hash3") == 1
cache.hset("foo_hash3", "foo2", "bar2")
assert cache.hlen("foo_hash3") == 2
def test_hkeys(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.hset("foo_hash4", "foo1", "bar1")
cache.hset("foo_hash4", "foo2", "bar2")
cache.hset("foo_hash4", "foo3", "bar3")
keys = cache.hkeys("foo_hash4")
assert len(keys) == 3
for i in range(len(keys)):
assert keys[i] == f"foo{i + 1}"
def test_hexists(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.hset("foo_hash5", "foo1", "bar1")
assert cache.hexists("foo_hash5", "foo1")
assert not cache.hexists("foo_hash5", "foo")
def test_sadd(self, cache: RedisCache):
assert cache.sadd("foo", "bar") == 1
assert cache.smembers("foo") == {"bar"}
def test_scard(self, cache: RedisCache):
cache.sadd("foo", "bar", "bar2")
assert cache.scard("foo") == 2
def test_sdiff(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.sadd("foo1", "bar1", "bar2")
cache.sadd("foo2", "bar2", "bar3")
assert cache.sdiff("foo1", "foo2") == {"bar1"}
def test_sdiffstore(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.sadd("foo1", "bar1", "bar2")
cache.sadd("foo2", "bar2", "bar3")
assert cache.sdiffstore("foo3", "foo1", "foo2") == 1
assert cache.smembers("foo3") == {"bar1"}
def test_sdiffstore_with_keys_version(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.sadd("foo1", "bar1", "bar2", version=2)
cache.sadd("foo2", "bar2", "bar3", version=2)
assert cache.sdiffstore("foo3", "foo1", "foo2", version_keys=2) == 1
assert cache.smembers("foo3") == {"bar1"}
def test_sdiffstore_with_different_keys_versions_without_initial_set_in_version(
self,
cache: RedisCache,
):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.sadd("foo1", "bar1", "bar2", version=1)
cache.sadd("foo2", "bar2", "bar3", version=2)
assert cache.sdiffstore("foo3", "foo1", "foo2", version_keys=2) == 0
def test_sdiffstore_with_different_keys_versions_with_initial_set_in_version(
self,
cache: RedisCache,
):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.sadd("foo1", "bar1", "bar2", version=2)
cache.sadd("foo2", "bar2", "bar3", version=1)
assert cache.sdiffstore("foo3", "foo1", "foo2", version_keys=2) == 2
def test_sinter(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.sadd("foo1", "bar1", "bar2")
cache.sadd("foo2", "bar2", "bar3")
assert cache.sinter("foo1", "foo2") == {"bar2"}
def test_interstore(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.sadd("foo1", "bar1", "bar2")
cache.sadd("foo2", "bar2", "bar3")
assert cache.sinterstore("foo3", "foo1", "foo2") == 1
assert cache.smembers("foo3") == {"bar2"}
def test_sismember(self, cache: RedisCache):
cache.sadd("foo", "bar")
assert cache.sismember("foo", "bar") is True
assert cache.sismember("foo", "bar2") is False
def test_smove(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.sadd("foo1", "bar1", "bar2")
cache.sadd("foo2", "bar2", "bar3")
assert cache.smove("foo1", "foo2", "bar1") is True
assert cache.smove("foo1", "foo2", "bar4") is False
assert cache.smembers("foo1") == {"bar2"}
assert cache.smembers("foo2") == {"bar1", "bar2", "bar3"}
def test_spop_default_count(self, cache: RedisCache):
cache.sadd("foo", "bar1", "bar2")
assert cache.spop("foo") in {"bar1", "bar2"}
assert cache.smembers("foo") in [{"bar1"}, {"bar2"}]
def test_spop(self, cache: RedisCache):
cache.sadd("foo", "bar1", "bar2")
assert cache.spop("foo", 1) in [{"bar1"}, {"bar2"}]
assert cache.smembers("foo") in [{"bar1"}, {"bar2"}]
def test_srandmember_default_count(self, cache: RedisCache):
cache.sadd("foo", "bar1", "bar2")
assert cache.srandmember("foo") in {"bar1", "bar2"}
def test_srandmember(self, cache: RedisCache):
cache.sadd("foo", "bar1", "bar2")
assert cache.srandmember("foo", 1) in [["bar1"], ["bar2"]]
def test_srem(self, cache: RedisCache):
cache.sadd("foo", "bar1", "bar2")
assert cache.srem("foo", "bar1") == 1
assert cache.srem("foo", "bar3") == 0
def test_sscan(self, cache: RedisCache):
cache.sadd("foo", "bar1", "bar2")
items = cache.sscan("foo")
assert items == {"bar1", "bar2"}
def test_sscan_with_match(self, cache: RedisCache):
if cache.client._has_compression_enabled():
pytest.skip("Compression is enabled, sscan with match is not supported")
cache.sadd("foo", "bar1", "bar2", "zoo")
items = cache.sscan("foo", match="zoo")
assert items == {"zoo"}
def test_sscan_iter(self, cache: RedisCache):
cache.sadd("foo", "bar1", "bar2")
items = cache.sscan_iter("foo")
assert set(items) == {"bar1", "bar2"}
def test_sscan_iter_with_match(self, cache: RedisCache):
if cache.client._has_compression_enabled():
pytest.skip(
"Compression is enabled, sscan_iter with match is not supported",
)
cache.sadd("foo", "bar1", "bar2", "zoo")
items = cache.sscan_iter("foo", match="bar*")
assert set(items) == {"bar1", "bar2"}
def test_smismember(self, cache: RedisCache):
cache.sadd("foo", "bar1", "bar2", "bar3")
assert cache.smismember("foo", "bar1", "bar2", "xyz") == [True, True, False]
def test_sunion(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.sadd("foo1", "bar1", "bar2")
cache.sadd("foo2", "bar2", "bar3")
assert cache.sunion("foo1", "foo2") == {"bar1", "bar2", "bar3"}
def test_sunionstore(self, cache: RedisCache):
if isinstance(cache.client, ShardClient):
pytest.skip("ShardClient doesn't support get_client")
cache.sadd("foo1", "bar1", "bar2")
cache.sadd("foo2", "bar2", "bar3")
assert cache.sunionstore("foo3", "foo1", "foo2") == 3
assert cache.smembers("foo3") == {"bar1", "bar2", "bar3"}
| TestDjangoRedisCache |
python | cython__cython | Cython/Compiler/UtilNodes.py | {
"start": 1884,
"end": 3354
} | class ____(Node):
# THIS IS DEPRECATED, USE LetNode instead
"""
Creates a block which allocates temporary variables.
This is used by transforms to output constructs that need
to make use of a temporary variable. Simply pass the types
of the needed temporaries to the constructor.
The variables can be referred to using a TempRefNode
(which can be constructed by calling get_ref_node).
"""
# temps [TempHandle]
# body StatNode
child_attrs = ["body"]
def generate_execution_code(self, code):
for handle in self.temps:
handle.temp = code.funcstate.allocate_temp(
handle.type, manage_ref=handle.needs_cleanup)
self.body.generate_execution_code(code)
for handle in self.temps:
if handle.needs_cleanup:
if handle.needs_xdecref:
code.put_xdecref_clear(handle.temp, handle.type)
else:
code.put_decref_clear(handle.temp, handle.type)
code.funcstate.release_temp(handle.temp)
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.body = self.body.analyse_expressions(env)
return self
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
def annotate(self, code):
self.body.annotate(code)
| TempsBlockNode |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/embedding.py | {
"start": 1311,
"end": 1687
} | class ____(BaseEvent):
"""
EmbeddingEndEvent.
Args:
chunks (List[str]): List of chunks.
embeddings (List[List[float]]): List of embeddings.
"""
chunks: List[str]
embeddings: List[Dict[int, float]]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SparseEmbeddingEndEvent"
| SparseEmbeddingEndEvent |
python | scrapy__scrapy | tests/AsyncCrawlerProcess/twisted_reactor_custom_settings_same.py | {
"start": 254,
"end": 567
} | class ____(scrapy.Spider):
name = "asyncio_reactor2"
custom_settings = {
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
}
process = AsyncCrawlerProcess()
process.crawl(AsyncioReactorSpider1)
process.crawl(AsyncioReactorSpider2)
process.start()
| AsyncioReactorSpider2 |
python | tensorflow__tensorflow | tensorflow/tools/compatibility/ast_edits.py | {
"start": 5231,
"end": 6628
} | class ____:
"""This class defines the transformations that need to happen.
This class must provide the following fields:
* `function_keyword_renames`: maps function names to a map of old -> new
argument names
* `symbol_renames`: maps function names to new function names
* `change_to_function`: a set of function names that have changed (for
notifications)
* `function_reorders`: maps functions whose argument order has changed to the
list of arguments in the new order
* `function_warnings`: maps full names of functions to warnings that will be
printed out if the function is used. (e.g. tf.nn.convolution())
* `function_transformers`: maps function names to custom handlers
* `module_deprecations`: maps module names to warnings that will be printed
if the module is still used after all other transformations have run
* `import_renames`: maps import name (must be a short name without '.')
to ImportRename instance.
For an example, see `TFAPIChangeSpec`.
"""
def preprocess(self, root_node): # pylint: disable=unused-argument
"""Preprocess a parse tree. Return a preprocessed node, logs and errors."""
return root_node, [], []
def clear_preprocessing(self):
"""Restore this APIChangeSpec to before it preprocessed a file.
This is needed if preprocessing a file changed any rewriting rules.
"""
pass
| APIChangeSpec |
python | Textualize__textual | docs/examples/widgets/option_list_options.py | {
"start": 147,
"end": 1022
} | class ____(App[None]):
CSS_PATH = "option_list.tcss"
def compose(self) -> ComposeResult:
yield Header()
yield OptionList(
Option("Aerilon", id="aer"),
Option("Aquaria", id="aqu"),
None,
Option("Canceron", id="can"),
Option("Caprica", id="cap", disabled=True),
None,
Option("Gemenon", id="gem"),
None,
Option("Leonis", id="leo"),
Option("Libran", id="lib"),
None,
Option("Picon", id="pic"),
None,
Option("Sagittaron", id="sag"),
Option("Scorpia", id="sco"),
None,
Option("Tauron", id="tau"),
None,
Option("Virgon", id="vir"),
)
yield Footer()
if __name__ == "__main__":
OptionListApp().run()
| OptionListApp |
python | pytorch__pytorch | test/functorch/test_ops.py | {
"start": 12940,
"end": 126065
} | class ____(TestCase):
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@skipOps(
"TestOperators",
"test_grad",
vjp_fail.union(
{
xfail(
"chalf", "", device_type="cpu"
), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf'
xfail(
"sparse.sampled_addmm", ""
), # RuntimeError: Sparse CSR tensors do not have strides
xfail(
"sparse.mm", "reduce"
), # RuntimeError: Sparse CSR tensors do not have strides
# Non-contiguous Bugs
#
# AssertionError: Tensor-likes are not close!
xfail("as_strided"),
xfail("as_strided", "partial_views"),
# RuntimeError: !self.requires_grad() || self.is_contiguous()
xfail("as_strided_scatter"),
# RuntimeError: Tensor must have a last dimension with stride 1
xfail("view_as_complex"),
# query: last dimension must be contiguous
# Fused attention kernels require last dim to be contiguous
decorate(
"nn.functional.scaled_dot_product_attention",
decorator=expectedFailureIf(not TEST_WITH_ROCM),
), # Works on ROCm
xfail("torch.ops.aten._flash_attention_forward"),
xfail("torch.ops.aten._efficient_attention_forward"),
}
),
)
@opsToleranceOverride(
"TestOperators",
"test_grad",
(
tol1(
"nn.functional.binary_cross_entropy_with_logits",
{torch.float32: tol(atol=1e-04, rtol=1e-04)},
),
tol1("masked.cumprod", {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
tol1("svd_lowrank", {torch.float32: tol(atol=3e-04, rtol=3e-04)}),
tol1(
"linalg.multi_dot",
{torch.float32: tol(atol=1e-05, rtol=8e-04)},
device_type="cuda",
),
tol1(
"linalg.tensorsolve",
{torch.float32: tol(atol=3e-04, rtol=3e-04)},
device_type="cuda",
),
tol1(
"nn.functional.multi_head_attention_forward",
{torch.float32: tol(atol=8e-04, rtol=1e-03)},
),
tol1(
"__rmatmul__",
{torch.float32: tol(atol=3e-04, rtol=3e-04)},
device_type="cuda",
),
tol1(
"matmul",
{torch.float32: tol(atol=3e-04, rtol=3e-04)},
device_type="cuda",
),
tol1(
"pca_lowrank",
{torch.float32: tol(atol=3e-05, rtol=4e-06)},
device_type="cpu",
),
),
)
def test_grad(self, device, dtype, op):
if op.name in vjp_fail:
self.skipTest("Skipped; Expected failures")
return
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
if is_inplace(op, op.get_op()):
self.skipTest("Skipped for redundancy. test_vjp handles in-place testing.")
return
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
if op.name not in skip_noncontig:
noncontig_sample = sample.noncontiguous()
noncontig_args = [noncontig_sample.input] + list(noncontig_sample.args)
noncontig_kwargs = noncontig_sample.kwargs
diff_argnums = tuple(i for i, arg in enumerate(args) if diff_arg(arg))
assert len(diff_argnums) > 0
diff_args = tuple(args[i] for i in diff_argnums)
def wrapped_fn(*args, **kwargs):
result = op(*args, **kwargs)
if sample.output_process_fn_grad is not None:
result = sample.output_process_fn_grad(result)
def abs_if_complex(t):
if t.dtype.is_complex:
return t.abs()
return t
# Reduce into single value for grad
if isinstance(result, torch.Tensor):
return abs_if_complex(result.sum())
result = sum(abs_if_complex(res.sum()) for res in result)
return result
result = grad(wrapped_fn, diff_argnums)(*args, **kwargs)
expected = _autograd_grad(_as_tuple(wrapped_fn(*args, **kwargs)), diff_args)
self.assertEqual(result, expected)
if op.name not in skip_noncontig:
result_noncontig = grad(wrapped_fn, diff_argnums)(
*noncontig_args, **noncontig_kwargs
)
self.assertEqual(result_noncontig, expected)
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@skipOps(
"TestOperators",
"test_jvp",
set(
{
# Composite ops that do bad things. Need to be fixed in PyTorch core.
# RuntimeError: Cannot access data pointer of Tensor that doesn't have storage
xfail("tensor_split"),
# BUG: silent incorrectness: runs and produces numerical differences
skip("nn.functional.max_unpool1d"), # fails everywhere except on mac
skip(
"nn.functional.max_unpool2d"
), # fails everywhere except on windows
skip("nn.functional.max_unpool3d"), # fails everywhere except on mac
xfail(
"native_batch_norm"
), # TODO: fails comparing None to tensor of 0s for saved_mean/var tangents
xfail(
"_native_batch_norm_legit"
), # TODO: fails comparing None to tensor of 0s for saved_mean/var tangents
xfail(
"_batch_norm_with_update"
), # TODO: fails comparing None to tensor of 0s for saved_mean/var tangents
xfail("nn.functional.scaled_dot_product_attention"),
xfail("torch.ops.aten._flash_attention_forward"),
xfail("torch.ops.aten._efficient_attention_forward"),
xfail(
"nn.functional.rrelu"
), # in-place test errors out with no formula implemented
xfail(
"NumpyExpMarkDirtyAutogradFunction"
), # TODO: https://github.com/pytorch/pytorch/issues/91280
# --- Non-Contiguous Failures! ---
# This is expected to fail as the operator
# expects last dim to have stride=1
xfail("view_as_complex"),
# BUG
# AssertionError: Tensor-likes are not close!
xfail("as_strided"),
xfail("as_strided", "partial_views"),
xfail("as_strided_scatter"),
}
),
)
@opsToleranceOverride(
"TestOperators",
"test_jvp",
(
tol1(
"nn.functional.conv_transpose3d",
{torch.float32: tol(atol=1e-04, rtol=1.3e-06)},
device_type="cuda",
),
tol1(
"linalg.tensorsolve",
{torch.float32: tol(atol=1e-04, rtol=1.3e-05)},
device_type="cuda",
),
tol1(
"masked.prod",
{torch.float32: tol(atol=1e-05, rtol=1.3e-05)},
device_type="cuda",
),
tol1(
"nn.functional.binary_cross_entropy_with_logits",
{torch.float32: tol(atol=4e-04, rtol=4e-04)},
),
tol1(
"nn.functional.batch_norm", {torch.float32: tol(atol=4e-05, rtol=5e-05)}
),
tol1("nn.functional.conv2d", {torch.float32: tol(atol=4e-05, rtol=5e-05)}),
tol1("svd_lowrank", {torch.float32: tol(atol=5e-05, rtol=5e-05)}),
tol1("pca_lowrank", {torch.float32: tol(atol=5e-05, rtol=5e-05)}),
tol1(
"nn.functional.multi_head_attention_forward",
{torch.float32: tol(atol=6e-05, rtol=2e-05)},
),
tol2(
"linalg.pinv", "hermitian", {torch.float32: tol(atol=5e-5, rtol=2e-5)}
),
),
)
def test_jvp(self, device, dtype, op):
# TODO: get rid of vjp_decomp when we add decomposition support to
# PyTorch's forward-mode ad. Currently the decomposition support only
# works for functorch.jvp
VJP_DECOMP = {
"nn.functional.logsigmoid",
}
if op.name in VJP_DECOMP:
fixme_ref_jvp_local = simulate_jvp
else:
fixme_ref_jvp_local = ref_jvp
if not op.supports_forward_ad and op.name not in VJP_DECOMP:
self.skipTest("Skipped! Forward AD not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
outplace_variant = op if not is_inplace(op, op.get_op()) else None
inplace_variant = op.inplace_variant if op.supports_inplace_autograd else None
for sample in samples:
if outplace_variant:
self.jvp_opinfo_test(
outplace_variant,
sample,
sample.output_process_fn_grad,
clone_inputs=False,
fixme_ref_jvp_local=fixme_ref_jvp_local,
test_noncontig=op.name not in skip_noncontig,
)
if is_valid_inplace_sample_input(sample, op, inplace_variant):
self.jvp_opinfo_test(
inplace_variant,
sample,
sample.output_process_fn_grad,
clone_inputs=True,
fixme_ref_jvp_local=fixme_ref_jvp_local,
test_noncontig=op.name not in skip_noncontig,
)
    def jvp_opinfo_test(
        self,
        fn,
        sample,
        output_process_fn,
        clone_inputs,
        fixme_ref_jvp_local,
        test_noncontig,
    ):
        """Run a single jvp check: compare jvp(fn) against the reference jvp.

        Args:
            fn: op variant under test (out-of-place op or in-place wrapper).
            sample: an OpInfo SampleInput.
            output_process_fn: post-processing applied to the op's outputs.
            clone_inputs: if True, clone primals/tangents before each call
                (required for in-place variants so the reference run and the
                actual run each see unmutated inputs).
            fixme_ref_jvp_local: reference jvp implementation to compare to.
            test_noncontig: if True, repeat the check with noncontiguous inputs.
        """
        # NB: we used requires_grad=True to determine where the primals are,
        # but don't need that information otherwise
        args = (sample.input,) + sample.args
        kwargs = sample.kwargs
        contig_fn, primals = normalize_op_input_output2(
            fn, args, kwargs, output_process_fn, requires_grad=True
        )
        orig_primals = tree_map(lambda x: x.detach(), primals)
        orig_tangents = tree_map(lambda x: torch.randn_like(x), primals)
        def maybe_clone_inputs():
            # Fresh copies per call when clone_inputs is set; otherwise the
            # same (detached) originals are shared across calls.
            if clone_inputs:
                primals = tree_map(torch.clone, orig_primals)
                tangents = tree_map(torch.clone, orig_tangents)
                return primals, tangents
            return orig_primals, orig_tangents
        # Inputs for the reference run ...
        primals, tangents = maybe_clone_inputs()
        expected_primal_outs, expected_tangent_outs = fixme_ref_jvp_local(
            contig_fn, primals, tangents
        )
        # ... and a second (possibly cloned) set for the actual run, so an
        # in-place fn cannot leak mutations from the reference run.
        primals, tangents = maybe_clone_inputs()
        primal_outs, tangent_outs = jvp(contig_fn, primals, tangents)
        self.assertEqual(primal_outs, expected_primal_outs)
        self.assertEqual(tangent_outs, expected_tangent_outs)
        if test_noncontig:
            noncontig_sample = sample.noncontiguous()
            noncontig_args = (noncontig_sample.input,) + noncontig_sample.args
            # NOTE(review): this reuses the contiguous sample.kwargs rather
            # than noncontig_sample.kwargs — confirm whether kwargs may hold
            # tensors that should also be made noncontiguous here.
            noncontig_kwargs = sample.kwargs
            noncontig_fn, primals = normalize_op_input_output2(
                fn,
                noncontig_args,
                noncontig_kwargs,
                output_process_fn,
                requires_grad=True,
            )
            noncontig_primals = tree_map(lambda x: x.detach(), primals)
            noncontig_tangents = tree_map(
                lambda x: noncontiguous_like(x), orig_tangents
            )
            noncontig_primal_outs, noncontig_tangent_outs = jvp(
                noncontig_fn, noncontig_primals, noncontig_tangents
            )
            self.assertEqual(noncontig_primal_outs, expected_primal_outs)
            self.assertEqual(noncontig_tangent_outs, expected_tangent_outs)
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@skipOps(
"TestOperators",
"test_vjp",
vjp_fail.union(
{
xfail("sparse.sampled_addmm", ""),
xfail("sparse.mm", "reduce"),
# ---- Non-Contiguous Failures ----
# This is expected to fail as the operator
# expects last dim to have stride=1
xfail("view_as_complex"),
# RuntimeError: query: last dimension must be contiguous
# The fused attention kernels require the last dim to be contiguous
decorate(
"nn.functional.scaled_dot_product_attention",
decorator=expectedFailureIf(not TEST_WITH_ROCM),
), # Works on ROCm
xfail("torch.ops.aten._flash_attention_forward"),
xfail("torch.ops.aten._efficient_attention_forward"),
# BUG
# AssertionError: Tensor-likes are not close!
xfail("as_strided"),
xfail("as_strided_scatter"),
xfail("as_strided", "partial_views"),
}
),
)
@opsToleranceOverride(
"TestOperators",
"test_vjp",
(
tol1(
"nn.functional.conv_transpose3d",
{torch.float32: tol(atol=5e-05, rtol=9e-05)},
device_type="cuda",
),
tol1(
"nn.functional.binary_cross_entropy_with_logits",
{torch.float32: tol(atol=1e-04, rtol=1e-04)},
),
tol1(
"nn.functional.multi_head_attention_forward",
{torch.float32: tol(atol=2e-03, rtol=2e-04)},
),
tol1("__rmatmul__", {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
tol1("matmul", {torch.float32: tol(atol=1e-05, rtol=1e-05)}),
tol2(
"linalg.pinv", "hermitian", {torch.float32: tol(atol=1e-05, rtol=1e-05)}
),
tol1("linalg.tensorsolve", {torch.float32: tol(atol=9e-03, rtol=2e-04)}),
tol1("linalg.multi_dot", {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
tol1("svd_lowrank", {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
tol1("pca_lowrank", {torch.float32: tol(atol=1e-04, rtol=1e-04)}),
),
)
def test_vjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
def _test(_op, inplace=False):
for sample in samples:
if inplace and not is_valid_inplace_sample_input(
sample, op, op.inplace_variant
):
continue
fn, primals = normalize_op_input_output(_op, sample)
result = fn(*primals)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
out, vjp_fn = vjp(fn, *primals)
self.assertEqual(out, result)
result_vjps = vjp_fn(cotangents)
_, vjp_fn = ref_vjp(fn, *primals)
expected_vjps = vjp_fn(cotangents)
self.assertEqual(result_vjps, expected_vjps)
if op.name not in skip_noncontig:
noncontig_fn, noncontig_primals = normalize_op_input_output(
_op, sample.noncontiguous()
)
noncontig_cotangents = tree_map(
lambda x: noncontiguous_like(x), cotangents
)
out_noncontig, vjp_fn = vjp(noncontig_fn, *noncontig_primals)
self.assertEqual(out_noncontig, result)
noncontig_result_vjps = vjp_fn(noncontig_cotangents)
self.assertEqual(noncontig_result_vjps, expected_vjps)
_test(op)
for a_op in op.aliases:
_test(a_op)
if op.inplace_variant:
def f(inp, *args, **kwargs):
return op.inplace_variant(inp.clone(), *args, **kwargs)
_test(f, inplace=True)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@skipOps(
"TestOperators",
"test_vjpvjp",
vjp_fail.union(
{
skip("nn.functional.max_unpool1d"), # silent incorrectness; Flaky
skip("nn.functional.max_unpool2d"), # silent incorrectness; Flaky
xfail("nn.functional.ctc_loss"), # Not Implemented
xfail(
"native_layer_norm", ""
), # Expected a proper Tensor but got None for argument #1 'other'
xfail("sparse.sampled_addmm", ""), # sparse tensors have no strides
xfail("sparse.mm", "reduce"), # sparse tensors have no strides
skip("nn.functional.scaled_dot_product_attention"),
xfail("torch.ops.aten._flash_attention_forward"),
xfail("torch.ops.aten._efficient_attention_forward"),
# AssertionError: Tensor-likes are not close!
# Mismatched elements: 1 / 15 (6.7%)
# Greatest absolute difference: 24.0 at index (2, 4) (up to 1e-05 allowed)
# Greatest relative difference: 1.7933241714393998e-06 at index (2, 4) (up to 1.3e-06 allowed)
# The failure occurred for item [0]
xfail("masked.prod"),
}
),
)
@opsToleranceOverride(
"TestOperators",
"test_vjpvjp",
(
tol1(
"nn.functional.conv_transpose3d",
{torch.float32: tol(atol=5e-05, rtol=9e-05)},
device_type="cuda",
),
tol1("prod", {torch.float32: tol(atol=2e-05, rtol=1e-04)}),
tol1("masked.cumprod", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
tol1("cumprod", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
tol1("linalg.vander", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
),
)
def test_vjpvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
if not op.supports_gradgrad:
self.skipTest("Skipped! Operation does not support gradgrad")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
def test(_op, inplace=False):
for sample in samples:
if inplace and not is_valid_inplace_sample_input(
sample, op, op.inplace_variant
):
continue
fn, args = get_vjpfull_variant(_op, sample)
result = fn(*args)
cotangents = tree_map(lambda x: torch.randn_like(x), result)
# Compute vjp of vjp
_, vjp_fn = vjp(fn, *args)
result_vjps = vjp_fn(cotangents)
# Compute ref_vjp of vjp. We could have done ref_vjp of ref_vjp,
# but since we're confident that vjp works by itself, this is
# an equivalent way to test that.
_, vjp_fn = ref_vjp(fn, *args)
expected_vjps = vjp_fn(cotangents)
self.assertEqual(result_vjps, expected_vjps)
test(op)
if op.inplace_variant:
def fn(inp, *args, **kwargs):
return op.inplace_variant(inp.clone(), *args, **kwargs)
test(fn, inplace=True)
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@skipOps(
"TestOperators",
"test_vmapvjpvjp",
vjp_fail.union(
{
skip("atleast_1d"), # Takes too long
skip("atleast_2d"), # Takes too long
skip("atleast_3d"), # Takes too long
skip("ormqr"), # Takes too long
xfail("as_strided"), # incorrect output
xfail("as_strided", "partial_views"), # incorrect output
xfail("as_strided_scatter"), # incorrect output
skip("bernoulli"), # calls random op
xfail("bfloat16"), # rank 4 tensor for channels_last
xfail("cdouble"), # rank 4 tensor for channels_last
xfail("cfloat"), # rank 4 tensor for channels_last
xfail("chalf"), # rank 4 tensor for channels_last
xfail("double"), # rank 4 tensor for channels_last
xfail("float"), # rank 4 tensor for channels_last
xfail("half"), # rank 4 tensor for channels_last
xfail(
"NumpyCubeNotComposableAutogradFunction"
), # Not composable autograd.Function
# It looks like you're either (1) calling .item() on a Tensor or
# (2) attempting to use a Tensor in some data-dependent control flow or
# (3) encountering this error in PyTorch internals.
xfail("index_reduce", "prod"),
decorate(
"linalg.householder_product", decorator=runOnRocm
), # works on ROCm
xfail(
# nans
"masked.softmax",
device_type="cpu",
),
xfail(
"nanquantile", device_type="cpu"
), # vmap not implemented for at::equal.
xfail("native_layer_norm"), # vmap: inplace into a regular tensor
# got a batched tensor as input while the running_mean or running_var,
# which will be updated in place, were not batched.
xfail("nn.functional.batch_norm"),
xfail(
"nn.functional.binary_cross_entropy"
), # vmap: inplace into a regular tensor
xfail(
"nn.functional.ctc_loss"
), # derivate not implemented for _ctc_loss_backward
# flaky on ROCM needs investigation
decorate("nn.functional.conv_transpose2d", decorator=skipIfRocm),
skip("nn.functional.dropout"), # calls random op
skip("nn.functional.dropout2d"), # calls random op
skip("nn.functional.dropout3d"), # calls random op
skip("nn.functional.alpha_dropout"), # calls random op
skip(
"nn.functional.feature_alpha_dropout", "with_train"
), # calls random op
skip("nn.functional.fractional_max_pool2d"), # calls random op
skip("nn.functional.fractional_max_pool3d"), # calls random op
xfail("nn.functional.scaled_dot_product_attention"), # randomness
xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
xfail("nn.functional.multi_head_attention_forward"), # randomness
# It looks like you're either (1) calling .item() on a Tensor or
# (2) attempting to use a Tensor in some data-dependent control flow or
# (3) encountering this error in PyTorch internals.
xfail("nn.functional.gaussian_nll_loss"),
# got a batched tensor as input while the running_mean or running_var,
# which will be updated in place, were not batched.
xfail("nn.functional.instance_norm"),
xfail(
"nn.functional.layer_norm"
), # vmap: inplace into a regular tensor
# RuntimeError: NYI: querying is_contiguous inside of vmap
# for memory_format other than torch.contiguous_formats
xfail("nn.functional.max_pool2d"),
# RuntimeError: NYI: Tensor.clone(memory_format) inside vmap is only
# supported with memory_format torch.preserve_format or
# torch.contiguous_format (got ChannelsLast)
xfail("nn.functional.max_unpool2d"),
# RuntimeError: NYI: Tensor.clone(memory_format) inside vmap is only
# supported with memory_format torch.preserve_format
# or torch.contiguous_format (got ChannelsLast)s
xfail("nn.functional.max_unpool2d", "grad"),
xfail(
"nn.functional.rrelu"
), # RuntimeError: vmap: we do not yet support aten::rrelu_with_noise.
xfail("normal"), # calls random op
xfail("normal", "number_mean"), # calls random op
xfail("pca_lowrank"), # calls random op
xfail(
"quantile", device_type="cpu"
), # Batching rule not implemented for `at::equal`
xfail(
"scatter_reduce", "prod"
), # vmap (looks like you are calling item/data-dependent)
xfail(
"sparse.sampled_addmm"
), # RuntimeError: Sparse CSR tensors do not have strides
xfail(
"sparse.mm", "reduce"
), # RuntimeError: Sparse CSR tensors do not have strides
xfail("svd_lowrank"), # calls random op
xfail("to"), # rank 4 tensor for channels_last
xfail(
"view_as_complex"
), # RuntimeError: Tensor must have a last dimension with stride 1
# got a batched tensor as input while the running_mean or running_var,
# which will be updated in place, were not batched.
xfail("nn.functional.batch_norm", "without_cudnn"),
# view doesn't work on sparse
xfail("to_sparse"),
xfail("native_batch_norm"),
xfail("_native_batch_norm_legit"),
# TODO: implement batching rule
xfail("_batch_norm_with_update"),
xfail(
"unbind_copy"
), # Batching rule not implemented for aten::unbind_copy.int.
}
),
)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@opsToleranceOverride(
"TestOperators",
"test_vmapvjpvjp",
(
tol1("linalg.svd", {torch.float32: tol(atol=1e-03, rtol=5e-04)}),
tol1("linalg.lu", {torch.float32: tol(atol=5e-04, rtol=7e-04)}),
tol1("linalg.lu_factor", {torch.float32: tol(atol=2e-03, rtol=2e-02)}),
tol1("linalg.multi_dot", {torch.float32: tol(atol=2e-03, rtol=2e-04)}),
tol1("svd", {torch.float32: tol(atol=1e-03, rtol=5e-04)}),
tol1("matrix_exp", {torch.float32: tol(atol=1e-03, rtol=5e-04)}),
tol1("masked.prod", {torch.float32: tol(atol=2e-03, rtol=2e-04)}),
),
)
@skipOps(
"TestOperators",
"test_vmapvjpvjp",
{
xfail("as_strided", "partial_views"),
xfail("as_strided_copy"),
},
)
    def test_vmapvjpvjp(self, device, dtype, op):
        """Check that vmap over a second-order vjp computation agrees with a
        per-example loop over the same computation."""
        # Since, we test `vjpvjp` independently,
        # for this test, we just verify that vmap
        # of `vjpvjp` is correct.
        if not op.supports_autograd:
            self.skipTest("Skipped! Autograd not supported.")
            return
        if not op.supports_gradgrad:
            self.skipTest("Skipped! Operation does not support gradgrad")
            return
        samples = op.sample_inputs(device, dtype, requires_grad=True)
        # TODO: test in-place
        if is_inplace(op, op.get_op()):
            self.skipTest("Skipped! NYI: inplace-testing not supported.")
            return
        for sample in samples:
            fn, args = get_vjpfull_variant(op, sample)
            result = fn(*args)
            cotangents = tree_map(lambda x: torch.randn_like(x), result)
            cotangents = pytree.tree_leaves(cotangents)
            num_args = len(args)
            # Flatten args and cotangents into one tuple so the vmap harness
            # can batch over every tensor uniformly; vjp_of_vjp splits them
            # back apart using num_args.
            args_and_cotangents = tuple(args) + tuple(cotangents)
            def vjp_of_vjp(*args_and_cotangents):
                args = args_and_cotangents[:num_args]
                cotangents = args_and_cotangents[num_args:]
                result, vjp_fn = vjp(fn, *args)
                result_vjps = vjp_fn(cotangents)
                result = pytree.tree_leaves(result)
                result_vjps = pytree.tree_leaves(result_vjps)
                # Return primal outputs and vjps together so both get compared.
                return (*result, *result_vjps)
            is_batch_norm_and_training = is_batch_norm_training(op.name, sample.kwargs)
            generator = get_fallback_and_vmap_exhaustive(
                vjp_of_vjp,
                args_and_cotangents,
                {},
                is_batch_norm_and_training=is_batch_norm_and_training,
            )
            # Each pair is (unbatched loop result, vmapped result).
            for loop_out, batched_out in generator:
                self.assertEqual(loop_out, batched_out)
vmapvjp_fail = vjp_fail.union(
{
# -------------------- ALLOWED FAILURES --------------------------------
# The following are not bugs and are expected behavior
xfail("masked_select"), # Not possible due to dynamic shapes
skip("bernoulli"), # randomness
skip("normal", ""), # randomness
skip("normal", "number_mean"), # randomness
skip("nn.functional.rrelu"), # randomness
skip("nn.functional.feature_alpha_dropout", "with_train"), # randomness
skip("nn.functional.feature_alpha_dropout", "without_train"), # randomness
skip("nn.functional.dropout"), # randomness
skip("nn.functional.dropout2d"), # randomness
skip("nn.functional.dropout3d", ""), # randomness
skip("nn.functional.alpha_dropout"), # randomness
skip("nn.functional.scaled_dot_product_attention"), # randomness
xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
skip("nn.functional.multi_head_attention_forward"), # randomness
xfail(
"index_put", ""
), # not possible due to dynamic shapes; we support a subset
xfail("nn.functional.fractional_max_pool2d"), # random
xfail("nn.functional.fractional_max_pool3d"), # random
xfail("pca_lowrank", ""), # randomness
xfail("svd_lowrank", ""), # randomness
xfail("to_sparse", ""), # non-dense output
skip(
"to"
), # RuntimeError: required rank 4 tensor to use channels_last format
xfail("as_strided", "partial_views"),
xfail(
"NumpyCubeNotComposableAutogradFunction"
), # Not composable autograd.Function
# ----------------------------------------------------------------------
# ---------------------------- BUGS ------------------------------------
# All of the following are bugs and need to be fixed
skip(
"linalg.svdvals"
), # # really annoying thing where it passes correctness check but not has_batch_rule
skip("native_batch_norm"),
skip("_native_batch_norm_legit"),
# TODO: implement batching rule
skip("_batch_norm_with_update"),
xfail("__getitem__", ""), # dynamic error
xfail("nanquantile", device_type="cpu"), # checks q via a .item() call
xfail("nn.functional.gaussian_nll_loss"), # checks var for if any value < 0
xfail("narrow"), # .item() call
xfail("quantile", device_type="cpu"), # checks q via a .item() call
xfail("view_as_complex"), # Tensor must have a last dimension with stride 1
# required rank 4 tensor to use channels_last format
xfail("bfloat16"),
xfail("double"),
xfail("float"),
xfail("half"),
xfail("cdouble", ""),
xfail("cfloat", ""),
xfail("chalf", ""),
xfail("scatter_reduce", "prod"), # item call
# Batching rule not implemented for aten::_use_cudnn_ctc_loss.Tensor
xfail("nn.functional.ctc_loss", device_type="cuda"),
# NYI: querying is_contiguous inside of vmap for memory_format other than torch.contiguous_format
xfail("nn.functional.max_unpool2d"),
xfail("nn.functional.max_unpool2d", "grad"),
xfail("sparse.sampled_addmm", ""),
xfail("sparse.mm", "reduce"),
xfail("as_strided_scatter", ""), # calls as_strided
xfail("index_reduce", "prod"), # .item() call
xfail(
"unbind_copy"
), # Batching rule not implemented for aten::unbind_copy.int.
# ---------------------------------------------------------------------
}
)
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@opsToleranceOverride(
"TestOperators",
"test_vmapvjp",
(
tol1(
"linalg.svd",
{torch.float32: tol(atol=5e-04, rtol=1e-04)},
device_type="cuda",
),
tol1(
"svd", {torch.float32: tol(atol=5e-04, rtol=1e-04)}, device_type="cuda"
),
tol1(
"linalg.householder_product",
{torch.float32: tol(atol=3e-04, rtol=9e-04)},
),
tol1(
"matrix_exp",
{torch.float32: tol(atol=5e-04, rtol=1e-04)},
device_type="cuda",
),
tol1(
"nn.functional.layer_norm",
{torch.float32: tol(atol=3e-4, rtol=1e-4)},
device_type="cpu",
),
tol1(
"native_layer_norm",
{torch.float32: tol(atol=3e-4, rtol=1e-4)},
device_type="cpu",
),
),
)
@skipOps(
"TestOperators",
"test_vmapvjp",
vmapvjp_fail.union(
{
xfail("as_strided"),
xfail("as_strided_copy"),
xfail("as_strided", "partial_views"),
}
),
)
def test_vmapvjp(self, device, dtype, op):
if not op.supports_autograd:
self.skipTest("Skipped! Autograd not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=True)
# TODO: test in-place
if is_inplace(op, op.get_op()):
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
for sample in samples:
cotangents = get_sample_cotangents(op, sample)
fn, args = get_vjp_fn_and_args_with_cotangents(op, sample, cotangents)
is_batch_norm_and_training = is_batch_norm_training(op.name, sample.kwargs)
generator = get_fallback_and_vmap_exhaustive(
fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training
)
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
vmapjvpall_fail = {
# -------------------- ALLOWED FAILURES --------------------------------
# The following are expected (not a bug)
skip("bernoulli", ""), # randomness
skip("nn.functional.dropout"), # randomness
skip("nn.functional.rrelu"), # randomness
skip("nn.functional.dropout2d", ""),
skip("nn.functional.dropout3d", ""),
skip("nn.functional.scaled_dot_product_attention"), # randomness
xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
skip("nn.functional.multi_head_attention_forward"), # randomness
skip("nn.functional.alpha_dropout"), # randomness
skip("nn.functional.feature_alpha_dropout", "without_train"),
skip("nn.functional.feature_alpha_dropout", "with_train"),
xfail(
"nn.functional.fractional_max_pool2d"
), # Cannot access data pointer of Tensor that doesn't have storage
xfail(
"nn.functional.fractional_max_pool3d"
), # Cannot access data pointer of Tensor that doesn't have storage
# Not actually a problem: embedding with max_norm mutates the weight
# and causes different runs to produce different results.
# skip because this is flaky depending on what the max_norm is!
skip("nn.functional.embedding", ""),
skip("to"), # RuntimeError: required rank 4 tensor to use channels_last format
xfail(
"NumpyExpMarkDirtyAutogradFunction"
), # vmap: inplace into a regular tensor
# ----------------------------------------------------------------------
# ---------------------------- BUGS ------------------------------------
# The following are bugs that we should fix
xfail("masked.mean"), # silent incorrectness (nan difference)
xfail("as_strided", "partial_views"), # Tensor-likes are not close!
xfail(
"nn.functional.soft_margin_loss", ""
), # soft_margin_loss_backward does not support forward-ad
xfail("tensor_split"), # data_ptr composite compliance
xfail("quantile"), # at::equal batching rule (cpu), also, in-place vmap (cuda)
skip("as_strided"), # Test runner cannot handle this
# requires special handling, and does not yet have a batching rule. Feel free to file a github issue!
xfail("as_strided_scatter"),
xfail(
"nn.functional.gaussian_nll_loss"
), # .item or data-dependent control flow
xfail("scatter"), # forward-mode AD does not support at::scatter
xfail(
"nanquantile"
), # at::equal batching rule (cpu), also, in-place vmap (cuda)
xfail("view_as_complex"), # Tensor must have a last dimension with stride 1
skip("pca_lowrank", ""), # randomness
skip("svd_lowrank", ""), # randomness
xfail("double"), # required rank 4 tensor to use channels_last format
xfail("cdouble"), # required rank 4 tensor to use channels_last format
# potential silent incorrectness
skip(
"nn.functional.max_unpool1d"
), # Flaky, seems to sometimes his max_unpool2d
skip("nn.functional.max_unpool2d"), # fails everywhere except on mac
skip("nn.functional.max_unpool3d"), # fails everywhere except on mac
# erroring because running_mean and running_var aren't differentiable
xfail("nn.functional.batch_norm"),
xfail("nn.functional.batch_norm", "without_cudnn"),
xfail("native_batch_norm"),
xfail("_native_batch_norm_legit"),
# TODO: implement batching rule
xfail("_batch_norm_with_update"),
xfail(
"unbind_copy"
), # Batching rule not implemented for aten::unbind_copy.int.
# ----------------------------------------------------------------------
}
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@opsToleranceOverride(
"TestOperators",
"test_vmapjvpall",
(
tol1(
"nn.functional.conv_transpose3d",
{torch.float32: tol(atol=2e-04, rtol=9e-3)},
device_type="cuda",
),
tol1(
"linalg.householder_product",
{torch.float32: tol(atol=2e-04, rtol=9e-3)},
),
),
)
@skipOps(
"TestOperators",
"test_vmapjvpall",
vmapjvpall_fail.union(
{
xfail("as_strided_copy"),
}
),
)
# This is technically a superset of test_vmapjvp. We should either delete test_vmapjvp
# or figure out if we can split vmapjvpall. It's useful to keep test_vmapjvp intact
# because that corresponds to "batched forward-mode AD" testing in PyTorch core
def test_vmapjvpall(self, device, dtype, op):
if is_inplace(op, op.get_op()):
# TODO: test in-place
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=False)
if not op.supports_forward_ad:
self.skipTest("Skipped! Forward AD not supported.")
return
for sample in samples:
arg_values = [sample.input] + list(sample.args)
kwarg_values = sample.kwargs
args = tuple(arg_values) + tuple(kwarg_values)
fn, args = get_jvp_variant_primals_tangents(op, sample)
is_batch_norm_and_training = is_batch_norm_training(op.name, kwarg_values)
generator = get_fallback_and_vmap_exhaustive(
fn, args, {}, is_batch_norm_and_training=is_batch_norm_and_training
)
for loop_out, batched_out in generator:
self.assertEqual(loop_out, batched_out)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@skipOps(
"TestOperators",
"test_vmapjvpall_has_batch_rule",
vmapjvpall_fail.union(
{
skip(
"to"
), # RuntimeError: required rank 4 tensor to use channels_last format
xfail(
"cdouble"
), # RuntimeError: required rank 4 tensor to use channels_last format
xfail("cumprod"),
xfail("masked_fill"),
xfail("fill"),
skip("masked.mean"), # ???
xfail("masked_scatter"),
xfail("put"),
xfail("take"),
xfail("nn.functional.feature_alpha_dropout", "without_train"),
xfail("nn.functional.dropout2d", ""),
xfail("pca_lowrank", ""),
xfail("svd_lowrank", ""),
xfail("nn.functional.feature_alpha_dropout", "with_train"),
xfail("special.log_ndtr", ""),
xfail("fft.ihfft2"), # conj_physical fallback
xfail("fft.ihfftn"), # conj_physical fallback
xfail("nn.functional.max_unpool3d", "grad"),
xfail("nn.functional.max_unpool2d", "grad"),
xfail("nn.functional.soft_margin_loss", ""),
xfail("nn.functional.max_unpool1d", "grad"),
xfail("nn.functional.embedding", ""),
xfail("nn.functional.glu"),
xfail("nn.functional.bilinear"), # trilinear doesn't have batching rule
xfail("linalg.lu", ""),
xfail("nn.functional.dropout3d", ""),
xfail("as_strided_scatter", ""),
xfail("masked.cumprod", ""),
xfail("permute_copy"),
xfail("renorm"), # hit vmap fallback, which is disabled
xfail("squeeze_copy"),
xfail("t_copy"),
xfail("transpose_copy"),
xfail("unsqueeze_copy"),
}
),
)
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
def test_vmapjvpall_has_batch_rule(self, device, dtype, op):
if is_inplace(op, op.get_op()):
# TODO: test in-place
self.skipTest("Skipped! NYI: inplace-testing not supported.")
return
samples = op.sample_inputs(device, dtype, requires_grad=False)
if not op.supports_forward_ad:
self.skipTest("Skipped! Forward AD not supported.")
return
def test():
for sample in samples:
arg_values = [sample.input] + list(sample.args)
kwarg_values = sample.kwargs
args = tuple(arg_values) + tuple(kwarg_values)
fn, args = get_jvp_variant_primals_tangents(op, sample)
is_batch_norm_and_training = is_batch_norm_training(
op.name, kwarg_values
)
for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
fn,
args,
{},
is_batch_norm_and_training=is_batch_norm_and_training,
compute_loop_out=False,
):
pass
check_vmap_fallback(self, test, op, dry_run=False)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@skipOps(
"TestOperators",
"test_vmapvjp_has_batch_rule",
vmapvjp_fail.union(
{
skip(
"to"
), # RuntimeError: required rank 4 tensor to use channels_last format
xfail("view_as_complex"),
xfail("cummax"),
xfail("cummin"),
xfail("fill"),
xfail(
"narrow"
), # Batching rule not implemented for `narrow.Tensor` (and view op)
xfail("special.log_ndtr"),
xfail("linalg.householder_product"),
xfail("masked_fill"),
xfail("masked_scatter"),
xfail("masked_select"),
xfail("nanquantile"),
xfail("ormqr"),
xfail("permute_copy"),
xfail("put"),
xfail("quantile"),
xfail("renorm"),
xfail("squeeze_copy"),
xfail("take"),
xfail("tensor_split"),
xfail("to_sparse"),
xfail("unfold"),
xfail("unfold_copy"),
xfail("nn.functional.dropout"),
xfail("fft.ihfft2"),
xfail("fft.ihfftn"),
xfail("nn.functional.gaussian_nll_loss"),
xfail("nn.functional.bilinear"),
xfail("nn.functional.fractional_max_pool3d"),
xfail("nn.functional.ctc_loss"),
xfail("nn.functional.rrelu"),
xfail("nn.functional.embedding_bag"),
xfail("nn.functional.fractional_max_pool2d"),
xfail("nn.functional.feature_alpha_dropout", "with_train"),
xfail("pca_lowrank", ""),
xfail("nn.functional.dropout2d", ""),
xfail("nn.functional.feature_alpha_dropout", "without_train"),
xfail("svd_lowrank", ""),
xfail("nn.functional.max_unpool2d", ""),
xfail("nn.functional.multi_margin_loss", ""),
xfail("nn.functional.multilabel_margin_loss", ""),
xfail("nn.functional.pdist", ""),
xfail("nn.functional.max_unpool1d", ""),
xfail("nn.functional.max_unpool3d", ""),
xfail("nn.functional.max_unpool3d", "grad"),
xfail("nn.functional.soft_margin_loss", ""),
xfail("nn.functional.max_unpool1d", "grad"),
xfail("nn.functional.max_unpool2d", "grad"),
xfail("linalg.lu", ""),
xfail("cdouble", ""),
xfail("cfloat", ""),
xfail("chalf", ""),
xfail(
"index_reduce", "prod"
), # aten::index_reduce hit the vmap fallback which is currently disabled
xfail(
"index_reduce", "mean"
), # aten::index_reduce hit the vmap fallback which is currently disabled
xfail(
"index_reduce", "amax"
), # aten::index_reduce hit the vmap fallback which is currently disabled
xfail(
"index_reduce", "amin"
), # aten::index_reduce hit the vmap fallback which is currently disabled
xfail("nn.functional.dropout3d", ""),
xfail("as_strided_scatter", ""),
xfail("_segment_reduce", "offsets"),
xfail("_segment_reduce", "lengths"),
xfail("sparse.sampled_addmm", ""),
xfail("sparse.mm", "reduce"),
xfail("native_batch_norm"),
xfail("_native_batch_norm_legit"),
# TODO: implement batching rule
xfail("_batch_norm_with_update"),
xfail(
"index_fill"
), # aten::_unique hit the vmap fallback which is currently disabled
xfail("squeeze_copy"),
xfail("t_copy"),
xfail("transpose_copy"),
xfail("unsqueeze_copy"),
}
),
)
    def test_vmapvjp_has_batch_rule(self, device, dtype, op):
        """Check that the vjp computation has a real vmap batching rule (does
        not silently hit the vmap fallback), for the op and its aliases."""
        if not op.supports_autograd:
            self.skipTest("Skipped! Autograd not supported.")
            return
        samples = op.sample_inputs(device, dtype, requires_grad=True)
        # TODO: test in-place
        if is_inplace(op, op.get_op()):
            self.skipTest("Skipped! NYI: inplace-testing not supported.")
            return
        def test():
            for sample in samples:
                cotangents = get_sample_cotangents(op, sample)
                fn, args = get_vjp_fn_and_args_with_cotangents(op, sample, cotangents)
                is_batch_norm_and_training = is_batch_norm_training(
                    op.name, sample.kwargs
                )
                # Drain the generator; only fallback usage is being checked,
                # so the loop (reference) outputs are not computed.
                for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
                    fn,
                    args,
                    {},
                    is_batch_norm_and_training=is_batch_norm_and_training,
                    compute_loop_out=False,
                ):
                    pass
                # Aliases share the same cotangents but get their own fn/args.
                for a_op in op.aliases:
                    fn, args = get_vjp_fn_and_args_with_cotangents(
                        a_op, sample, cotangents
                    )
                    for loop_out, batched_out in get_fallback_and_vmap_exhaustive(
                        fn,
                        args,
                        {},
                        is_batch_norm_and_training=is_batch_norm_and_training,
                        compute_loop_out=False,
                    ):
                        pass
        check_vmap_fallback(self, test, op, dry_run=False)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@skipOps(
"TestOperators",
"test_vjpvmap",
vjp_fail.union(
{
skip("bernoulli", ""), # vjpvmap testing can't handle randomness
skip("normal", ""), # vjpvmap testing can't handle randomness
skip(
"normal", "number_mean"
), # vjpvmap testing can't handle randomness
skip("nn.functional.rrelu"), # randomness
skip("nn.functional.feature_alpha_dropout", "with_train"), # randomness
skip(
"nn.functional.feature_alpha_dropout", "without_train"
), # randomness
skip("nn.functional.scaled_dot_product_attention"),
xfail("torch.ops.aten._efficient_attention_forward"), # outputs ints
skip("nn.functional.multi_head_attention_forward"), # randomness
skip("nn.functional.alpha_dropout"), # randomness
skip(
"to"
), # RuntimeError: required rank 4 tensor to use channels_last format
skip("to_sparse", ""), # non-dense output
skip("ormqr", ""), # takes too long
xfail(
"NumpyCubeNotComposableAutogradFunction"
), # Not composable autograd.Function
# fallback path doesn't work
# All of the following are bugs and need to be fixed
xfail("__getitem__", ""),
xfail("index_put", ""),
xfail("view_as_complex"),
xfail(
"unbind_copy"
), # Batching rule not implemented for aten::unbind_copy.int.
xfail("nn.functional.gaussian_nll_loss"),
xfail("masked_select"),
xfail(
"narrow"
), # Batching rule not implemented for `narrow.Tensor` (and view op)
skip(
"nn.functional.fractional_max_pool3d"
), # generator works on cpu, fails on cuda
skip(
"nn.functional.fractional_max_pool2d"
), # generator works on cpu, fails on cuda
xfail("column_stack", ""),
xfail("nn.functional.dropout2d", ""),
xfail("svd_lowrank", ""),
xfail("pca_lowrank", ""),
xfail("clamp"),
# something weird happening with channels_last
xfail("bfloat16"),
xfail("double"),
xfail("float"),
xfail("half"),
xfail("cdouble"),
xfail("cfloat"),
xfail("nn.functional.dropout3d", ""),
xfail("as_strided_scatter", ""),
xfail("sparse.sampled_addmm", ""),
xfail("sparse.mm", "reduce"),
xfail("native_batch_norm"),
xfail("_native_batch_norm_legit"),
# TODO: implement batching rule
xfail("_batch_norm_with_update"),
xfail("as_strided", "partial_views"),
}
),
)
def test_vjpvmap(self, device, dtype, op):
    """Check vjp composed over vmap(op).

    For each sample input, batched variants of the arguments are generated,
    the op is wrapped in vmap, and the vjp computed by functorch's ``vjp``
    is compared against ``ref_vjp`` (plain autograd) on the same cotangents.
    """
    # NB: there is no vjpvmap_has_batch_rule test because that is almost
    # certainly redundant with the vmap_has_batch_rule test in test_vmap.py
    # one-off skip
    if op.name == "nn.functional.dropout":
        self.skipTest("Skipped!")
    if not op.supports_autograd:
        # If the op doesn't support autograd, vmap(op) won't either
        self.skipTest("Skipped! Autograd not supported.")
        return
    # TODO: test in-place
    if is_inplace(op, op.get_op()):
        self.skipTest("Skipped! NYI: inplace-testing not supported.")
        return
    samples = op.sample_inputs(device, dtype, requires_grad=True)
    batch_norm_fns = (
        "nn.functional.batch_norm",
        "nn.functional.instance_norm",
    )  # instance norm calls batch norm
    is_batch_norm = op.name in batch_norm_fns
    for sample in samples:
        args = [sample.input] + list(sample.args)
        kwargs = sample.kwargs
        is_batch_norm_and_training = is_batch_norm and is_batch_norm_training(
            op.name, kwargs
        )
        generator = generate_vmap_inputs(
            args, kwargs, is_batch_norm_and_training=is_batch_norm_and_training
        )
        # NOTE: `kwargs` is deliberately rebound by the generator below.
        for batched_args, in_dims, kwargs in generator:
            vmapped_op = vmap(op, in_dims)
            fn, primals = normalize_op_input_output2(
                vmapped_op, batched_args, kwargs, sample.output_process_fn_grad
            )
            result = fn(*primals)
            cotangents = tree_map(lambda x: torch.randn_like(x), result)
            # functorch vjp vs the plain-autograd reference on identical cotangents
            _, vjp_fn = vjp(fn, *primals)
            result_vjps = vjp_fn(cotangents)
            _, vjp_fn = ref_vjp(fn, *primals)
            expected_vjps = vjp_fn(cotangents)
            self.assertEqual(result_vjps, expected_vjps)
def _compare_jacobians_of_vjp(
    self, fn, cotangents_and_primals, argnums=None, atol_rtol=None
):
    """Assert that forward- and reverse-mode jacobians of ``vjp(fn)`` agree.

    Builds ``get_vjp(cotangents, *primals)`` around ``fn`` and differentiates
    it with both ``jacfwd`` and ``jacrev`` over ``argnums`` (defaulting to
    every entry of ``cotangents_and_primals``). Jacobians are cast to float
    before comparison since dtype-changing ops yield differing dtypes.
    ``atol_rtol``, when given, is an ``(atol, rtol)`` pair for the comparison.
    """
    if argnums is None:
        argnums = tuple(range(len(cotangents_and_primals)))

    def get_vjp(cotangents, *primals):
        _, pullback = vjp(fn, *primals)
        return pullback(cotangents)

    def as_float(tree):
        # Normalize dtypes so dtype-changing ops compare cleanly.
        return tree_map(lambda t: t.to(torch.float), tree)

    fwd_jac = as_float(jacfwd(get_vjp, argnums)(*cotangents_and_primals))
    rev_jac = as_float(jacrev(get_vjp, argnums)(*cotangents_and_primals))
    if atol_rtol is None:
        self.assertEqual(fwd_jac, rev_jac)
    else:
        atol, rtol = atol_rtol
        self.assertEqual(fwd_jac, rev_jac, atol=atol, rtol=rtol)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@skipOps(
    "TestOperators",
    "test_jvpvjp",
    vjp_fail.union(
        {
            xfail("to_sparse", ""),  # NYI
            # RuntimeError: Trying to set a forward gradient that has a different size than that of the original Tensor,
            # this is not supported. Tensor is of size [5, 2, 3] while the given forward gradient is of size [1, 2, 3].
            xfail("normal", ""),
            xfail("cdist", ""),  # NYI: forward-AD for _cdist_forward
            xfail("cholesky", ""),  # NYI: forward-AD for cholesky
            xfail(
                "nn.functional.embedding_bag", ""
            ),  # NYI: forward-AD for _embedding_bag
            xfail(
                "nn.functional.grid_sample", ""
            ),  # NYI: forward AD for grid_sampler_2d
            xfail("grid_sampler_2d", ""),  # NYI: forward AD for grid_sampler_2d
            xfail(
                "nn.functional.hardsigmoid", ""
            ),  # NYI: forward AD for hardsigmoid_backward
            xfail(
                "nn.functional.huber_loss", ""
            ),  # NYI: forward AD for huber_loss_backward
            xfail("NumpyCubeNotComposableAutogradFunction"),  # not composable
            xfail("ormqr", ""),  # NYI: forward AD for ormqr
            xfail(
                "nn.functional.multilabel_margin_loss", ""
            ),  # NYI: multilabel_margin_loss_forward
            xfail(
                "nn.functional.soft_margin_loss", ""
            ),  # NYI: forward-AD for soft_margin_loss_backward
            xfail("nn.functional.ctc_loss", ""),  # NYI: forward-AD for _ctc_loss
            xfail("nn.functional.pdist", ""),  # NYI: forward-AD with _pdist_forward
            skip("nn.functional.scaled_dot_product_attention"),
            xfail("torch.ops.aten._efficient_attention_forward"),  # outputs ints
            xfail(
                "nn.functional.multi_margin_loss", ""
            ),  # NYI: forward AD with multi_margin_loss
            skip(
                "linalg.householder_product", "", device_type="cuda"
            ),  # flaky, I'm not sure why
            xfail("sparse.sampled_addmm", ""),  # Sparse tensors have no strides
            xfail(
                "_segment_reduce", "offsets"
            ),  # NYI: forward-AD for _segment_reduce
            xfail("sparse.mm", "reduce"),  # Sparse tensors have no strides
            xfail("index_reduce", "prod"),  # NYI: forward-AD for index_reduce
            xfail("index_reduce", "mean"),  # NYI: forward-AD for index_reduce
            xfail("index_reduce", "amax"),  # NYI: forward-AD for index_reduce
            xfail("index_reduce", "amin"),  # NYI: forward-AD for index_reduce
            xfail(
                "_segment_reduce", "lengths"
            ),  # NYI: forward-AD for _segment_reduce
            xfail("native_dropout_backward"),  # NYI
        }
    ),
)
@opsToleranceOverride(
    "TestOperators",
    "test_jvpvjp",
    (
        tol1("masked.prod", {torch.float32: tol(atol=1e-04, rtol=1.3e-05)}),
        tol1("masked.cumprod", {torch.float32: tol(atol=1e-04, rtol=5e-04)}),
        tol1(
            "cumprod",
            {torch.float32: tol(atol=1e-03, rtol=5e-04)},
            device_type="cuda",
        ),
        tol1(
            "linalg.det",
            {torch.float32: tol(atol=3e-05, rtol=5e-06)},
            device_type="cuda",
        ),
        tol1(
            "linalg.vander",
            {torch.float32: tol(atol=1e-04, rtol=1.3e-05)},
            device_type="cuda",
        ),
        tol1(
            "nn.functional.group_norm", {torch.float32: tol(atol=1e-03, rtol=1e-03)}
        ),
        tol2(
            "linalg.pinv", "hermitian", {torch.float32: tol(atol=5e-03, rtol=5e-03)}
        ),
    ),
)
def test_jvpvjp(self, device, dtype, op):
    """Check jvp composed over vjp(op) against a forward-mode reference.

    ``push_vjp`` computes the vjp of the op; its jvp is compared against a
    reference that runs ``ref_vjp`` under ``fwAD.dual_level`` with
    dual-number (primal, tangent) inputs.
    """
    if not op.supports_autograd:
        self.skipTest("Skipped! Autograd not supported.")
        return
    samples = op.sample_inputs(device, dtype, requires_grad=True)
    # TODO: test in-place
    if is_inplace(op, op.get_op()):
        self.skipTest("Skipped! NYI: inplace-testing not supported.")
        return
    for sample in samples:
        fn, primals = normalize_op_input_output(op, sample)
        result = fn(*primals)
        cotangents = tree_map(lambda x: torch.randn_like(x), result)

        primals_tangents = tree_map(lambda x: torch.randn_like(x), primals)
        cotangents_tangents = tree_map(lambda x: torch.randn_like(x), cotangents)

        def push_vjp(primals, cotangents):
            # vjp of `fn` at `primals`, applied to `cotangents`.
            _, vjp_fn = vjp(fn, *primals)
            return vjp_fn(cotangents)

        result = jvp(
            push_vjp, (primals, cotangents), (primals_tangents, cotangents_tangents)
        )
        self.assertEqual(len(result), 2)

        def tree_map2(fn, first, second):
            # zip-like tree_map over two pytrees with identical structure.
            flat_first, spec_first = tree_flatten(first)
            flat_second, spec_second = tree_flatten(second)
            assert spec_first == spec_second
            flat_result = [fn(f, s) for f, s in zip(flat_first, flat_second)]
            return tree_unflatten(flat_result, spec_first)

        def reference(primals, cotangents, primals_tangents, cotangents_tangents):
            # Forward-over-reverse reference: run ref_vjp on dual numbers and
            # unpack the primal/tangent parts of the outputs.
            with fwAD.dual_level():
                primal_duals = tree_map2(fwAD.make_dual, primals, primals_tangents)
                _, vjp_fn = ref_vjp(fn, *primal_duals)

                cotangent_duals = tree_map2(
                    fwAD.make_dual, cotangents, cotangents_tangents
                )
                result = vjp_fn(cotangent_duals)

                flat_result, spec = tree_flatten(result)
                primals_out, tangents_out = zip(
                    *[fwAD.unpack_dual(r) for r in flat_result]
                )
                # A None tangent means the output does not depend on the
                # tangents; treat it as zeros for the comparison.
                tangents_out = [
                    t if t is not None else torch.zeros_like(p)
                    for p, t in zip(primals_out, tangents_out)
                ]
                expected = (
                    tree_unflatten(primals_out, spec),
                    tree_unflatten(tangents_out, spec),
                )
            return expected

        expected = reference(
            primals, cotangents, primals_tangents, cotangents_tangents
        )
        self.assertEqual(result, expected)
@with_tf32_off  # https://github.com/pytorch/pytorch/issues/86798
@skipOps(
    "TestOperators",
    "test_vmapjvpvjp",
    vjp_fail.union(
        {
            # Following operators take too long, hence skipped
            skip("atleast_1d"),
            skip("atleast_2d"),
            skip("atleast_3d"),
            skip("meshgrid", "list_of_tensors"),
            skip("meshgrid", "variadic_tensors"),
            skip("broadcast_tensors"),
            skip("linalg.lstsq"),
            skip("nn.functional.bilinear"),
            skip("native_layer_norm"),
            skip("ormqr"),
            # Not actually a problem
            xfail("NumpyCubeNotComposableAutogradFunction"),  # not composable
            xfail(
                "NumpyExpMarkDirtyAutogradFunction"
            ),  # vmap: inplace into a regular tensor
            # Potential bugs/errors
            xfail("as_strided"),  # AssertionError: Tensor-likes are not close!
            xfail(
                "as_strided", "partial_views"
            ),  # AssertionError: Tensor-likes are not close!
            xfail("as_strided_copy"),  # AssertionError: Tensor-likes are not close!
            xfail(
                "as_strided_scatter"
            ),  # AssertionError: Tensor-likes are not close!
            xfail(
                "unbind_copy"
            ),  # Batching rule not implemented for aten::unbind_copy.int.
            xfail("bernoulli"),  # calls random op
            xfail("bfloat16"),  # required rank 4 tensor to use channels_last format
            xfail("cdist"),  # Forward AD not implemented and no decomposition
            xfail("cdouble"),  # required rank 4 tensor to use channels_last format
            xfail("cfloat"),  # required rank 4 tensor to use channels_last format
            xfail("chalf"),  # required rank 4 tensor to use channels_last format
            xfail("cholesky"),  # Forward AD not implemented and no decomposition
            xfail("ormqr"),  # Forward AD not implemented and no decomposition
            xfail("double"),  # required rank 4 tensor to use channels_last format
            xfail("float"),  # required rank 4 tensor to use channels_last format
            xfail("half"),  # required rank 4 tensor to use channels_last format
            xfail("index_reduce", "prod"),  # NYI: forward AD for index_reduce
            xfail("index_reduce", "mean"),  # NYI: forward AD for index_reduce
            xfail("index_reduce", "amax"),  # NYI: forward AD for index_reduce
            xfail("index_reduce", "amin"),  # NYI: forward AD for index_reduce
            xfail(
                "mvlgamma", "mvlgamma_p_1"
            ),  # vmap: inplace into a regular tensor
            xfail(
                "mvlgamma", "mvlgamma_p_3"
            ),  # vmap: inplace into a regular tensor
            xfail(
                "mvlgamma", "mvlgamma_p_5"
            ),  # vmap: inplace into a regular tensor
            xfail("nanquantile"),  # Batching rule not implemented for aten::equal
            # RuntimeError: Batch norm got a batched tensor as input while the
            # running_mean or running_var, which will be updated in place,
            # were not batched.
            xfail("nn.functional.batch_norm"),
            xfail("nn.functional.batch_norm", "without_cudnn"),
            xfail(
                "nn.functional.ctc_loss"
            ),  # ForwardAD not implemented and no decomposition
            xfail("nn.functional.dropout2d"),  # calls random op
            xfail("nn.functional.dropout3d"),  # calls random op
            xfail("nn.functional.dropout"),  # calls random op
            xfail("nn.functional.scaled_dot_product_attention"),  # randomness
            xfail("torch.ops.aten._efficient_attention_forward"),  # outputs ints
            xfail("nn.functional.multi_head_attention_forward"),  # randomness
            xfail(
                "nn.functional.embedding_bag"
            ),  # Forward AD not implemented and no decomposition
            xfail("nn.functional.alpha_dropout"),  # calls randomn op
            xfail(
                "nn.functional.feature_alpha_dropout", "with_train"
            ),  # calls random op
            xfail("nn.functional.fractional_max_pool2d"),  # calls random op
            xfail("nn.functional.fractional_max_pool3d"),  # calls random op
            xfail("nn.functional.gaussian_nll_loss"),  # data depenedant flow
            xfail(
                "nn.functional.grid_sample"
            ),  # Forward AD not implemented and no decomposition
            xfail(
                "grid_sampler_2d"
            ),  # Forward AD not implemented and no decomposition
            xfail(
                "nn.functional.hardsigmoid"
            ),  # Forward AD not implemented and no decomposition
            xfail(
                "nn.functional.hinge_embedding_loss"
            ),  # vmap: inplace into a regular tensor
            xfail(
                "nn.functional.huber_loss"
            ),  # Forward AD not implemented and no decomposition
            # RuntimeError: Batch norm got a batched tensor as input while the
            # running_mean or running_var, which will be updated in place,
            # were not batched.
            xfail("nn.functional.instance_norm"),
            # NYI: Tensor.clone(memory_format) inside vmap is only supported with
            # memory_format torch.preserve_format or torch.contiguous_format (got ChannelsLast)
            xfail("nn.functional.max_unpool2d"),
            xfail("nn.functional.max_unpool2d", "grad"),
            xfail(
                "nn.functional.multi_margin_loss"
            ),  # Forward AD not implemented and no decomposition
            xfail(
                "nn.functional.multilabel_margin_loss"
            ),  # Forward AD not implemented and no decomposition
            xfail(
                "nn.functional.pdist"
            ),  # Forward AD not implemented and no decomposition
            xfail(
                "nn.functional.rrelu"
            ),  # vmap: we do not yet support aten::rrelu_with_noise.
            xfail(
                "nn.functional.soft_margin_loss"
            ),  # Forward AD not implemented and no decomposition
            xfail("normal"),  # calls random op
            xfail("normal", "number_mean"),  # calls random op
            xfail("pca_lowrank"),  # calls random op
            xfail("quantile"),  # Batching rule not implemented for aten::equal
            xfail(
                "scatter_reduce", "prod"
            ),  # Forward AD not implemented and no decomposition
            xfail(
                "_segment_reduce", "lengths"
            ),  # Forward AD not implemented and no decomposition
            xfail(
                "_segment_reduce", "offsets"
            ),  # Forward AD not implemented and no decomposition
            xfail(
                "sparse.sampled_addmm"
            ),  # RuntimeError: Sparse CSR tensors do not have strides
            xfail(
                "sparse.mm", "reduce"
            ),  # RuntimeError: Sparse CSR tensors do not have strides
            xfail("svd_lowrank"),  # calls random op
            xfail(
                "to"
            ),  # RuntimeError: required rank 4 tensor to use channels_last format
            xfail("to_sparse"),  # Forward AD not implemented and no decomposition
            xfail(
                "view_as_complex"
            ),  # RuntimeError: Tensor must have a last dimension with stride 1
            # RuntimeError: Batch norm got a batched tensor as
            # input while the running_mean or running_var, which will be updated in
            # place, were not batched.
            xfail("native_batch_norm"),
            xfail("_native_batch_norm_legit"),
            # TODO: implement batching rule
            xfail("_batch_norm_with_update"),
            xfail("native_dropout_backward"),
        }
    ),
)
@ops(op_db + additional_op_db + autograd_function_db, allowed_dtypes=(torch.float,))
@toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
@opsToleranceOverride(
    "TestOperators",
    "test_vmapjvpvjp",
    (
        tol1("linalg.svd", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
        tol1(
            "linalg.householder_product",
            {torch.float32: tol(atol=5e-03, rtol=5e-03)},
        ),
        tol1("linalg.multi_dot", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
        tol2(
            "linalg.pinv", "hermitian", {torch.float32: tol(atol=5e-04, rtol=5e-04)}
        ),
        tol1(
            "nn.functional.conv_transpose2d",
            {torch.float32: tol(atol=5e-04, rtol=5e-04)},
        ),
        tol1("svd", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
        tol1("matrix_exp", {torch.float32: tol(atol=5e-04, rtol=5e-04)}),
    ),
)
def test_vmapjvpvjp(self, device, dtype, op):
    """Check that vmap of jvp-over-vjp agrees with a per-example loop."""
    # Since we test `jvpvjp` separately,
    # in this we just check that vmap of `jvpvjp`
    # is correct.
    if not op.supports_autograd:
        self.skipTest("Skipped! Autograd not supported.")
        return
    samples = op.sample_inputs(device, dtype, requires_grad=True)
    # TODO: test in-place
    if is_inplace(op, op.get_op()):
        self.skipTest("Skipped! NYI: inplace-testing not supported.")
        return
    for sample in samples:
        fn, primals = normalize_op_input_output(op, sample)
        result = fn(*primals)
        cotangents = tree_map(lambda x: torch.randn_like(x), result)

        primals_tangents = tree_map(lambda x: torch.randn_like(x), primals)
        cotangents_tangents = tree_map(lambda x: torch.randn_like(x), cotangents)

        def push_vjp(primals, cotangents):
            # vjp of `fn` at `primals`, applied to `cotangents`.
            _, vjp_fn = vjp(fn, *primals)
            return vjp_fn(cotangents)

        args, spec = tree_flatten(
            ((primals, cotangents), (primals_tangents, cotangents_tangents))
        )

        def jvp_of_vjp(*args):
            # Flat-args wrapper so the vmap testing helper can batch any input.
            (primals, tangents) = tree_unflatten(args, spec)
            primals_out, tangents_out = jvp(push_vjp, primals, tangents)

            flat_primals_out = pytree.tree_leaves(primals_out)
            flat_tangents_out = pytree.tree_leaves(tangents_out)
            return tuple(flat_primals_out + flat_tangents_out)

        # NOTE(review): other tests in this file pass `op.name` to
        # is_batch_norm_training (e.g. test_vjpvmap); confirm the helper also
        # accepts an OpInfo here — if it only matches name strings, passing
        # `op` would always evaluate to False.
        is_batch_norm_and_training = is_batch_norm_training(op, sample.kwargs)
        generator = get_fallback_and_vmap_exhaustive(
            jvp_of_vjp,
            args,
            {},
            is_batch_norm_and_training=is_batch_norm_and_training,
        )
        # Compare the per-example loop result against the vmapped result.
        for loop_out, batched_out in generator:
            self.assertEqual(loop_out, batched_out)
def _make_extremal_inputs(self, shape, device):
if shape is None:
return (None,)
return (
torch.full(shape, -1000.0, device=device),
torch.zeros(shape, device=device),
torch.full(shape, 1000.0, device=device),
)
def _arg_and_kwarg_options(self, args_options, kwargs_options):
return itertools.product(*args_options, kwargs_options)
def test_extremal_numerics_nll_loss(self, device):
    """Compare jacfwd vs jacrev jacobians of nll_loss's vjp on extremal inputs."""
    N, C = 3, 4
    d1, d2, d3 = 5, 6, 7
    shapes = (
        ((N, C), (N,), (C,)),
        ((N, C), (N,), None),
        ((N, C, d1, d2, d3), (N, d1, d2, d3), (C,)),
        ((N, C, d1, d2, d3), (N, d1, d2, d3), None),
    )
    kwargs_options = (
        {"ignore_index": 0, "reduction": "mean"},
        {"reduction": "sum"},
        {"reduction": "none"},
        {},
    )
    for input_shape, target_shape, weight_shape in shapes:
        input_options = self._make_extremal_inputs(input_shape, device)
        for input, kwargs in self._arg_and_kwarg_options(
            (input_options,), kwargs_options
        ):
            weight = (
                None
                if weight_shape is None
                else torch.randn(weight_shape, device=device)
            )
            target = torch.randint(0, C, target_shape, device=device)
            # since we're ignoring index 0, at least one element must be non-zero
            target[0] = 1

            fn = functools.partial(
                torch.nn.functional.nll_loss, target=target, weight=weight, **kwargs
            )
            result = fn(input)
            cotangents = torch.randn_like(result, device=device)
            self._compare_jacobians_of_vjp(fn, (cotangents, input))
def test_extremal_numerics_l1_loss(self, device):
    """Compare jacfwd vs jacrev jacobians of l1_loss's vjp on extremal inputs.

    Fix: the generated ``kwargs`` (reduction variants) were previously
    discarded — ``l1_loss`` always ran with its default arguments, so the
    ``reduction="sum"`` / ``reduction="none"`` combinations were generated
    but never exercised. They are now bound into ``fn`` via
    ``functools.partial``, matching the nll_loss/cross_entropy tests above.
    """
    N, C, H, W = 3, 4, 5, 6
    shapes = ((N, C), (N, C, H), (N, C, H, W))
    kwargs_options = ({"reduction": "sum"}, {"reduction": "none"}, {})
    for shape in shapes:
        input_options = self._make_extremal_inputs(shape, device)
        target_options = self._make_extremal_inputs(shape, device)
        for input, target, kwargs in self._arg_and_kwarg_options(
            (input_options, target_options), kwargs_options
        ):
            fn = functools.partial(torch.nn.functional.l1_loss, **kwargs)
            result = fn(input, target)
            cotangents = torch.randn_like(result, device=device)
            # Differentiate w.r.t. cotangents, input, and target.
            self._compare_jacobians_of_vjp(fn, (cotangents, input, target))
def test_extremal_numerics_mse_loss(self, device):
    """Compare jacfwd vs jacrev jacobians of mse_loss's vjp on extremal inputs.

    Fix: the generated ``kwargs`` (reduction variants) were previously
    discarded — ``mse_loss`` always ran with its default arguments, so the
    ``reduction="sum"`` / ``reduction="none"`` combinations were generated
    but never exercised. They are now bound into ``fn`` via
    ``functools.partial``, matching the nll_loss/cross_entropy tests above.
    """
    N, C, H, W = 3, 4, 5, 6
    shapes = ((N, C), (N, C, H), (N, C, H, W))
    kwargs_options = ({"reduction": "sum"}, {"reduction": "none"}, {})
    for shape in shapes:
        input_options = self._make_extremal_inputs(shape, device)
        target_options = self._make_extremal_inputs(shape, device)
        for input, target, kwargs in self._arg_and_kwarg_options(
            (input_options, target_options), kwargs_options
        ):
            fn = functools.partial(torch.nn.functional.mse_loss, **kwargs)
            result = fn(input, target)
            cotangents = torch.randn_like(result, device=device)
            # Differentiate w.r.t. cotangents, input, and target.
            self._compare_jacobians_of_vjp(fn, (cotangents, input, target))
def test_extremal_numerics_softmax(self, device):
    """Compare jacfwd vs jacrev jacobians of softmax's vjp on extremal inputs.

    Fix: the generated ``kwargs`` (``{"dim": 1}``) were previously discarded,
    so the explicit-dim case was never exercised — softmax always ran with
    the implicit default dim. The kwargs are now bound into ``fn`` via
    ``functools.partial``.
    """
    N, C, H, W = 3, 4, 5, 6
    shapes = ((N, C), (N, C, H), (N, C, H, W))
    kwargs_options = ({"dim": 1}, {})
    for shape in shapes:
        input_options = self._make_extremal_inputs(shape, device)
        for input, kwargs in self._arg_and_kwarg_options(
            (input_options,), kwargs_options
        ):
            fn = functools.partial(torch.nn.functional.softmax, **kwargs)
            result = fn(input)
            cotangents = torch.randn_like(result, device=device)
            self._compare_jacobians_of_vjp(fn, (cotangents, input))
def test_extremal_numerics_log_softmax(self, device):
    """Compare jacfwd vs jacrev jacobians of log_softmax's vjp on extremal inputs.

    Fix: the generated ``kwargs`` (``{"dim": 1}``) were previously discarded,
    so the explicit-dim case was never exercised — log_softmax always ran
    with the implicit default dim. The kwargs are now bound into ``fn`` via
    ``functools.partial``.
    """
    N, C, H, W = 3, 4, 5, 6
    shapes = ((N, C), (N, C, H), (N, C, H, W))
    kwargs_options = ({"dim": 1}, {})
    for shape in shapes:
        input_options = self._make_extremal_inputs(shape, device)
        for input, kwargs in self._arg_and_kwarg_options(
            (input_options,), kwargs_options
        ):
            fn = functools.partial(torch.nn.functional.log_softmax, **kwargs)
            result = fn(input)
            cotangents = torch.randn_like(result, device=device)
            self._compare_jacobians_of_vjp(fn, (cotangents, input))
def test_extremal_numerics_cross_entropy(self, device):
    """Compare jacfwd vs jacrev jacobians of cross_entropy's vjp.

    Covers class-index targets (including the scalar-target case) and
    probability ("soft label") targets, with and without class weights,
    over several input ranks and reduction modes.
    """
    N, C = 3, 4
    d1, d2, d3 = 5, 6, 7
    shapes = (
        ((N, C), (N,), (C,)),
        ((N, C), (N,), None),
        ((N, C), (N, C), (C,)),
        ((N, C), (N, C), None),
        ((C,), (), (C,)),
        ((C,), (), None),
        ((C,), (C,), (C,)),
        ((C,), (C,), None),
        ((N, C, d1, d2, d3), (N, d1, d2, d3), (C,)),
        ((N, C, d1, d2, d3), (N, d1, d2, d3), None),
        ((N, C, d1, d2, d3), (N, C, d1, d2, d3), (C,)),
        ((N, C, d1, d2, d3), (N, C, d1, d2, d3), None),
    )
    for input_shape, target_shape, weight_shape in shapes:
        input_options = self._make_extremal_inputs(input_shape, device)
        kwargs_options = [{"reduction": "sum"}, {"reduction": "none"}, {}]
        if input_shape != target_shape:
            # ignore_index only makes sense for class-index targets.
            kwargs_options.append({"ignore_index": 0, "reduction": "mean"})

        for input, kwargs in self._arg_and_kwarg_options(
            (input_options,), kwargs_options
        ):
            weight = (
                None
                if weight_shape is None
                else torch.randn(weight_shape, device=device)
            )
            if input_shape == target_shape:
                # probability-target ("soft label") variant
                target = torch.rand(target_shape, device=device)
            elif len(target_shape) == 0:
                # scalar class index; must be non-zero since ignore_index may be 0
                target = torch.tensor(1, device=device)
            else:
                target = torch.randint(0, C, target_shape, device=device)

            fn = functools.partial(
                torch.nn.functional.cross_entropy,
                target=target,
                weight=weight,
                **kwargs,
            )
            result = fn(input)
            cotangents = torch.randn_like(result, device=device)
            self._compare_jacobians_of_vjp(
                fn, (cotangents, input), atol_rtol=(1e-4, 1e-5)
            )
def test_extremal_numerics_binary_cross_entropy(self, device):
    """Compare jacfwd vs jacrev jacobians of binary_cross_entropy's vjp.

    The extremal values are applied to the ``weight`` argument; inputs and
    targets are uniform random in [0, 1) as required by the loss.
    """
    N, C, H, W = 3, 4, 5, 6
    for shape in ((N, C), (N, C, H), (N, C, H, W)):
        weight_options = self._make_extremal_inputs(shape, device)
        kwargs_options = [{"reduction": "sum"}, {"reduction": "none"}, {}]
        for weight, kwargs in self._arg_and_kwarg_options(
            (weight_options,), kwargs_options
        ):
            input = torch.rand(shape, device=device)
            target = torch.rand(shape, device=device)
            fn = functools.partial(
                torch.nn.functional.binary_cross_entropy,
                target=target,
                weight=weight,
                **kwargs,
            )
            cotangents = torch.randn_like(fn(input), device=device)
            self._compare_jacobians_of_vjp(
                fn, (cotangents, input), atol_rtol=(1e-4, 2e-5)
            )
def test_extremal_numerics_layer_norm(self, device):
    """Compare jacfwd vs jacrev jacobians of layer_norm's vjp on extremal inputs.

    Fix: this previously passed ``()`` as ``kwargs_options`` to
    ``self._arg_and_kwarg_options``, which made the underlying
    ``itertools.product`` include an empty factor — the product was empty,
    the loop body never executed, and the test silently checked nothing.
    The input/bias/weight combinations are now generated directly.
    """
    N, C, H, W = 3, 4, 5, 6
    shapes = ((N, C), (N, C, H), (N, C, H, W))
    for shape in shapes:
        input_options = self._make_extremal_inputs(shape, device)
        normalized_shape = shape[1:]
        weight_options = self._make_extremal_inputs(normalized_shape, device)
        bias_options = self._make_extremal_inputs(normalized_shape, device)
        for input, bias, weight in itertools.product(
            input_options, bias_options, weight_options
        ):

            def fn(input, weight, bias):
                # normalized_shape is closed over from the enclosing loop.
                return torch.nn.functional.layer_norm(
                    input, normalized_shape, weight=weight, bias=bias
                )

            result = fn(input, weight, bias)
            cotangents = torch.randn_like(result, device=device)
            self._compare_jacobians_of_vjp(fn, (cotangents, input, weight, bias))
@with_tf32_off  # https://github.com/pytorch/pytorch/issues/86798
@ops(
    op_db + additional_op_db + autograd_function_db,
    allowed_dtypes=(torch.float32, torch.double),
)
@skipOps(
    "TestOperators",
    "test_vmap_autograd_grad",
    {
        # The size of tensor a (4) must match the size of tensor b (10) at non-singleton dimension 0
        xfail("masked_select"),
        xfail("nn.functional.max_unpool2d", "grad"),  # contiguous call
        xfail("nn.functional.max_unpool2d"),  # contiguous call
        xfail("to_sparse"),  # dispatch key issue
        xfail("torch.ops.aten._efficient_attention_forward"),  # outputs ints
        # https://github.com/pytorch/pytorch/issues/96560#issuecomment-2151063723
        # ** minor accuracy issue for float32 on ROCm
        decorate("xlogy", decorator=skipIfRocm),
        # numerical inconsistencies, look like bugs
        skip(
            "matrix_exp", dtypes=(torch.float32,), device_type="cuda"
        ),  # fails on linux, passes on windows
        skip(
            "ldexp", dtypes=(torch.float32,), device_type="cpu"
        ),  # fails on all but mac
        skip("__rmatmul__"),  # flaky needs investigation
        skip("matmul"),  # flaky needs investigation
        skip("nn.functional.conv_transpose3d"),  # flaky needs investigation
        skip("nn.functional.conv_transpose2d"),  # flaky needs investigation
        skip("nn.functional.conv_transpose1d"),  # flaky needs investigation
        skip(
            "nn.functional.layer_norm", dtypes=(torch.float32,), device_type="cpu"
        ),  # fails on windows
        skip(
            "linalg.lu_factor", dtypes=(torch.float32,), device_type="cuda"
        ),  # fails on all but windows
        skip(
            "linalg.lu_factor_ex", dtypes=(torch.float32,), device_type="cuda"
        ),  # fails on all but windows
        skip("linalg.multi_dot", "", device_type="cpu"),
        skip("sparse.sampled_addmm", ""),
        skip("sparse.mm", "reduce"),
        skip("native_layer_norm", "", device_type="cpu"),
    },
)
@opsToleranceOverride(
    "TestOperators",
    "test_vmap_autograd_grad",
    (
        tol1(
            "ldexp",
            {torch.float32: tol(atol=3e-04, rtol=1.6e-06)},
            device_type="cuda",
        ),
        tol1(
            "linalg.householder_product",
            {torch.float32: tol(atol=5e-04, rtol=9e-03)},
            device_type="cuda",
        ),
        tol1(
            "linalg.householder_product",
            {torch.float32: tol(atol=6e-03, rtol=1e-03)},
            device_type="cpu",
        ),
        tol1(
            "linalg.multi_dot",
            {torch.float32: tol(atol=2e-04, rtol=1e-04)},
            device_type="cuda",
        ),
        tol2(
            "linalg.pinv", "hermitian", {torch.float32: tol(atol=5e-06, rtol=5e-06)}
        ),
        tol1("nn.functional.conv3d", {torch.float32: tol(atol=5e-04, rtol=9e-03)}),
        tol1(
            "nn.functional.conv2d",
            {torch.float32: tol(atol=5e-05, rtol=5e-05)},
            device_type="cuda",
        ),
        tol1("svd_lowrank", {torch.float32: tol(atol=5e-05, rtol=5e-05)}),
        tol1("pca_lowrank", {torch.float32: tol(atol=5e-05, rtol=5e-05)}),
    ),
)
def test_vmap_autograd_grad(self, device, dtype, op):
    """Compare vmap of torch.autograd.grad against a per-example loop."""

    def is_differentiable(inp):
        # Tensors that require grad or are already part of an autograd graph.
        return isinstance(inp, Tensor) and (
            inp.grad_fn is not None or inp.requires_grad
        )

    def get_flat_differentiable(tree):
        flattened = pytree.tree_leaves(tree)
        return tuple(i for i in flattened if is_differentiable(i))

    def get_differentiable_linked(list1, list2):
        # Keep (output, cotangent) pairs only where the output is differentiable.
        paired_list = zip(list1, list2)
        paired_list = tuple(
            (first, second)
            for (first, second) in paired_list
            if is_differentiable(first)
        )
        return zip(*paired_list)

    def filter_none(out):
        flattened = pytree.tree_leaves(out)
        return tuple(o for o in flattened if o is not None)

    if not op.supports_autograd:
        self.skipTest("Skipped! Autograd not supported.")
        return

    sample_inputs = op.sample_inputs(device, dtype, requires_grad=True)
    for sample_input in sample_inputs:
        fn, primals = normalize_op_input_output(op, sample_input)
        out = fn(*primals)
        cotangents = tree_map(torch.randn_like, out)

        def compute_grad(cotangents):
            # torch.autograd.grad over the differentiable outputs/primals;
            # `out` and `primals` are closed over from the enclosing loop.
            out_flattened = out
            cotangents_flattened = cotangents
            if not isinstance(out_flattened, torch.Tensor):
                out_flattened = pytree.tree_leaves(out)
                cotangents_flattened = pytree.tree_leaves(cotangents)
                out_flattened, cotangents_flattened = get_differentiable_linked(
                    out_flattened, cotangents_flattened
                )

            return filter_none(
                torch.autograd.grad(
                    out_flattened,
                    get_flat_differentiable(primals),
                    cotangents_flattened,
                    retain_graph=True,
                    allow_unused=True,
                )
            )

        # NOTE(review): test_vjpvmap passes `op.name` to is_batch_norm_training;
        # confirm the helper also accepts an OpInfo here.
        is_batch_norm_and_training = is_batch_norm_training(op, sample_input.kwargs)
        generator = get_fallback_and_vmap_exhaustive(
            compute_grad,
            (cotangents,),
            {},
            is_batch_norm_and_training=is_batch_norm_and_training,
        )
        for loop_out, batched_out in generator:
            self.assertEqual(loop_out, batched_out)
def test_vmapvmapjvp_linalg_solve(self):
    """Regression test: nested vmap over jvp of linalg.solve.

    Checks the case where ``A`` looks contiguous to jvp but becomes
    non-contiguous inside the vmap calls, because vmap expands it — at
    both vmap levels. Compares nested vmap against a two-level loop.
    """
    solve_ops = [op for op in op_db if op.name == "linalg.solve"]
    assert len(solve_ops) > 0
    # this specializes a lot of code from the get_fallback_and_vmap_exhaustive
    # test. If we need this more generally, this could go for a refactor
    outer_batch, inner_batch = 2, 3
    A = torch.randn(4, 4)
    k = torch.randn(4, 5, inner_batch, outer_batch)
    fn, args = get_jvp_variant_primals_tangents(
        torch.linalg.solve, SampleInput(A, args=(k,))
    )
    dims = (None, -1, None, -1)
    batched_out = vmap(vmap(fn, in_dims=dims), in_dims=dims)(*args)
    loop_out = loop2(fn, dims, dims, 0, 0, outer_batch, inner_batch, *args)
    self.assertEqual(loop_out, batched_out)
@ops(
    filter(lambda op: op.name in aliasing_ops, op_db + additional_op_db),
    allowed_dtypes=(torch.float,),
)
@parametrize("grad_op", ["jvp", "vjp"])
def test_view_then_inplace(self, device, dtype, op, grad_op):
    """In-place writes into a view's output must raise under jvp/vjp.

    For each aliasing (view-producing) op, build a function that copies into
    the op's output and check that the grad transform rejects the in-place
    write with the expected RuntimeError.
    """
    for sample_input in op.sample_inputs(device, dtype):

        def f(x):
            # In-place write into the output of the view op.
            op(sample_input.input, *sample_input.args, **sample_input.kwargs).copy_(
                x
            )
            return x

        # Run once outside any transform to obtain the output shape used for
        # the tangent/cotangent tensors below.
        without_grad = op(
            sample_input.input, *sample_input.args, **sample_input.kwargs
        )
        if grad_op == "jvp":
            with self.assertRaisesRegex(
                RuntimeError,
                "During a grad .* attempted to call in-place operation",
            ):
                jvp(
                    f,
                    (torch.randn_like(without_grad),),
                    (torch.randn_like(without_grad),),
                )
        else:
            assert grad_op == "vjp"
            with self.assertRaisesRegex(
                RuntimeError,
                "During a grad .* attempted to call in-place operation",
            ):
                vjp(f, torch.randn_like(without_grad))
@ops(
    filter(
        lambda op: op.name in aliasing_ops_list_return, op_db + additional_op_db
    ),
    allowed_dtypes=(torch.float,),
)
@parametrize("grad_op", ["jvp", "vjp"])
def test_view_then_inplace_list_return(self, device, dtype, op, grad_op):
    """Like test_view_then_inplace, for ops that return a list of views."""
    for sample_input in op.sample_inputs(device, dtype):

        def f(x):
            # In-place write into the first element of the returned view list.
            op(sample_input.input, *sample_input.args, **sample_input.kwargs)[
                0
            ].copy_(x)
            return x

        # Run once outside any transform to obtain the shape used for the
        # tangent/cotangent tensors below.
        without_grad = op(
            sample_input.input, *sample_input.args, **sample_input.kwargs
        )[0]
        with self.assertRaisesRegex(
            RuntimeError, "During a grad .* attempted to call in-place operation"
        ):
            if grad_op == "jvp":
                jvp(
                    f,
                    (torch.randn_like(without_grad),),
                    (torch.randn_like(without_grad),),
                )
            else:
                assert grad_op == "vjp"
                vjp(f, torch.randn_like(without_grad))
@parametrize("grad_op", ["jvp", "vjp"])
def test_view_then_inplace_special(self, grad_op):
    """In-place writes into basic-indexing views must raise under jvp/vjp."""
    # some things in __getitem__ use at::index, which doesn't alias, so this tests a subset of them that do alias
    ops = [
        lambda x: x[0],
        lambda x: x[0, 0, 0],
        lambda x: x[:1],
        lambda x: x[:, :1],
        lambda x: x[:, :1, :],
    ]
    for op in ops:

        def f(x):
            # NOTE: `captured` is resolved via late binding; it is assigned
            # below, before `f` is ever invoked.
            op(captured).copy_(x)
            return x

        captured = torch.randn(4, 3, 3)
        without_grad = op(captured)
        if grad_op == "jvp":
            with self.assertRaisesRegex(
                RuntimeError,
                "During a grad .* attempted to call in-place operation",
            ):
                jvp(
                    f,
                    (torch.randn_like(without_grad),),
                    (torch.randn_like(without_grad),),
                )
        else:
            assert grad_op == "vjp"
            with self.assertRaisesRegex(
                RuntimeError,
                "During a grad .* attempted to call in-place operation",
            ):
                vjp(f, torch.randn_like(without_grad))
@with_tf32_off  # https://github.com/pytorch/pytorch/issues/86798
# NOTE: [three-transform testing]
# We only test the autograd_function_db tests here.
#
# Usually testing the composition of two transforms is sufficient to convince
# ourselves that an operator is correctly implemented. For the following cases,
# we want to be extra sure, so we send those through some three-transform tests:
# - autograd.Function. The mechanism is via PyDispatcher/HigherOrderOperator, not the
#   regular PyTorch dispatcher, so it's good to exercise more caution.
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
@skipOps(
    "TestOperators",
    "test_vmapvjpvmap",
    {
        xfail("NumpyCubeNotComposableAutogradFunction"),  # Not composable
    },
)
def test_vmapvjpvmap(self, device, dtype, op):
    """Compare vmap(vjp(vmap(op))) against a loop-based map(vjp(map(op)))."""
    samples = op.sample_inputs(device, dtype, requires_grad=True)
    B = 2  # batch size used for both vmap and the loop reference
    for sample in samples:
        args = [sample.input] + list(sample.args)
        kwargs = sample.kwargs
        generator = generate_vmap_inputs(args, kwargs, batch_size=B)
        # NOTE: `kwargs` is deliberately rebound by the generator below.
        for batched_args, in_dims, kwargs in generator:
            inner_vmapped_op = vmap(op, in_dims)
            inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)

            inner_vmapped_fn, primals = normalize_op_input_output2(
                inner_vmapped_op,
                batched_args,
                kwargs,
                sample.output_process_fn_grad,
            )
            inner_mapped_fn, _ = normalize_op_input_output2(
                inner_mapped_op, batched_args, kwargs, sample.output_process_fn_grad
            )
            result = inner_mapped_fn(*primals)
            cotangents = tree_map(lambda x: torch.rand_like(x), result)

            def apply_vjp(fn):
                # Wrap fn as (primals, cotangents) -> vjp outputs.
                def inner(primals, cotangents):
                    _, vjp_fn = vjp(fn, *primals)
                    return vjp_fn(cotangents)

                return inner

            vjpvmap_fn = apply_vjp(inner_vmapped_fn)
            vjpmap_fn = apply_vjp(inner_mapped_fn)
            batched_args = (primals, cotangents)

            generator = generate_vmap_inputs(batched_args, {})
            # NOTE: `batched_args` is rebound again by this inner generator.
            for batched_args, in_dims, _ in generator:
                # strategy: compare vmap(vjp(vmap(op)) vs map(vjp(map(op))
                vmapvjpvmap_fn = vmap(vjpvmap_fn, in_dims)
                mapvjpmap_fn = functools.partial(loop, vjpmap_fn, in_dims, 0, B)

                result = vmapvjpvmap_fn(*batched_args)
                expected = mapvjpmap_fn(*batched_args)
                self.assertEqual(result, expected)
# See NOTE: [three-transform testing]
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
@skipOps(
    "TestOperators",
    "test_vjpvmapvmap",
    {
        xfail("NumpyCubeNotComposableAutogradFunction"),  # Not composable
    },
)
def test_vjpvmapvmap(self, device, dtype, op):
    """Compare vjp(vmap(vmap(op))) against a loop-based vjp(map(map(op)))."""
    samples = op.sample_inputs(device, dtype, requires_grad=True)
    B = 2  # batch size used for both vmap levels and the loop reference
    for sample in samples:
        args = [sample.input] + list(sample.args)
        kwargs = sample.kwargs
        generator = generate_vmap_inputs(args, kwargs, batch_size=B)
        # NOTE: `kwargs`/`batched_args` are rebound by the generators below.
        for batched_args, inner_in_dims, kwargs in generator:
            inner_vmapped_op = vmap(op, inner_in_dims)
            inner_mapped_op = functools.partial(loop, op, inner_in_dims, 0, B)
            generator = generate_vmap_inputs(batched_args, kwargs)
            for batched_args, in_dims, kwargs in generator:
                # strategy: compare vjp(vmap(vmap(op)) vs vjp(map(map(op))
                vmapped_op = vmap(inner_vmapped_op, in_dims)
                mapped_op = functools.partial(loop, inner_mapped_op, in_dims, 0, B)

                vmapped_fn, primals = normalize_op_input_output2(
                    vmapped_op, batched_args, kwargs, sample.output_process_fn_grad
                )
                mapped_fn, _ = normalize_op_input_output2(
                    mapped_op, batched_args, kwargs, sample.output_process_fn_grad
                )
                result = mapped_fn(*primals)
                cotangents = tree_map(lambda x: torch.rand_like(x), result)

                _, vjp_fn = vjp(mapped_fn, *primals)
                expected_vjps = vjp_fn(cotangents)

                _, vjp_fn = vjp(vmapped_fn, *primals)
                result_vjps = vjp_fn(cotangents)

                self.assertEqual(result_vjps, expected_vjps)
# See NOTE: [three-transform testing]
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
@skipOps(
"TestOperators",
"test_vjpvjpvmap",
{
xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
},
)
def test_vjpvjpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
generator = generate_vmap_inputs(args, kwargs, batch_size=B)
for batched_args, in_dims, kwargs in generator:
inner_vmapped_op = vmap(op, in_dims)
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
vjpmap_fn, args = get_vjpfull_variant2(
inner_mapped_op, batched_args, kwargs
)
vjpvmap_fn, _ = get_vjpfull_variant2(
inner_vmapped_op, batched_args, kwargs
)
vjpvjpvmap_fn, new_args = get_vjpfull_variant2(vjpvmap_fn, args, {})
vjpvjpmap_fn, _ = get_vjpfull_variant2(vjpmap_fn, args, {})
expected = vjpvjpmap_fn(*new_args)
result = vjpvjpvmap_fn(*new_args)
self.assertEqual(result, expected)
# We're generally convinced that jvp x vmap works (vmap turns an operator
# into another operator and we test jvp support for operators). So
# we only test it on the things we're not sure about:
# - the autograd.Function <> functorch interaction
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
@skipOps(
"TestOperators",
"test_jvpvmap",
{
xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
},
)
def test_jvpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
generator = generate_vmap_inputs(args, kwargs, batch_size=B)
for batched_args, in_dims, kwargs in generator:
inner_vmapped_op = vmap(op, in_dims)
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
jvpvmap_op, primals = get_jvp_variant_primals_tangents2(
inner_vmapped_op,
batched_args,
kwargs,
sample.output_process_fn_grad,
)
jvpmap_op, _ = get_jvp_variant_primals_tangents2(
inner_mapped_op, batched_args, kwargs, sample.output_process_fn_grad
)
expected = jvpmap_op(*primals)
result = jvpvmap_op(*primals)
self.assertEqual(result, expected)
# See NOTE: [three-transform testing]
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
@skipOps(
"TestOperators",
"test_jvpvmapvmap",
{
xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
},
)
def test_jvpvmapvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
generator = generate_vmap_inputs(args, kwargs, batch_size=B)
for batched_args, inner_in_dims, kwargs in generator:
inner_vmapped_op = vmap(op, inner_in_dims)
inner_mapped_op = functools.partial(loop, op, inner_in_dims, 0, B)
generator = generate_vmap_inputs(batched_args, kwargs)
for batched_args, in_dims, kwargs in generator:
# strategy: compare jvp(vmap(vmap(op)) vs jvp(map(map(op))
vmapped_op = vmap(inner_vmapped_op, in_dims)
mapped_op = functools.partial(loop, inner_mapped_op, in_dims, 0, B)
jvpvmapvmap_fn, primals = get_jvp_variant_primals_tangents2(
vmapped_op, batched_args, kwargs, sample.output_process_fn_grad
)
jvpmapmap_fn, _ = get_jvp_variant_primals_tangents2(
mapped_op, batched_args, kwargs, sample.output_process_fn_grad
)
expected = jvpmapmap_fn(*primals)
result = jvpvmapvmap_fn(*primals)
self.assertEqual(result, expected)
# See NOTE: [three-transform testing]
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
@skipOps(
"TestOperators",
"test_vmapjvpvmap",
{
xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
},
)
def test_vmapjvpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
generator = generate_vmap_inputs(args, kwargs, batch_size=B)
for batched_args, in_dims, kwargs in generator:
inner_vmapped_op = vmap(op, in_dims)
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
jvpvmap_fn, primals = get_jvp_variant_primals_tangents2(
inner_vmapped_op,
batched_args,
kwargs,
sample.output_process_fn_grad,
)
jvpmap_fn, _ = get_jvp_variant_primals_tangents2(
inner_mapped_op, batched_args, kwargs, sample.output_process_fn_grad
)
generator = generate_vmap_inputs(primals, {})
for batched_args, in_dims, _ in generator:
# strategy: compare vmap(jvp(vmap(op)) vs map(jvp(map(op))
vmapjvpvmap_fn = vmap(jvpvmap_fn, in_dims)
mapjvpmap_fn = functools.partial(loop, jvpmap_fn, in_dims, 0, B)
result = vmapjvpvmap_fn(*batched_args)
expected = mapjvpmap_fn(*batched_args)
self.assertEqual(result, expected)
# See NOTE: [three-transform testing]
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
@skipOps(
"TestOperators",
"test_jvpjvpvmap",
{
xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
},
)
def test_jvpjvpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
generator = generate_vmap_inputs(args, kwargs, batch_size=B)
for batched_args, in_dims, kwargs in generator:
inner_vmapped_op = vmap(op, in_dims)
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
jvpmap_fn, args = get_jvp_variant_primals_tangents2(
inner_mapped_op, batched_args, kwargs, sample.output_process_fn_grad
)
jvpvmap_fn, _ = get_jvp_variant_primals_tangents2(
inner_vmapped_op,
batched_args,
kwargs,
sample.output_process_fn_grad,
)
jvpjvpvmap_fn, new_args = get_jvp_variant_primals_tangents2(
jvpvmap_fn, args, {}
)
jvpjvpmap_fn, _ = get_jvp_variant_primals_tangents2(jvpmap_fn, args, {})
expected = jvpjvpmap_fn(*new_args)
result = jvpjvpvmap_fn(*new_args)
self.assertEqual(result, expected)
# See NOTE: [three-transform testing]
@ops(autograd_function_db, allowed_dtypes=(torch.float32,))
@skipOps(
"TestOperators",
"test_jvpvjpvmap",
{
xfail("NumpyCubeNotComposableAutogradFunction"), # Not composable
},
)
def test_jvpvjpvmap(self, device, dtype, op):
samples = op.sample_inputs(device, dtype, requires_grad=True)
B = 2
for sample in samples:
args = [sample.input] + list(sample.args)
kwargs = sample.kwargs
generator = generate_vmap_inputs(args, kwargs, batch_size=B)
for batched_args, in_dims, kwargs in generator:
inner_vmapped_op = vmap(op, in_dims)
inner_mapped_op = functools.partial(loop, op, in_dims, 0, B)
vjpmap_fn, args = get_vjpfull_variant2(
inner_mapped_op, batched_args, kwargs
)
vjpvmap_fn, _ = get_vjpfull_variant2(
inner_vmapped_op, batched_args, kwargs
)
jvpvjpvmap_fn, new_args = get_jvp_variant_primals_tangents2(
vjpvmap_fn, args, {}
)
jvpvjpmap_fn, _ = get_jvp_variant_primals_tangents2(vjpmap_fn, args, {})
expected = jvpvjpmap_fn(*new_args)
result = jvpvjpvmap_fn(*new_args)
self.assertEqual(result, expected)
def test_data_write_errors_under_transform(self, device):
t = torch.randn(3, 3, device=device)
def fn(t):
t.data = torch.randn(3, 3)
return t.sum()
msg = "mutating directly with `.data` inside functorch transform"
with self.assertRaisesRegex(RuntimeError, msg):
grad(fn)(t)
with self.assertRaisesRegex(RuntimeError, msg):
vjp(fn, t)
with self.assertRaisesRegex(RuntimeError, msg):
jvp(fn, (t,), (torch.randn_like(t),))
def test_tensor_with_scalar_list(self, device):
x = torch.randn((), device=device)
def func_list_of_scalar(x):
return torch.tensor([x], device=device)
def func(x):
return torch.tensor(x, device=device).view(1)
actual_o, actual_fn = vjp(func_list_of_scalar, x)
expected_o, expected_fn = vjp(func, x)
self.assertEqual(actual_o, expected_o)
self.assertEqual(
expected_fn(torch.ones_like(expected_o)),
actual_fn(torch.ones_like(actual_o)),
)
@ops(bool_ordered_op_db, dtypes=[torch.bool])
def test_ordered_bool_raises(self, device, dtype, op):
# Generate sample inputs for the op
sample_inputs = op.sample_inputs(device, dtype)
for sample_input in sample_inputs:
# Check that the op raises NotImplementedError or appropriate failure
self.assertRaises(
RuntimeError,
op,
sample_input.input,
*sample_input.args,
**sample_input.kwargs,
)
@ops(
complex_ordered_op_db,
dtypes=[torch.complex32, torch.complex64, torch.complex128],
)
def test_ordered_complex_raises(self, device, dtype, op):
# Generate sample inputs for the op
sample_inputs = op.sample_inputs(device, dtype)
for sample_input in sample_inputs:
# Check that the op raises NotImplementedError or appropriate failure
self.assertRaises(
RuntimeError,
op,
sample_input.input,
*sample_input.args,
**sample_input.kwargs,
)
only_for = ("cpu", "cuda")
instantiate_device_type_tests(TestOperators, globals(), only_for=only_for)
if __name__ == "__main__":
run_tests()
| TestOperators |
python | apache__airflow | devel-common/src/tests_common/test_utils/perf/perf_kit/memory.py | {
"start": 1234,
"end": 2549
} | class ____:
"""Trace results of memory,."""
def __init__(self):
self.before = 0
self.after = 0
self.value = 0
@contextmanager
def trace_memory(human_readable=True, gc_collect=False):
"""
Decorate function and calculate the amount of difference in free memory before and after script execution.
In other words, how much data the code snippet has used up memory.
:param human_readable: If yes, the result will be displayed in human readable units.
If no, the result will be displayed as bytes.
:param gc_collect: If yes, the garbage collector will be started before checking used memory.
"""
if gc_collect:
gc.collect()
before = _get_process_memory()
result = TraceMemoryResult()
try:
yield result
finally:
if gc_collect:
gc.collect()
after = _get_process_memory()
diff = after - before
result.before = before
result.after = after
result.value = diff
if human_readable:
human_diff = _human_readable_size(diff)
print(f"Memory: {human_diff}")
else:
print(f"Memory: {diff} bytes")
if __name__ == "__main__":
# Example:
with trace_memory():
import airflow # noqa: F401
| TraceMemoryResult |
python | tornadoweb__tornado | tornado/websocket.py | {
"start": 29029,
"end": 51391
} | class ____(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0F
stream = None # type: IOStream
def __init__(
self,
handler: "_WebSocketDelegate",
mask_outgoing: bool,
params: _WebSocketParams,
) -> None:
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self.params = params
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None # type: Optional[bytes]
self._frame_length = None
self._fragmented_message_buffer = None # type: Optional[bytearray]
self._fragmented_message_opcode = None
self._waiting = None # type: object
self._compression_options = params.compression_options
self._decompressor = None # type: Optional[_PerMessageDeflateDecompressor]
self._compressor = None # type: Optional[_PerMessageDeflateCompressor]
self._frame_compressed = None # type: Optional[bool]
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
self._received_pong = False # type: bool
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
self._ping_coroutine = None # type: Optional[asyncio.Task]
# Use a property for this to satisfy the abc.
@property
def selected_subprotocol(self) -> Optional[str]:
return self._selected_subprotocol
@selected_subprotocol.setter
def selected_subprotocol(self, value: Optional[str]) -> None:
self._selected_subprotocol = value
async def accept_connection(self, handler: WebSocketHandler) -> None:
try:
self._handle_websocket_headers(handler)
except ValueError:
handler.set_status(400)
log_msg = "Missing/Invalid WebSocket headers"
handler.finish(log_msg)
gen_log.debug(log_msg)
return
try:
await self._accept_connection(handler)
except asyncio.CancelledError:
self._abort()
return
except ValueError:
gen_log.debug("Malformed WebSocket request received", exc_info=True)
self._abort()
return
def _handle_websocket_headers(self, handler: WebSocketHandler) -> None:
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: handler.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key: Union[str, bytes]) -> str:
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
def _challenge_response(self, handler: WebSocketHandler) -> str:
return WebSocketProtocol13.compute_accept_value(
cast(str, handler.request.headers.get("Sec-Websocket-Key"))
)
async def _accept_connection(self, handler: WebSocketHandler) -> None:
subprotocol_header = handler.request.headers.get("Sec-WebSocket-Protocol")
if subprotocol_header:
subprotocols = [s.strip() for s in subprotocol_header.split(",")]
else:
subprotocols = []
self.selected_subprotocol = handler.select_subprotocol(subprotocols)
if self.selected_subprotocol:
assert self.selected_subprotocol in subprotocols
handler.set_header("Sec-WebSocket-Protocol", self.selected_subprotocol)
extensions = self._parse_extensions_header(handler.request.headers)
for ext in extensions:
if ext[0] == "permessage-deflate" and self._compression_options is not None:
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors("server", ext[1], self._compression_options)
if (
"client_max_window_bits" in ext[1]
and ext[1]["client_max_window_bits"] is None
):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]["client_max_window_bits"]
handler.set_header(
"Sec-WebSocket-Extensions",
httputil._encode_header("permessage-deflate", ext[1]),
)
break
handler.clear_header("Content-Type")
handler.set_status(101)
handler.set_header("Upgrade", "websocket")
handler.set_header("Connection", "Upgrade")
handler.set_header("Sec-WebSocket-Accept", self._challenge_response(handler))
handler.finish()
self.stream = handler._detach_stream()
self.start_pinging()
try:
open_result = handler.open(*handler.open_args, **handler.open_kwargs)
if open_result is not None:
await open_result
except Exception:
handler.log_exception(*sys.exc_info())
self._abort()
return
await self._receive_frame_loop()
def _parse_extensions_header(
self, headers: httputil.HTTPHeaders
) -> List[Tuple[str, Dict[str, str]]]:
extensions = headers.get("Sec-WebSocket-Extensions", "")
if extensions:
return [httputil._parse_header(e.strip()) for e in extensions.split(",")]
return []
def _process_server_headers(
self, key: Union[str, bytes], headers: httputil.HTTPHeaders
) -> None:
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers["Upgrade"].lower() == "websocket"
assert headers["Connection"].lower() == "upgrade"
accept = self.compute_accept_value(key)
assert headers["Sec-Websocket-Accept"] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if ext[0] == "permessage-deflate" and self._compression_options is not None:
self._create_compressors("client", ext[1])
else:
raise ValueError("unsupported extension %r", ext)
self.selected_subprotocol = headers.get("Sec-WebSocket-Protocol", None)
def _get_compressor_options(
self,
side: str,
agreed_parameters: Dict[str, Any],
compression_options: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + "_no_context_takeover") not in agreed_parameters
) # type: Dict[str, Any]
wbits_header = agreed_parameters.get(side + "_max_window_bits", None)
if wbits_header is None:
options["max_wbits"] = zlib.MAX_WBITS
else:
options["max_wbits"] = int(wbits_header)
options["compression_options"] = compression_options
return options
def _create_compressors(
self,
side: str,
agreed_parameters: Dict[str, Any],
compression_options: Optional[Dict[str, Any]] = None,
) -> None:
# TODO: handle invalid parameters gracefully
allowed_keys = {
"server_no_context_takeover",
"client_no_context_takeover",
"server_max_window_bits",
"client_max_window_bits",
}
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = "client" if (side == "server") else "server"
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters, compression_options)
)
self._decompressor = _PerMessageDeflateDecompressor(
max_message_size=self.params.max_message_size,
**self._get_compressor_options(
other_side, agreed_parameters, compression_options
),
)
def _write_frame(
self, fin: bool, opcode: int, data: bytes, flags: int = 0
) -> "Future[None]":
data_len = len(data)
if opcode & 0x8:
# All control frames MUST have a payload length of 125
# bytes or less and MUST NOT be fragmented.
if not fin:
raise ValueError("control frames may not be fragmented")
if data_len > 125:
raise ValueError("control frame payloads may not exceed 125 bytes")
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if data_len < 126:
frame += struct.pack("B", data_len | mask_bit)
elif data_len <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, data_len)
else:
frame += struct.pack("!BQ", 127 | mask_bit, data_len)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
return self.stream.write(frame)
def write_message(
self, message: Union[str, bytes, Dict[str, Any]], binary: bool = False
) -> "Future[None]":
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
# For historical reasons, write methods in Tornado operate in a semi-synchronous
# mode in which awaiting the Future they return is optional (But errors can
# still be raised). This requires us to go through an awkward dance here
# to transform the errors that may be returned while presenting the same
# semi-synchronous interface.
try:
fut = self._write_frame(True, opcode, message, flags=flags)
except StreamClosedError:
raise WebSocketClosedError()
async def wrapper() -> None:
try:
await fut
except StreamClosedError:
raise WebSocketClosedError()
return asyncio.ensure_future(wrapper())
def write_ping(self, data: bytes) -> None:
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
async def _receive_frame_loop(self) -> None:
try:
while not self.client_terminated:
await self._receive_frame()
except StreamClosedError:
self._abort()
self.handler.on_ws_connection_close(self.close_code, self.close_reason)
async def _read_bytes(self, n: int) -> bytes:
data = await self.stream.read_bytes(n)
self._wire_bytes_in += n
return data
async def _receive_frame(self) -> None:
# Read the frame header.
data = await self._read_bytes(2)
header, mask_payloadlen = struct.unpack("BB", data)
is_final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
opcode = header & self.OPCODE_MASK
opcode_is_control = opcode & 0x8
if self._decompressor is not None and opcode != 0:
# Compression flag is present in the first frame's header,
# but we can't decompress until we have all the frames of
# the message.
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
is_masked = bool(mask_payloadlen & 0x80)
payloadlen = mask_payloadlen & 0x7F
# Parse and validate the length.
if opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
if payloadlen < 126:
self._frame_length = payloadlen
elif payloadlen == 126:
data = await self._read_bytes(2)
payloadlen = struct.unpack("!H", data)[0]
elif payloadlen == 127:
data = await self._read_bytes(8)
payloadlen = struct.unpack("!Q", data)[0]
new_len = payloadlen
if self._fragmented_message_buffer is not None:
new_len += len(self._fragmented_message_buffer)
if new_len > self.params.max_message_size:
self.close(1009, "message too big")
self._abort()
return
# Read the payload, unmasking if necessary.
if is_masked:
self._frame_mask = await self._read_bytes(4)
data = await self._read_bytes(payloadlen)
if is_masked:
assert self._frame_mask is not None
data = _websocket_mask(self._frame_mask, data)
# Decide what to do with this frame.
if opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not is_final_frame:
# control frames must not be fragmented
self._abort()
return
elif opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer.extend(data)
if is_final_frame:
opcode = self._fragmented_message_opcode
data = bytes(self._fragmented_message_buffer)
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if not is_final_frame:
self._fragmented_message_opcode = opcode
self._fragmented_message_buffer = bytearray(data)
if is_final_frame:
handled_future = self._handle_message(opcode, data)
if handled_future is not None:
await handled_future
def _handle_message(self, opcode: int, data: bytes) -> "Optional[Future[None]]":
"""Execute on_message, returning its Future if it is a coroutine."""
if self.client_terminated:
return None
if self._frame_compressed:
assert self._decompressor is not None
try:
data = self._decompressor.decompress(data)
except _DecompressTooLargeError:
self.close(1009, "message too big after decompression")
self._abort()
return None
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return None
return self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
return self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.close_code = struct.unpack(">H", data[:2])[0]
if len(data) > 2:
self.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.close_code)
elif opcode == 0x9:
# Ping
try:
self._write_frame(True, 0xA, data)
except StreamClosedError:
self._abort()
self._run_callback(self.handler.on_ping, data)
elif opcode == 0xA:
# Pong
self._received_pong = True
return self._run_callback(self.handler.on_pong, data)
else:
self._abort()
return None
def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b""
else:
close_data = struct.pack(">H", code)
if reason is not None:
close_data += utf8(reason)
try:
self._write_frame(True, 0x8, close_data)
except StreamClosedError:
self._abort()
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort
)
if self._ping_coroutine:
self._ping_coroutine.cancel()
self._ping_coroutine = None
def is_closing(self) -> bool:
"""Return ``True`` if this connection is closing.
The connection is considered closing if either side has
initiated its closing handshake or if the stream has been
shut down uncleanly.
"""
return self.stream.closed() or self.client_terminated or self.server_terminated
def set_nodelay(self, x: bool) -> None:
self.stream.set_nodelay(x)
@property
def ping_interval(self) -> float:
interval = self.params.ping_interval
if interval is not None:
return interval
return 0
@property
def ping_timeout(self) -> float:
timeout = self.params.ping_timeout
if timeout is not None:
if self.ping_interval and timeout > self.ping_interval:
de_dupe_gen_log(
# Note: using de_dupe_gen_log to prevent this message from
# being duplicated for each connection
logging.WARNING,
f"The websocket_ping_timeout ({timeout}) cannot be longer"
f" than the websocket_ping_interval ({self.ping_interval})."
f"\nSetting websocket_ping_timeout={self.ping_interval}",
)
return self.ping_interval
return timeout
return self.ping_interval
def start_pinging(self) -> None:
"""Start sending periodic pings to keep the connection alive"""
if (
# prevent multiple ping coroutines being run in parallel
not self._ping_coroutine
# only run the ping coroutine if a ping interval is configured
and self.ping_interval > 0
):
self._ping_coroutine = asyncio.create_task(self.periodic_ping())
@staticmethod
def ping_sleep_time(*, last_ping_time: float, interval: float, now: float) -> float:
"""Calculate the sleep time until the next ping should be sent."""
return max(0, last_ping_time + interval - now)
async def periodic_ping(self) -> None:
"""Send a ping and wait for a pong if ping_timeout is configured.
Called periodically if the websocket_ping_interval is set and non-zero.
"""
interval = self.ping_interval
timeout = self.ping_timeout
await asyncio.sleep(interval)
while True:
# send a ping
self._received_pong = False
ping_time = IOLoop.current().time()
self.write_ping(b"")
# wait until the ping timeout
await asyncio.sleep(timeout)
# make sure we received a pong within the timeout
if timeout > 0 and not self._received_pong:
self.close(reason="ping timed out")
return
# wait until the next scheduled ping
await asyncio.sleep(
self.ping_sleep_time(
last_ping_time=ping_time,
interval=interval,
now=IOLoop.current().time(),
)
)
| WebSocketProtocol13 |
python | streamlit__streamlit | lib/streamlit/elements/widgets/checkbox.py | {
"start": 1830,
"end": 13996
} | class ____:
@gather_metrics("checkbox")
def checkbox(
self,
label: str,
value: bool = False,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: Width = "content",
) -> bool:
r"""Display a checkbox widget.
Parameters
----------
label : str
A short label explaining to the user what this checkbox is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
value : bool
Preselect the checkbox when it first renders. This will be
cast to bool internally.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_change : callable
An optional callback invoked when this checkbox's value changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
disabled : bool
An optional boolean that disables the checkbox if set to ``True``.
The default is ``False``.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
If this is ``"collapsed"``, Streamlit displays no label or spacer.
width : "content", "stretch", or int
The width of the checkbox widget. This can be one of the following:
- ``"content"`` (default): The width of the widget matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the widget matches the width of the
parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
bool
Whether or not the checkbox is checked.
Example
-------
>>> import streamlit as st
>>>
>>> agree = st.checkbox("I agree")
>>>
>>> if agree:
... st.write("Great!")
.. output::
https://doc-checkbox.streamlit.app/
height: 220px
"""
ctx = get_script_run_ctx()
return self._checkbox(
label=label,
value=value,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
disabled=disabled,
label_visibility=label_visibility,
type=CheckboxProto.StyleType.DEFAULT,
ctx=ctx,
width=width,
)
@gather_metrics("toggle")
def toggle(
self,
label: str,
value: bool = False,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: Width = "content",
) -> bool:
r"""Display a toggle widget.
Parameters
----------
label : str
A short label explaining to the user what this toggle is for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
value : bool
Preselect the toggle when it first renders. This will be
cast to bool internally.
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_change : callable
An optional callback invoked when this toggle's value changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
disabled : bool
An optional boolean that disables the toggle if set to ``True``.
The default is ``False``.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
If this is ``"collapsed"``, Streamlit displays no label or spacer.
width : "content", "stretch", or int
The width of the toggle widget. This can be one of the following:
- ``"content"`` (default): The width of the widget matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the widget matches the width of the
parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
bool
Whether or not the toggle is checked.
Example
-------
>>> import streamlit as st
>>>
>>> on = st.toggle("Activate feature")
>>>
>>> if on:
... st.write("Feature activated!")
.. output::
https://doc-toggle.streamlit.app/
height: 220px
"""
ctx = get_script_run_ctx()
return self._checkbox(
label=label,
value=value,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
disabled=disabled,
label_visibility=label_visibility,
type=CheckboxProto.StyleType.TOGGLE,
ctx=ctx,
width=width,
)
def _checkbox(
self,
label: str,
value: bool = False,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
type: CheckboxProto.StyleType.ValueType = CheckboxProto.StyleType.DEFAULT,
ctx: ScriptRunContext | None = None,
width: Width = "content",
) -> bool:
key = to_key(key)
check_widget_policies(
self.dg,
key,
on_change,
default_value=None if value is False else value,
)
maybe_raise_label_warnings(label, label_visibility)
element_id = compute_and_register_element_id(
"toggle" if type == CheckboxProto.StyleType.TOGGLE else "checkbox",
user_key=key,
key_as_main_identity=True,
dg=self.dg,
label=label,
value=bool(value),
help=help,
width=width,
)
checkbox_proto = CheckboxProto()
checkbox_proto.id = element_id
checkbox_proto.label = label
checkbox_proto.default = bool(value)
checkbox_proto.type = type
checkbox_proto.form_id = current_form_id(self.dg)
checkbox_proto.disabled = disabled
checkbox_proto.label_visibility.value = get_label_visibility_proto_value(
label_visibility
)
if help is not None:
checkbox_proto.help = dedent(help)
validate_width(width, allow_content=True)
layout_config = LayoutConfig(width=width)
serde = CheckboxSerde(value)
checkbox_state = register_widget(
checkbox_proto.id,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
value_type="bool_value",
)
if checkbox_state.value_changed:
checkbox_proto.value = checkbox_state.value
checkbox_proto.set_value = True
self.dg._enqueue("checkbox", checkbox_proto, layout_config=layout_config)
return checkbox_state.value
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| CheckboxMixin |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_pdf.py | {
"start": 13097,
"end": 13571
} | class ____:
"""
PDF reference object.
Use PdfFile.reserveObject() to create References.
"""
def __init__(self, id):
self.id = id
def __repr__(self):
return "<Reference %d>" % self.id
def pdfRepr(self):
return b"%d 0 R" % self.id
def write(self, contents, file):
write = file.write
write(b"%d 0 obj\n" % self.id)
write(pdfRepr(contents))
write(b"\nendobj\n")
@total_ordering
| Reference |
python | sqlalchemy__sqlalchemy | test/orm/test_eager_relations.py | {
"start": 217238,
"end": 218820
} | class ____(fixtures.DeclarativeMappedTest):
"""test for [ticket:3811] continuing on [ticket:3431]"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey("a.id"))
parent = relationship("A", remote_side=id, lazy="raise")
def test_multi_path_load_lazy_none(self):
A = self.classes.A
s = fixture_session()
s.add_all(
[
A(id=1, parent_id=None),
A(id=2, parent_id=2),
A(id=4, parent_id=None),
A(id=3, parent_id=4),
]
)
s.commit()
q1 = (
s.query(A)
.order_by(A.id)
.filter(A.id.in_([1, 2]))
.options(joinedload(A.parent))
)
def go():
for a in q1:
if a.id == 1:
assert a.parent is None
else:
assert a.parent is not None
self.assert_sql_count(testing.db, go, 1)
q1 = (
s.query(A)
.order_by(A.id)
.filter(A.id.in_([3, 4]))
.options(joinedload(A.parent))
)
def go():
for a in q1:
if a.id == 4:
assert a.parent is None
else:
assert a.parent is not None
self.assert_sql_count(testing.db, go, 1)
| EntityViaMultiplePathTestThree |
python | lxml__lxml | src/lxml/tests/dummy_http_server.py | {
"start": 1624,
"end": 2162
} | class ____:
def __init__(self, response_data, response_code=200, headers=()):
self.requests = []
self.response_code = response_code
self.response_data = response_data
self.headers = list(headers or ())
def __call__(self, environ, start_response):
self.requests.append((
environ.get('PATH_INFO'),
urlparse.parse_qsl(environ.get('QUERY_STRING'))))
start_response('%s OK' % self.response_code, self.headers)
return [self.response_data]
| HTTPRequestCollector |
python | pytorch__pytorch | tools/lldb/pytorch_lldb.py | {
"start": 271,
"end": 3443
} | class ____:
"""
Context-manager to temporarily disable all lldb breakpoints, useful if
there is a risk to hit one during the evaluation of one of our custom
commands
"""
def __enter__(self) -> None:
target = get_target()
if target.DisableAllBreakpoints() is False:
print("[-] error: failed to disable all breakpoints.")
def __exit__(self, etype: Any, evalue: Any, tb: Any) -> None:
target = get_target()
if target.EnableAllBreakpoints() is False:
print("[-] error: failed to enable all breakpoints.")
def IntArrayRef_summary(valobj: Any, internal_dict: Any, options: Any) -> str:
"""Print human readable representation of c10::IntArrayRef"""
with DisableBreakpoints():
target = get_target()
tensor = valobj.GetName()
result = target.EvaluateExpression(
f"torch::gdb::int_array_ref_string({tensor})"
)
str_result = str(result)
str_result = str_result[str_result.find('"') + 1 : -1]
return str_result
def DispatchKeyset_summary(valobj: Any, internal_dict: Any, options: Any) -> str:
"""Print human readable representation of c10::DispatchKeyset"""
with DisableBreakpoints():
target = get_target()
keyset = valobj.GetName()
result = target.EvaluateExpression(
f"torch::gdb::dispatch_keyset_string({keyset})"
)
str_result = str(result)
str_result = str_result[str_result.find('"') + 1 : -1]
return str_result
def Tensor_summary(valobj: Any, internal_dict: Any, options: Any) -> str:
"""Print a human readable representation of the given at::Tensor.
at::Tensor instances do not have a C++ implementation of a repr method: in
pytorch, this is done by pure-Python code. As such, print <tensor>
internally creates a Python wrapper for the given tensor and call repr()
on it.
Usage:
print self
"""
with DisableBreakpoints():
target = get_target()
tensor = valobj.GetName()
result = target.EvaluateExpression(f"torch::gdb::tensor_repr({tensor})")
str_result = str(result)
target.EvaluateExpression(f"(void)free({result.GetValue()})")
str_result = "\n" + str_result[str_result.find("tensor") : -1]
return str_result
# And the initialization code to add your commands
def __lldb_init_module(debugger: Any, internal_dict: Any) -> Any:
debugger.HandleCommand(
"type summary add c10::IntArrayRef -F pytorch_lldb.IntArrayRef_summary -w torch"
)
debugger.HandleCommand(
"type summary add c10::DispatchKeySet -F pytorch_lldb.DispatchKeyset_summary -w torch"
)
debugger.HandleCommand(
"type summary add at::Tensor -F pytorch_lldb.Tensor_summary -w torch"
)
print(
"Pretty Printing lldb summary for PyTorch AT types has been installed and is ready for use. "
"This category is enabled by default. To disable run: `type category disable torch`"
)
print(
"Usage:\n\tprint <at::tensor>\n\tprint <c10::IntArrayRef>\n\tprint <c10::DispatchKeySet>"
)
| DisableBreakpoints |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 387792,
"end": 388795
} | class ____:
forbidden_types = [
# The builtin scalar super types:
np.generic, np.flexible, np.number,
np.inexact, np.floating, np.complexfloating,
np.integer, np.unsignedinteger, np.signedinteger,
# character is a deprecated S1 special case:
np.character,
]
def test_dtype_coercion(self):
for scalar_type in self.forbidden_types:
assert_raises(TypeError, np.dtype, args=(scalar_type,))
def test_array_construction(self):
for scalar_type in self.forbidden_types:
assert_raises(TypeError, np.array, args=([], scalar_type,))
def test_not_deprecated(self):
# All specific types work
for group in np._core.sctypes.values():
for scalar_type in group:
np.dtype(scalar_type)
for scalar_type in [type, dict, list, tuple]:
# Typical python types are coerced to object currently:
np.dtype(scalar_type)
| TestDTypeCoercionForbidden |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 90435,
"end": 93806
} | class ____:
"""Test hu_HU address provider methods"""
def test_administrative_unit(self, faker, num_samples):
for _ in range(num_samples):
administrative_unit = faker.administrative_unit()
assert isinstance(administrative_unit, str)
assert administrative_unit in HuHuAddressProvider.counties
def test_street_address_with_county(self, faker, num_samples):
for _ in range(num_samples):
street_address_with_county = faker.street_address_with_county()
assert isinstance(street_address_with_county, str)
match = re.fullmatch(r".* \d*.\n.* [A-Za-zÀ-ȕ]*\nH-\d{4} [A-Za-zÀ-ȕ]*", street_address_with_county)
assert match
def test_city_prefix(self, faker, num_samples):
for _ in range(num_samples):
city_prefix = faker.city_prefix()
assert isinstance(city_prefix, str)
assert city_prefix in HuHuAddressProvider.city_prefs
def test_city_part(self, faker, num_samples):
for _ in range(num_samples):
city_part = faker.city_part()
assert isinstance(city_part, str)
assert city_part in HuHuAddressProvider.city_parts
def test_real_city_name(self, faker, num_samples):
for _ in range(num_samples):
real_city_name = faker.real_city_name()
assert isinstance(real_city_name, str)
assert real_city_name in HuHuAddressProvider.real_city_names
def test_frequent_street_name(self, faker, num_samples):
for _ in range(num_samples):
frequent_street_name = faker.frequent_street_name()
assert isinstance(frequent_street_name, str)
assert frequent_street_name in HuHuAddressProvider.frequent_street_names
def test_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
match = re.fullmatch(r"H-\d{4}", postcode)
assert match
def test_street_name(self, faker, num_samples):
for _ in range(num_samples):
street_name = faker.street_name()
assert isinstance(street_name, str)
def test_building_number(self, faker, num_samples):
for _ in range(num_samples):
building_number = faker.building_number()
assert isinstance(building_number, str)
match = re.fullmatch(r"\d{0,3}.", building_number)
assert match
def test_city(self, faker, num_samples):
# generating possible variations for cities for hu_Hu locale
real_cities = [city.lower() for city in HuHuAddressProvider.real_city_names]
cities_part_suffix = [
"".join([part, suffix])
for part in HuHuAddressProvider.city_parts
for suffix in HuHuAddressProvider.city_suffixes
]
cities_prefix_part_suffix = [
"".join([pref, part_suffix])
for pref in HuHuAddressProvider.city_prefs
for part_suffix in cities_part_suffix
]
cities = real_cities + cities_part_suffix + cities_prefix_part_suffix
for _ in range(num_samples):
city = faker.city()
assert isinstance(city, str)
assert city.lower() in cities
assert city[0].isupper()
| TestHuHu |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/dagster_types.py | {
"start": 2933,
"end": 3639
} | class ____(graphene.Interface):
key = graphene.NonNull(graphene.String)
name = graphene.String()
display_name = graphene.NonNull(graphene.String)
description = graphene.String()
is_nullable = graphene.NonNull(graphene.Boolean)
is_list = graphene.NonNull(graphene.Boolean)
is_builtin = graphene.NonNull(graphene.Boolean)
is_nothing = graphene.NonNull(graphene.Boolean)
input_schema_type = graphene.Field(GrapheneConfigType)
output_schema_type = graphene.Field(GrapheneConfigType)
inner_types = non_null_list(lambda: GrapheneDagsterType)
metadata_entries = non_null_list(GrapheneMetadataEntry)
class Meta:
name = "DagsterType"
| GrapheneDagsterType |
python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {
"start": 75642,
"end": 84016
} | class ____(DFinePreTrainedModel):
# When using clones, all layers > 0 will be clones, but layer 0 *is* required
# We can't initialize the model on meta device as some weights are modified during the initialization
_no_split_modules = None
_tied_weights_keys = {
r"bbox_embed.(?![0])\d+": "bbox_embed.0",
r"class_embed.(?![0])\d+": "class_embed.0",
"model.decoder.class_embed": "class_embed",
"model.decoder.bbox_embed": "bbox_embed",
}
def __init__(self, config: DFineConfig):
super().__init__(config)
# D-FINE encoder-decoder model
self.eval_idx = config.eval_idx if config.eval_idx >= 0 else config.decoder_layers + config.eval_idx
self.model = DFineModel(config)
scaled_dim = round(config.layer_scale * config.hidden_size)
num_pred = config.decoder_layers
self.class_embed = nn.ModuleList([nn.Linear(config.d_model, config.num_labels) for _ in range(num_pred)])
self.bbox_embed = nn.ModuleList(
[
DFineMLP(config.hidden_size, config.hidden_size, 4 * (config.max_num_bins + 1), 3)
for _ in range(self.eval_idx + 1)
]
+ [
DFineMLP(scaled_dim, scaled_dim, 4 * (config.max_num_bins + 1), 3)
for _ in range(config.decoder_layers - self.eval_idx - 1)
]
)
self.model.decoder.class_embed = self.class_embed
self.model.decoder.bbox_embed = self.bbox_embed
# Initialize weights and apply final processing
self.post_init()
def _set_aux_loss(self, outputs_class, outputs_coord):
return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class, outputs_coord)]
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
pixel_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[list[dict]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple[torch.FloatTensor], DFineObjectDetectionOutput]:
r"""
Example:
```python
>>> import torch
>>> from transformers.image_utils import load_image
>>> from transformers import AutoImageProcessor, DFineForObjectDetection
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> image_processor = AutoImageProcessor.from_pretrained("ustc-community/dfine-xlarge-coco")
>>> model = DFineForObjectDetection.from_pretrained("ustc-community/dfine-xlarge-coco")
>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 300, 80]
>>> boxes = outputs.pred_boxes
>>> list(boxes.shape)
[1, 300, 4]
>>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
>>> target_sizes = torch.tensor([image.size[::-1]])
>>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)
>>> result = results[0] # first image in batch
>>> for score, label, box in zip(result["scores"], result["labels"], result["boxes"]):
... box = [round(i, 2) for i in box.tolist()]
... print(
... f"Detected {model.config.id2label[label.item()]} with confidence "
... f"{round(score.item(), 3)} at location {box}"
... )
Detected cat with confidence 0.958 at location [344.49, 23.4, 639.84, 374.27]
Detected cat with confidence 0.956 at location [11.71, 53.52, 316.64, 472.33]
Detected remote with confidence 0.947 at location [40.46, 73.7, 175.62, 117.57]
Detected sofa with confidence 0.918 at location [0.59, 1.88, 640.25, 474.74]
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(
pixel_values,
pixel_mask=pixel_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
labels=labels,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
denoising_meta_values = (
outputs.denoising_meta_values if return_dict else outputs[-1] if self.training else None
)
outputs_class = outputs.intermediate_logits if return_dict else outputs[2]
outputs_coord = outputs.intermediate_reference_points if return_dict else outputs[3]
predicted_corners = outputs.intermediate_predicted_corners if return_dict else outputs[4]
initial_reference_points = outputs.initial_reference_points if return_dict else outputs[5]
logits = outputs_class[:, -1]
pred_boxes = outputs_coord[:, -1]
loss, loss_dict, auxiliary_outputs, enc_topk_logits, enc_topk_bboxes = None, None, None, None, None
if labels is not None:
enc_topk_logits = outputs.enc_topk_logits if return_dict else outputs[-5]
enc_topk_bboxes = outputs.enc_topk_bboxes if return_dict else outputs[-4]
loss, loss_dict, auxiliary_outputs = self.loss_function(
logits,
labels,
self.device,
pred_boxes,
self.config,
outputs_class,
outputs_coord,
enc_topk_logits=enc_topk_logits,
enc_topk_bboxes=enc_topk_bboxes,
denoising_meta_values=denoising_meta_values,
predicted_corners=predicted_corners,
initial_reference_points=initial_reference_points,
**kwargs,
)
if not return_dict:
if auxiliary_outputs is not None:
output = (logits, pred_boxes) + (auxiliary_outputs,) + outputs
else:
output = (logits, pred_boxes) + outputs
return ((loss, loss_dict) + output) if loss is not None else output
return DFineObjectDetectionOutput(
loss=loss,
loss_dict=loss_dict,
logits=logits,
pred_boxes=pred_boxes,
auxiliary_outputs=auxiliary_outputs,
last_hidden_state=outputs.last_hidden_state,
intermediate_hidden_states=outputs.intermediate_hidden_states,
intermediate_logits=outputs.intermediate_logits,
intermediate_reference_points=outputs.intermediate_reference_points,
intermediate_predicted_corners=outputs.intermediate_predicted_corners,
initial_reference_points=outputs.initial_reference_points,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
init_reference_points=outputs.init_reference_points,
enc_topk_logits=outputs.enc_topk_logits,
enc_topk_bboxes=outputs.enc_topk_bboxes,
enc_outputs_class=outputs.enc_outputs_class,
enc_outputs_coord_logits=outputs.enc_outputs_coord_logits,
denoising_meta_values=outputs.denoising_meta_values,
)
# taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py
| DFineForObjectDetection |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_dataclasses.py | {
"start": 390,
"end": 9554
} | class ____:
a: str
b: bytes
def test_dataclass():
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bytes_schema()),
],
),
['a', 'b'],
)
s = SchemaSerializer(schema)
assert s.to_python(Foo(a='hello', b=b'more')) == IsStrictDict(a='hello', b=b'more')
assert s.to_python(Foo(a='hello', b=b'more'), mode='json') == IsStrictDict(a='hello', b='more')
j = s.to_json(Foo(a='hello', b=b'more'))
if on_pypy:
assert json.loads(j) == {'a': 'hello', 'b': 'more'}
else:
assert j == b'{"a":"hello","b":"more"}'
assert s.to_python(Foo(a='hello', b=b'more'), exclude={'b'}) == IsStrictDict(a='hello')
assert s.to_json(Foo(a='hello', b=b'more'), include={'a'}) == b'{"a":"hello"}'
def test_serialization_exclude():
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(
name='a', schema=core_schema.str_schema(), serialization_exclude_if=lambda x: x == 'bye'
),
core_schema.dataclass_field(name='b', schema=core_schema.bytes_schema(), serialization_exclude=True),
],
),
['a', 'b'],
)
s = SchemaSerializer(schema)
assert s.to_python(Foo(a='hello', b=b'more')) == {'a': 'hello'}
assert s.to_python(Foo(a='hello', b=b'more'), mode='json') == {'a': 'hello'}
# a = 'bye' excludes it
assert s.to_python(Foo(a='bye', b=b'more'), mode='json') == {}
j = s.to_json(Foo(a='hello', b=b'more'))
if on_pypy:
assert json.loads(j) == {'a': 'hello'}
else:
assert j == b'{"a":"hello"}'
j = s.to_json(Foo(a='bye', b=b'more'))
if on_pypy:
assert json.loads(j) == {}
else:
assert j == b'{}'
def test_serialization_alias():
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bytes_schema(), serialization_alias='BAR'),
],
),
['a', 'b'],
)
s = SchemaSerializer(schema)
assert s.to_python(Foo(a='hello', b=b'more'), by_alias=True) == IsStrictDict(a='hello', BAR=b'more')
assert s.to_python(Foo(a='hello', b=b'more'), mode='json', by_alias=True) == IsStrictDict(a='hello', BAR='more')
j = s.to_json(Foo(a='hello', b=b'more'), by_alias=True)
if on_pypy:
assert json.loads(j) == {'a': 'hello', 'BAR': 'more'}
else:
assert j == b'{"a":"hello","BAR":"more"}'
def test_properties():
@dataclasses.dataclass
class FooProp:
a: str
b: bytes
@property
def c(self) -> str:
return f'{self.a} {self.b.decode()}'
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'FooProp',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bytes_schema()),
],
computed_fields=[core_schema.computed_field('c', core_schema.str_schema())],
),
['a', 'b'],
)
s = SchemaSerializer(schema)
assert s.to_python(FooProp(a='hello', b=b'more')) == IsStrictDict(a='hello', b=b'more', c='hello more')
assert s.to_python(FooProp(a='hello', b=b'more'), mode='json') == IsStrictDict(a='hello', b='more', c='hello more')
j = s.to_json(FooProp(a='hello', b=b'more'))
if on_pypy:
assert json.loads(j) == {'a': 'hello', 'b': 'more', 'c': 'hello more'}
else:
assert j == b'{"a":"hello","b":"more","c":"hello more"}'
assert s.to_python(FooProp(a='hello', b=b'more'), exclude={'b'}) == IsStrictDict(a='hello', c='hello more')
assert s.to_json(FooProp(a='hello', b=b'more'), include={'a'}) == b'{"a":"hello"}'
@pytest.mark.skipif(sys.version_info < (3, 10), reason='slots are only supported for dataclasses in Python > 3.10')
def test_slots_mixed():
@dataclasses.dataclass(slots=True)
class Model:
x: int
y: dataclasses.InitVar[str]
z: ClassVar[str] = 'z-classvar'
@dataclasses.dataclass
class SubModel(Model):
x2: int
y2: dataclasses.InitVar[str]
z2: ClassVar[str] = 'z2-classvar'
schema = core_schema.dataclass_schema(
SubModel,
core_schema.dataclass_args_schema(
'SubModel',
[
core_schema.dataclass_field(name='x', schema=core_schema.int_schema()),
core_schema.dataclass_field(name='y', init_only=True, schema=core_schema.str_schema()),
core_schema.dataclass_field(name='x2', schema=core_schema.int_schema()),
core_schema.dataclass_field(name='y2', init_only=True, schema=core_schema.str_schema()),
],
),
['x', 'x2'],
slots=True,
)
dc = SubModel(x=1, y='a', x2=2, y2='b')
assert dataclasses.asdict(dc) == {'x': 1, 'x2': 2}
s = SchemaSerializer(schema)
assert s.to_python(dc) == {'x': 1, 'x2': 2}
assert s.to_json(dc) == b'{"x":1,"x2":2}'
@pytest.mark.xfail(reason='dataclasses do not serialize extras')
def test_extra_custom_serializer():
@dataclasses.dataclass
class Model:
pass
schema = core_schema.dataclass_schema(
Model,
core_schema.dataclass_args_schema(
'Model',
[],
extra_behavior='allow',
# extras_schema=core_schema.any_schema(
# serialization=core_schema.plain_serializer_function_ser_schema(
# lambda v: v + ' bam!',
# )
# )
),
[],
)
s = SchemaSerializer(schema)
v = SchemaValidator(schema)
m = v.validate_python({'extra': 'extra'})
assert s.to_python(m) == {'extra': 'extra bam!'}
def test_dataclass_initvar_not_required_on_union_ser() -> None:
@dataclasses.dataclass
class Foo:
x: int
init_var: dataclasses.InitVar[int] = 1
@dataclasses.dataclass
class Bar:
x: int
schema = core_schema.union_schema(
[
core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='x', schema=core_schema.int_schema()),
core_schema.dataclass_field(
name='init_var',
init_only=True,
schema=core_schema.with_default_schema(core_schema.int_schema(), default=1),
),
],
),
['x'],
post_init=True,
),
core_schema.dataclass_schema(
Bar,
core_schema.dataclass_args_schema(
'Bar', [core_schema.dataclass_field(name='x', schema=core_schema.int_schema())]
),
['x'],
),
]
)
s = SchemaSerializer(schema)
assert s.to_python(Foo(x=1), warnings='error') == {'x': 1}
assert s.to_python(Foo(x=1, init_var=2), warnings='error') == {'x': 1}
@pytest.mark.parametrize(
'config,runtime,expected',
[
(True, True, {'my_alias': 'hello'}),
(True, False, {'my_field': 'hello'}),
(True, None, {'my_alias': 'hello'}),
(False, True, {'my_alias': 'hello'}),
(False, False, {'my_field': 'hello'}),
(False, None, {'my_field': 'hello'}),
(None, True, {'my_alias': 'hello'}),
(None, False, {'my_field': 'hello'}),
(None, None, {'my_field': 'hello'}),
],
)
def test_by_alias_and_name_config_interaction(config, runtime, expected) -> None:
"""This test reflects the priority that applies for config vs runtime serialization alias configuration.
If the runtime value (by_alias) is set, that value is used.
If the runtime value is unset, the config value (serialize_by_alias) is used.
If neither are set, the default, False, is used.
"""
@dataclasses.dataclass
class Foo:
my_field: str
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(
name='my_field', schema=core_schema.str_schema(), serialization_alias='my_alias'
),
],
),
['my_field'],
config=core_schema.CoreConfig(serialize_by_alias=config or False),
)
s = SchemaSerializer(schema)
assert s.to_python(Foo(my_field='hello'), by_alias=runtime) == expected
| Foo |
python | pytorch__pytorch | test/quantization/core/test_quantized_op.py | {
"start": 368650,
"end": 389770
} | class ____(TestCase):
    """Tests the correctness of the quantized::qnnpack_relu op."""
    # ReLU: QNNPACK quantized output must equal the FP-clamped reference
    # re-quantized with the identical (scale, zero_point).
    @given(X=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
                       qparams=hu.qparams(dtypes=torch.quint8,
                                          zero_point_min=0,
                                          zero_point_max=0)))
    def test_qnnpack_relu(self, X):
        with override_quantized_engine('qnnpack'):
            X, (scale, zero_point, torch_type) = X
            relu = torch.nn.functional.relu
            X = torch.from_numpy(X)
            Y = X.clone()
            qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point, dtype=torch_type)
            qY_hat = relu(qX)
            # Reference: clamp negatives to zero, then quantize with same qparams.
            Y[Y < 0] = 0
            qY = torch.quantize_per_tensor(Y, scale=scale, zero_point=zero_point, dtype=torch_type)
            self.assertEqual(qY, qY_hat)
    """Tests the correctness of the quantized::qnnpack_tanh op."""
    @skipIfNoFBGEMM
    def test_qnnpack_tanh(self):
        # Note: In QNNPACK the output scale and zero_point can only be
        # 2.0/256, 128 respectively, as it uses a LUT with 256 bins.
        shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
        memory_formats = (torch.channels_last, torch.contiguous_format)
        test_cases = itertools.product(shapes, memory_formats)
        for shape, memory_format in test_cases:
            X, scale, zero_point, torch_type = torch.randn(*shape), 1.0, 0, torch.quint8
            # channels_last only makes sense for 4-D (NCHW) tensors.
            if memory_format == torch.channels_last and len(shape) != 4:
                continue
            X = X.to(memory_format=memory_format)
            qX = torch.quantize_per_tensor(X, scale=scale,
                                           zero_point=zero_point,
                                           dtype=torch_type)
            # Floating point reference
            Y = torch.tanh(qX.dequantize())
            qY = torch.quantize_per_tensor(Y, scale=1.0 / 128, zero_point=128,
                                           dtype=torch.quint8)
            # Cross-check QNNPACK against both the FP reference and FBGEMM.
            with override_quantized_engine('fbgemm'):
                qYserver = torch.tanh(qX)
            with override_quantized_engine('qnnpack'):
                qY_hat = torch.tanh(qX)
                self.assertEqual(
                    qY, qY_hat,
                    msg=f"QNNPACK TanH failed (FP ref), memory_format {memory_format}")
                self.assertEqual(
                    qYserver, qY_hat,
                    msg=f"QNNPACK TanH failed (FBGEMM ref), memory_format {memory_format}")
    """Tests the correctness of the quantized::qnnpack_sigmoid op."""
    @skipIfNoFBGEMM
    def test_qnnpack_sigmoid(self):
        # Note: In QNNPACK the output scale and zero_point can only be
        # 1.0/256, 0 respectively, as it uses a LUT with 256 bins.
        shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
        memory_formats = (torch.channels_last, torch.contiguous_format)
        test_cases = itertools.product(shapes, memory_formats)
        for shape, memory_format in test_cases:
            X, scale, zero_point, torch_type = torch.randn(*shape), 1.0, 0, torch.quint8
            if memory_format == torch.channels_last and len(shape) != 4:
                continue
            X = X.to(memory_format=memory_format)
            qX = torch.quantize_per_tensor(X, scale=scale,
                                           zero_point=zero_point,
                                           dtype=torch_type)
            # Floating point reference
            Y = torch.sigmoid(qX.dequantize())
            qY = torch.quantize_per_tensor(Y, scale=1.0 / 256, zero_point=0,
                                           dtype=torch.quint8)
            with override_quantized_engine('fbgemm'):
                qYserver = torch.sigmoid(qX)
            with override_quantized_engine('qnnpack'):
                qY_hat = torch.sigmoid(qX)
                self.assertEqual(
                    qY, qY_hat,
                    msg=f"QNNPACK Sigmoid failed (FP ref), memory_format {memory_format}")
                self.assertEqual(
                    qYserver, qY_hat,
                    msg=f"QNNPACK Sigmoid failed (FBGEMM ref), memory_format {memory_format}")
    # Sweep the whole representable input range in half-quantum steps so every
    # LUT bin of the QNNPACK sigmoid is exercised.
    @skipIfNoFBGEMM
    def test_qnnpack_sigmoid_sweep(self):
        # Input parameters
        f_min = -4.0
        f_max = 4.0
        scale = (f_max - f_min) / 256.0
        zero_point = 128
        dtype = torch.quint8
        step = scale / 2.0
        x = np.arange(f_min, f_max + step, step)
        X = torch.from_numpy(x).to(torch.float32)
        qX = torch.quantize_per_tensor(X, scale=scale,
                                       zero_point=zero_point,
                                       dtype=dtype)
        dqX = qX.dequantize()
        # Floating point reference
        Y = torch.sigmoid(dqX)
        qY = torch.quantize_per_tensor(Y, scale=1.0 / 256, zero_point=0,
                                       dtype=torch.quint8)
        with override_quantized_engine('fbgemm'):
            qYserver = torch.sigmoid(qX)
        with override_quantized_engine('qnnpack'):
            qY_hat = torch.sigmoid(qX)
            self.assertEqual(qY, qY_hat,
                             msg="QNNPACK Sigmoid failed (FP ref)!")
            self.assertEqual(qYserver, qY_hat,
                             msg="QNNPACK Sigmoid failed (FBGEMM ref)!")
    """Tests the correctness of the quantized::add (qnnpack) op."""
    @settings(suppress_health_check=(HealthCheck.filter_too_much,))
    @given(A=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
                       qparams=hu.qparams(dtypes=[torch.quint8, torch.qint8])),
           zero_point=st.sampled_from([0, 2, 5, 15, 127]),
           scale_A=st.sampled_from([0.001, 0.057, 0.889, 12.3]),
           scale_B=st.sampled_from([0.008, 0.0821, 0.67, 7]),
           scale_C=st.sampled_from([0.003, 0.07821, 0.457, 7.34]),)
    def test_qnnpack_add(self, A, zero_point, scale_A, scale_B, scale_C):
        with override_quantized_engine('qnnpack'):
            A_temp = A
            for channels_last in [True, False]:
                if channels_last and len(A_temp[0].shape) != 4:
                    continue
                # Both operands reuse the same drawn array; the drawn qparams
                # (scale_a/scale_b, zero_point_A/zero_point_B) are unused --
                # the sampled scale_A/scale_B/zero_point args are used instead.
                A, (scale_a, zero_point_A, torch_type) = A_temp
                B, (scale_b, zero_point_B, torch_type) = A_temp
                A = torch.from_numpy(A)
                B = torch.from_numpy(B)
                if torch_type == torch.qint8 and not torch.backends.xnnpack.enabled:
                    continue
                if channels_last:
                    A = A.to(memory_format=torch.channels_last)
                    B = B.to(memory_format=torch.channels_last)
                # NOTE(review): `//` here is *floor* division on floats, so the
                # ratio is truncated before the range check -- likely `/` was
                # intended; confirm against the requantization-range constraint.
                assume(scale_A // scale_C >= 2**-14)
                assume(scale_A // scale_C < 2**8)
                assume(scale_B // scale_C >= 2**-14)
                assume(scale_B // scale_C < 2**8)
                zero_point_C = 127
                np_dtype = np.uint8
                if torch_type == torch.qint8:
                    zero_point_C = 0
                    np_dtype = np.int8
                qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point,
                                               dtype=torch_type)
                qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point,
                                               dtype=torch_type)
                # Add ground truth
                C = (qA.dequantize() + qB.dequantize()).numpy()
                qC = _quantize(C, scale_C, zero_point_C, dtype=np_dtype)
                qC_qnnp = torch.ops.quantized.add(qA, qB, scale_C, zero_point_C)
                np.testing.assert_equal(qC, qC_qnnp.int_repr(),
                                        "Quantized addition failed.")
                # Fused add + ReLU path.
                Crelu = C.copy()
                Crelu[C < 0] = 0
                qCrelu = torch.quantize_per_tensor(torch.from_numpy(Crelu), scale_C,
                                                   zero_point_C, dtype=torch_type)
                qCrelu_hat = torch.ops.quantized.add_relu(qA, qB, scale=scale_C, zero_point=zero_point_C)
                np.testing.assert_equal(qCrelu.int_repr().numpy(), qCrelu_hat.int_repr(),
                                        "Quantized addition with ReLU failed.")
    """Tests the correctness of the quantized::add (qnnpack) mul."""
    @settings(suppress_health_check=(HealthCheck.filter_too_much,))
    @given(A=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5),
                       qparams=hu.qparams(dtypes=[torch.quint8, torch.qint8])),
           zero_point=st.sampled_from([0, 2, 5, 15, 127]),
           scale_A=st.sampled_from([0.3, 0.57, 0.889]),
           scale_B=st.sampled_from([0.8, 0.821, 0.67]),
           scale_C=st.sampled_from([0.3, 0.7821, 0.457]),)
    def test_qnnpack_mul(self, A, zero_point, scale_A, scale_B, scale_C):
        with override_quantized_engine('qnnpack'):
            A_temp = A
            for channels_last in [True, False]:
                if channels_last and len(A_temp[0].shape) != 4:
                    continue
                A, (scale_a, zero_point_A, torch_type) = A_temp
                B, (scale_b, zero_point_B, torch_type) = A_temp
                A = torch.from_numpy(A)
                B = torch.from_numpy(B)
                if torch_type == torch.qint8 and not torch.backends.xnnpack.enabled:
                    continue
                if channels_last:
                    A = A.to(memory_format=torch.channels_last)
                    B = B.to(memory_format=torch.channels_last)
                # NOTE(review): same floor-division concern as in
                # test_qnnpack_add -- `//` truncates before the range check.
                assume(scale_A // scale_C >= 2**-14)
                assume(scale_A // scale_C < 2**8)
                assume(scale_B // scale_C >= 2**-14)
                assume(scale_B // scale_C < 2**8)
                zero_point_C = 127
                np_dtype = np.uint8
                if torch_type == torch.qint8:
                    zero_point_C = 0
                    np_dtype = np.int8
                qA = torch.quantize_per_tensor(A, scale=scale_A, zero_point=zero_point,
                                               dtype=torch_type)
                qB = torch.quantize_per_tensor(B, scale=scale_B, zero_point=zero_point,
                                               dtype=torch_type)
                # Add ground truth
                C = (qA.dequantize() * qB.dequantize()).numpy()
                qC = _quantize(C, scale_C, zero_point_C, dtype=np_dtype)
                qC_qnnp = torch.ops.quantized.mul(qA, qB, scale_C, zero_point_C)
                np.testing.assert_equal(qC, qC_qnnp.int_repr(),
                                        "Quantized addition failed.")
                Crelu = C.copy()
                Crelu[C < 0] = 0
                qCrelu = torch.quantize_per_tensor(torch.from_numpy(Crelu), scale_C,
                                                   zero_point_C, dtype=torch_type)
                qCrelu_hat = torch.ops.quantized.mul_relu(qA, qB, scale=scale_C, zero_point=zero_point_C)
                np.testing.assert_equal(qCrelu.int_repr().numpy(), qCrelu_hat.int_repr(),
                                        "Quantized addition with ReLU failed.")
    """Tests that quantized add works with broadcasting """
    def test_qnnpack_add_broadcast(self):
        # `dtype` is read from the enclosing loop at call time (late binding);
        # _run_test is only invoked inside that loop, so this is safe.
        def _run_test(A, B):
            qA = torch.quantize_per_tensor(A, 0.02, 0, dtype)
            qB = torch.quantize_per_tensor(B, 0.04, 2, dtype)
            output_scale = 0.01
            output_zp = 1
            # ground truth
            C = qA.dequantize() + qB.dequantize()
            qC = torch.quantize_per_tensor(C, output_scale, output_zp, dtype)
            # quantized -- both argument orders must broadcast identically.
            qC_hat_1 = torch.ops.quantized.add(qA, qB, output_scale, output_zp)
            qC_hat_2 = torch.ops.quantized.add(qB, qA, output_scale, output_zp)
            self.assertTrue(torch.allclose(qC.dequantize(), qC_hat_1.dequantize()))
            self.assertTrue(torch.allclose(qC.dequantize(), qC_hat_2.dequantize()))
        with override_quantized_engine("qnnpack"):
            for dtype in (torch.qint8, torch.quint8):
                if dtype == torch.qint8 and not torch.backends.xnnpack.enabled:
                    continue
                for channels_last in [True, False]:
                    # 4d
                    A = torch.randn(1, 3, 4, 4)
                    B = torch.randn(1, 1, 1, 1)
                    if channels_last:
                        A = A.to(memory_format=torch.channels_last)
                        B = B.to(memory_format=torch.channels_last)
                    _run_test(A, B)
                    # 5d
                    C = torch.randn(1, 3, 4, 4, 4)
                    D = torch.randn(1, 1, 1, 1, 1)
                    if channels_last:
                        C = C.to(memory_format=torch.channels_last_3d)
                        D = D.to(memory_format=torch.channels_last_3d)
                    _run_test(C, D)
    """Tests the correctness of quantized::qnnpack_maxpool2d op."""
    @given(A=hu.tensor(shapes=hu.array_shapes(4, 4, 3, 5),
                       qparams=hu.qparams(dtypes=torch.quint8)),
           kernel=st.sampled_from([2, 4]),
           stride=st.sampled_from([1, 2]),
           padding=st.sampled_from([1, 2]))
    def test_qnnpack_maxpool2d(self, A, kernel, stride, padding):
        import torch.nn.functional as F
        with override_quantized_engine('qnnpack'):
            A, (scale, zero_point, torch_type) = A
            X = torch.from_numpy(A)
            np_type = np.uint8
            dilation = 1
            # Check constraints
            assume(kernel // 2 >= padding)  # Kernel cannot be overhanging!
            iH, iW = X.shape[-2:]
            oH = pool_output_shape(iH, kernel, padding, stride, dilation)
            assume(oH > 0)
            oW = pool_output_shape(iW, kernel, padding, stride, dilation)
            assume(oW > 0)
            k = (kernel, kernel)
            s = (stride, stride)
            d = (dilation, dilation)
            p = (padding, padding)
            q_max_pool = torch.ops.quantized.max_pool2d
            a = scale * (X - zero_point).to(dtype=torch.float)
            qa = torch.quantize_per_tensor(a, scale=scale, zero_point=zero_point,
                                           dtype=torch_type)
            a_ref = qa.dequantize()
            a_pool = F.max_pool2d(a_ref, kernel_size=k, stride=s, padding=p,
                                  dilation=d)
            # NOTE(review): a_pool_nhwc is computed but never used below.
            a_pool_nhwc = a_pool.permute([0, 2, 3, 1])
            qa_pool = q_max_pool(qa, k, s, p, d, ceil_mode=False)
            # Despite the name, this is the *dequantized* pooled tensor; max
            # pooling only selects existing values, so exact equality holds.
            qa_pool_int = qa_pool.dequantize()
            np.testing.assert_equal(a_pool.numpy(), qa_pool_int.numpy())
    # Average pooling: compare int_repr against the quantized FP reference
    # (decimal=0 allows one-ULP rounding differences).
    @given(batch_size=st.integers(1, 5),
           channels=st.sampled_from([2, 4, 5, 8, 16, 32]),
           height=st.integers(4, 10),
           width=st.integers(4, 10),
           kernel=st.integers(2, 5),
           stride=st.integers(1, 2),
           padding=st.integers(1, 2),
           scale=st.floats(0.2, 1.6),
           zero_point=st.integers(0, 25)
           )
    def test_avg_pool2d(
            self,
            batch_size,
            channels,
            height,
            width,
            kernel,
            stride,
            padding,
            scale,
            zero_point
    ):
        with override_quantized_engine('qnnpack'):
            import torch.nn.functional as F
            X_init = torch.from_numpy(np.random.randint(
                0, 50, (batch_size, channels, height, width)))
            X = scale * (X_init - zero_point).to(dtype=torch.float)
            # Check constraints
            assume(kernel // 2 >= padding)  # Kernel cannot be overhanging!
            iH, iW = X.shape[-2:]
            oH = pool_output_shape(iH, kernel, padding, stride, 1)
            assume(oH > 0)
            oW = pool_output_shape(iW, kernel, padding, stride, 1)
            assume(oW > 0)
            k = (kernel, kernel)
            s = (stride, stride)
            p = (padding, padding)
            q_avg_pool = torch.ao.nn.quantized.functional.avg_pool2d
            x_q = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                            dtype=torch.quint8)
            a_pool = F.avg_pool2d(x_q.dequantize().to(torch.float), kernel_size=k, stride=s, padding=p)
            qa_pool = q_avg_pool(x_q, k, s, p)
            # Quantize Ref Output
            a_pool_q = torch.quantize_per_tensor(a_pool, scale=scale, zero_point=zero_point,
                                                 dtype=torch.quint8)
            np.testing.assert_array_almost_equal(a_pool_q.int_repr().numpy(),
                                                 qa_pool.int_repr().numpy(), decimal=0)
    @given(batch_size=st.integers(1, 5),
           channels=st.sampled_from([2, 4, 5, 8, 16, 32]),
           height=st.integers(4, 20),
           width=st.integers(4, 20),
           output_height=st.integers(2, 10),
           output_width=st.integers(2, 10),
           scale=st.floats(0.2, 1.6),
           zero_point=st.integers(0, 25)
           )
    def test_adaptive_avg_pool2d(
            self,
            batch_size,
            channels,
            height,
            width,
            output_height,
            output_width,
            scale,
            zero_point
    ):
        with override_quantized_engine('qnnpack'):
            # Check constraints
            assume(height >= output_height)
            assume(width >= output_width)
            import torch.nn.functional as F
            X_init = torch.from_numpy(np.random.randint(
                0, 50, (batch_size, channels, height, width)))
            X = scale * (X_init - zero_point).to(dtype=torch.float)
            iH, iW = X.shape[-2:]
            q_avg_pool = torch.ao.nn.quantized.functional.adaptive_avg_pool2d
            x_q = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                            dtype=torch.quint8)
            a_pool = F.adaptive_avg_pool2d(x_q.dequantize().to(torch.float), (output_height, output_width))
            qa_pool = q_avg_pool(x_q, (output_height, output_width))
            # Quantize Ref Output
            a_pool_q = torch.quantize_per_tensor(a_pool, scale=scale, zero_point=zero_point,
                                                 dtype=torch.quint8)
            np.testing.assert_array_almost_equal(a_pool_q.int_repr().numpy(),
                                                 qa_pool.int_repr().numpy(), decimal=0)
    # Reduction over the spatial dims (H, W) via torch.mean on a quantized tensor.
    @given(batch_size=st.integers(1, 5),
           channels=st.sampled_from([2, 4, 5, 8, 16, 32]),
           height=st.integers(4, 10),
           width=st.integers(4, 10),
           scale=st.floats(0.02, 2.6),
           zero_point=st.integers(0, 25))
    def test_mean(self, batch_size, channels, height, width, scale, zero_point):
        with override_quantized_engine('qnnpack'):
            dim = (2, 3)
            X_init = torch.from_numpy(np.random.randint(
                0, 50, (batch_size, channels, height, width)))
            X = scale * (X_init - zero_point).to(dtype=torch.float)
            qX = torch.quantize_per_tensor(X, scale, zero_point, torch.quint8)
            Y = torch.mean(qX.dequantize(), dim)
            Y = torch.quantize_per_tensor(Y, scale, zero_point, torch.quint8)
            qY = torch.mean(qX, dim)
            np.testing.assert_array_almost_equal(Y.int_repr().numpy(), qY.int_repr().numpy(), decimal=0)
    """Tests the correctness of the quantized::hardtanh op."""
    def test_hardtanh(self):
        # Silently skips (rather than failing) when QNNPACK is unavailable.
        if 'qnnpack' not in torch.backends.quantized.supported_engines:
            return
        with override_quantized_engine('qnnpack'):
            shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))
            memory_formats = (torch.channels_last, torch.contiguous_format)
            min_vals = (-0.5, -0.3, 0.5)
            max_vals = (-0.3, 0.3, 0.7)
            test_cases = itertools.product(shapes, memory_formats, min_vals, max_vals)
            for shape, memory_format, min_val, max_val in test_cases:
                X, scale, zero_point, torch_type = torch.randn(*shape), 1.0, 0, torch.quint8
                if memory_format == torch.channels_last and len(shape) != 4:
                    continue
                # Reference: clamp to [min_val, max_val] in FP, then quantize.
                Y = X.clone()
                Y[Y < min_val] = min_val
                Y[Y > max_val] = max_val
                qY = torch.quantize_per_tensor(Y, scale=scale,
                                               zero_point=zero_point, dtype=torch_type)
                qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,
                                               dtype=torch_type)
                qY_hat = torch.ao.nn.quantized.functional.hardtanh(qX, min_val, max_val)
                self.assertEqual(
                    qY, qY_hat,
                    msg=f"hardtanh failed:\nactual {qY_hat}\nexpected {qY}\nmemory_format {memory_format}")
    """Tests the correctness of the tensor comparators."""
| TestQNNPackOps |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py | {
"start": 38092,
"end": 39442
} | class ____(Benchmark):
r"""
StretchedV objective function.
This class defines the Stretched V [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{StretchedV}}(x) = \sum_{i=1}^{n-1} t^{1/4}
[\sin (50t^{0.1}) + 1]^2
Where, in this exercise:
.. math::
t = x_{i+1}^2 + x_i^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [0., 0.]` when
:math:`n = 2`.
.. [1] Adorio, E. MVF - "Multivariate Test Functions Library in C for
Unconstrained Global Optimization", 2005
TODO All the sources disagree on the equation, in some the 1 is in the
brackets, in others it is outside. In Jamil#142 it's not even 1. Here
we go with the Adorio option.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10] * self.N, [10] * self.N))
self.global_optimum = [[0, 0]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
t = x[1:] ** 2 + x[: -1] ** 2
return sum(t ** 0.25 * (sin(50.0 * t ** 0.1 + 1) ** 2))
| StretchedV |
python | TheAlgorithms__Python | graphs/bidirectional_breadth_first_search.py | {
"start": 753,
"end": 3315
} | class ____:
"""
# Comment out slow pytests...
# 9.15s call graphs/bidirectional_breadth_first_search.py:: \
# graphs.bidirectional_breadth_first_search.BreadthFirstSearch
# >>> bfs = BreadthFirstSearch((0, 0), (len(grid) - 1, len(grid[0]) - 1))
# >>> (bfs.start.pos_y + delta[3][0], bfs.start.pos_x + delta[3][1])
(0, 1)
# >>> [x.pos for x in bfs.get_successors(bfs.start)]
[(1, 0), (0, 1)]
# >>> (bfs.start.pos_y + delta[2][0], bfs.start.pos_x + delta[2][1])
(1, 0)
# >>> bfs.retrace_path(bfs.start)
[(0, 0)]
# >>> bfs.search() # doctest: +NORMALIZE_WHITESPACE
[(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1),
(5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]
"""
def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
self.start = Node(start[1], start[0], goal[1], goal[0], None)
self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
self.node_queue = [self.start]
self.reached = False
def search(self) -> Path | None:
while self.node_queue:
current_node = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
self.reached = True
return self.retrace_path(current_node)
successors = self.get_successors(current_node)
for node in successors:
self.node_queue.append(node)
if not self.reached:
return [self.start.pos]
return None
def get_successors(self, parent: Node) -> list[Node]:
"""
Returns a list of successors (both in the grid and free spaces)
"""
successors = []
for action in delta:
pos_x = parent.pos_x + action[1]
pos_y = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
)
return successors
def retrace_path(self, node: Node | None) -> Path:
"""
Retrace the path from parents to parents until start node
"""
current_node = node
path = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
current_node = current_node.parent
path.reverse()
return path
| BreadthFirstSearch |
python | celery__celery | t/unit/worker/test_control.py | {
"start": 1501,
"end": 1985
} | class ____:
def test_shutdown(self):
with patch('celery.worker.pidbox.ignore_errors') as eig:
parent = Mock()
pbox = Pidbox(parent)
pbox._close_channel = Mock()
assert pbox.c is parent
pconsumer = pbox.consumer = Mock()
cancel = pconsumer.cancel
pbox.shutdown(parent)
eig.assert_called_with(parent, cancel)
pbox._close_channel.assert_called_with(parent)
| test_Pidbox |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/B021.py | {
"start": 372,
"end": 487
} | class ____:
    f"""hello {VARIABLE}!"""  # B021 trigger: f-string in docstring position is not a docstring.
def foo1():
    "hello world!"  # OK: a plain string literal is a valid docstring (no B021).
def foo2():
    f"hello {VARIABLE}!"  # B021 trigger: f-string in docstring position.
| bar2 |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_mean_to_be_between.py | {
"start": 2761,
"end": 16692
} | class ____(ColumnAggregateExpectation):
    __doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
    ExpectColumnMeanToBeBetween is a \
    Column Aggregate Expectation.
    Column Aggregate Expectations are one of the most common types of Expectation.
    They are evaluated for a single column, and produce an aggregate Metric, such as a mean, standard deviation, number of unique values, column type, etc.
    If that Metric meets the conditions you set, the Expectation considers that data valid.
    Args:
        column (str): \
            {COLUMN_DESCRIPTION}
        min_value (float or None): \
            {MIN_VALUE_DESCRIPTION}
        max_value (float or None): \
            {MAX_VALUE_DESCRIPTION}
        strict_min (boolean): \
            {STRICT_MIN_DESCRIPTION}
        strict_max (boolean): \
            {STRICT_MAX_DESCRIPTION}
    Other Parameters:
        result_format (str or None): \
            Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
            For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
            modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
        severity (str or None): \
            {FAILURE_SEVERITY_DESCRIPTION} \
            For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
    Returns:
        An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
        Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
    Notes:
        * min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
        * If min_value is None, then max_value is treated as an upper bound.
        * If max_value is None, then min_value is treated as a lower bound.
        * observed_value field in the result object is customized for this expectation to be a float \
          representing the true mean for the column
    See Also:
        [ExpectColumnMedianToBeBetween](https://greatexpectations.io/expectations/expect_column_median_to_be_between)
        [ExpectColumnStdevToBeBetween](https://greatexpectations.io/expectations/expect_column_stdev_to_be_between)
    Supported Data Sources:
        [{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
        [{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
    Data Quality Issues:
        {DATA_QUALITY_ISSUES[0]}
    Example Data:
                test    test2
            0   1       1
            1   1.3     7
            2   .8      2.5
            3   2       3
    Code Examples:
        Passing Case:
            Input:
                ExpectColumnMeanToBeBetween(
                    column="test",
                    min_value=1,
                    max_value=3
                )
            Output:
                {{
                  "exception_info": {{
                    "raised_exception": false,
                    "exception_traceback": null,
                    "exception_message": null
                  }},
                  "result": {{
                    "observed_value": 1.275
                  }},
                  "meta": {{}},
                  "success": true
                }}
        Failing Case:
            Input:
                ExpectColumnMeanToBeBetween(
                    column="test2",
                    min_value=1,
                    max_value=3
                )
            Output:
                {{
                  "exception_info": {{
                    "raised_exception": false,
                    "exception_traceback": null,
                    "exception_message": null
                  }},
                  "result": {{
                    "observed_value": 3.375
                  }},
                  "meta": {{}},
                  "success": false
                }}
    """  # noqa: E501 # FIXME CoP
    # Bounds are pydantic fields so they participate in schema generation
    # and suite-parameter substitution.
    min_value: Optional[Comparable] = pydantic.Field(
        default=None, description=MIN_VALUE_DESCRIPTION
    )
    max_value: Optional[Comparable] = pydantic.Field(
        default=None, description=MAX_VALUE_DESCRIPTION
    )
    strict_min: Union[bool, SuiteParameterDict] = pydantic.Field(
        default=False, description=STRICT_MIN_DESCRIPTION
    )
    strict_max: Union[bool, SuiteParameterDict] = pydantic.Field(
        default=False, description=STRICT_MAX_DESCRIPTION
    )
    library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
        "maturity": "production",
        "tags": ["core expectation", "column aggregate expectation"],
        "contributors": ["@great_expectations"],
        "requirements": [],
        "has_full_test_suite": True,
        "manually_reviewed_code": True,
    }
    _library_metadata = library_metadata
    # Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\ # noqa: E501 # FIXME CoP
    metric_dependencies = ("column.mean",)
    success_keys = (
        "min_value",
        "strict_min",
        "max_value",
        "strict_max",
    )
    args_keys = (
        "column",
        "min_value",
        "max_value",
        "strict_min",
        "strict_max",
    )
    class Config:
        title = "Expect column mean to be between"
        @staticmethod
        def schema_extra(schema: Dict[str, Any], model: Type[ExpectColumnMeanToBeBetween]) -> None:
            # Augment the generated JSON schema with expectation metadata
            # (on top of what the base class already injects).
            ColumnAggregateExpectation.Config.schema_extra(schema, model)
            schema["properties"]["metadata"]["properties"].update(
                {
                    "data_quality_issues": {
                        "title": "Data Quality Issues",
                        "type": "array",
                        "const": DATA_QUALITY_ISSUES,
                    },
                    "library_metadata": {
                        "title": "Library Metadata",
                        "type": "object",
                        "const": model._library_metadata,
                    },
                    "short_description": {
                        "title": "Short Description",
                        "type": "string",
                        "const": EXPECTATION_SHORT_DESCRIPTION,
                    },
                    "supported_data_sources": {
                        "title": "Supported Data Sources",
                        "type": "array",
                        "const": SUPPORTED_DATA_SOURCES,
                    },
                }
            )
    @classmethod
    @override
    def _prescriptive_template(
        cls,
        renderer_configuration: RendererConfiguration,
    ) -> RendererConfiguration:
        # Declare the renderer params and their types, then build the
        # human-readable template string from whichever bounds are set.
        add_param_args: AddParamArgs = (
            ("column", RendererValueType.STRING),
            ("min_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
            ("max_value", [RendererValueType.NUMBER, RendererValueType.DATETIME]),
            ("strict_min", RendererValueType.BOOLEAN),
            ("strict_max", RendererValueType.BOOLEAN),
        )
        for name, param_type in add_param_args:
            renderer_configuration.add_param(name=name, param_type=param_type)
        params = renderer_configuration.params
        if not params.min_value and not params.max_value:
            template_str = "mean may have any numerical value."
        else:
            # strict_* flips "or equal to" off in the rendered comparison text.
            at_least_str = "greater than or equal to"
            if params.strict_min:
                at_least_str = cls._get_strict_min_string(
                    renderer_configuration=renderer_configuration
                )
            at_most_str = "less than or equal to"
            if params.strict_max:
                at_most_str = cls._get_strict_max_string(
                    renderer_configuration=renderer_configuration
                )
            if params.min_value and params.max_value:
                template_str = (
                    f"mean must be {at_least_str} $min_value and {at_most_str} $max_value."
                )
            elif not params.min_value:
                template_str = f"mean must be {at_most_str} $max_value."
            else:
                template_str = f"mean must be {at_least_str} $min_value."
        if renderer_configuration.include_column_name:
            template_str = f"$column {template_str}"
        renderer_configuration.template_str = template_str
        return renderer_configuration
    @classmethod
    @override
    @renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
    @render_suite_parameter_string
    def _prescriptive_renderer(  # type: ignore[override] # TODO: Fix this type ignore
        cls,
        configuration: ExpectationConfiguration,
        result: Optional[ExpectationValidationResult] = None,
        runtime_configuration: Optional[dict] = None,
        **kwargs,
    ):
        # Legacy renderer: builds the same sentence as _prescriptive_template
        # directly from the expectation kwargs, plus row-condition styling.
        runtime_configuration = runtime_configuration or {}
        include_column_name = runtime_configuration.get("include_column_name") is not False
        styling = runtime_configuration.get("styling")
        params = substitute_none_for_missing(
            configuration.kwargs,
            [
                "column",
                "min_value",
                "max_value",
                "row_condition",
                "condition_parser",
                "strict_min",
                "strict_max",
            ],
        )
        template_str = ""
        if (params["min_value"] is None) and (params["max_value"] is None):
            template_str = "mean may have any numerical value."
        else:
            at_least_str, at_most_str = handle_strict_min_max(params)
            if params["min_value"] is not None and params["max_value"] is not None:
                template_str = (
                    f"mean must be {at_least_str} $min_value and {at_most_str} $max_value."
                )
            elif params["min_value"] is None:
                template_str = f"mean must be {at_most_str} $max_value."
            elif params["max_value"] is None:
                template_str = f"mean must be {at_least_str} $min_value."
        if include_column_name:
            template_str = f"$column {template_str}"
        if params["row_condition"] is not None:
            conditional_template_str = parse_row_condition_string(params["row_condition"])
            template_str, styling = _style_row_condition(
                conditional_template_str,
                template_str,
                params,
                styling,
            )
        return [
            RenderedStringTemplateContent(
                content_block_type="string_template",
                string_template={
                    "template": template_str,
                    "params": params,
                    "styling": styling,
                },
            )
        ]
    @classmethod
    @renderer(renderer_type=LegacyDescriptiveRendererType.STATS_TABLE_MEAN_ROW)
    def _descriptive_stats_table_mean_row_renderer(
        cls,
        configuration: Optional[ExpectationConfiguration] = None,
        result: Optional[ExpectationValidationResult] = None,
        runtime_configuration: Optional[dict] = None,
        **kwargs,
    ):
        # Renders the "Mean" row of the descriptive stats table:
        # [label cell, observed value formatted to 2 decimal places].
        assert result, "Must pass in result."
        return [
            {
                "content_block_type": "string_template",
                "string_template": {
                    "template": "Mean",
                    "tooltip": {"content": "expect_column_mean_to_be_between"},
                },
            },
            f"{result.result['observed_value']:.2f}",
        ]
    @override
    def _validate(
        self,
        metrics: Dict,
        runtime_configuration: Optional[dict] = None,
        execution_engine: Optional[ExecutionEngine] = None,
    ):
        # Delegates the between-check to the shared metric-value helper,
        # keyed on the "column.mean" metric declared in metric_dependencies.
        return self._validate_metric_value_between(
            metric_name="column.mean",
            metrics=metrics,
            runtime_configuration=runtime_configuration,
            execution_engine=execution_engine,
        )
| ExpectColumnMeanToBeBetween |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cli/configs/base.py | {
"start": 319,
"end": 4028
} | class ____(Block, abc.ABC):
    """
    Abstract class for other dbt Configs.
    Attributes:
        extras: Extra target configs' keywords, not yet exposed
            in prefect-dbt, but available in dbt; if there are
            duplicate keys between extras and TargetConfigs,
            an error will be raised.
    """
    extras: Optional[Dict[str, Any]] = Field(
        default=None,
        description=(
            "Extra target configs' keywords, not yet exposed in prefect-dbt, "
            "but available in dbt."
        ),
    )
    allow_field_overrides: bool = Field(
        default=False,
        description=(
            "If enabled, fields from dbt target configs will override "
            "fields provided in extras and credentials."
        ),
    )
    _documentation_url = "https://docs.prefect.io/integrations/prefect-dbt"  # noqa
    def _populate_configs_json(
        self,
        configs_json: dict[str, Any],
        fields: dict[str, Any],
        model: Optional[BaseModel] = None,
    ) -> dict[str, Any]:
        """
        Recursively populate configs_json.

        When ``model`` is given, ``fields`` are pydantic field definitions and
        values are read off the model (using the field's alias as the output
        key); when ``model`` is None, ``fields`` is a plain mapping of
        already-resolved values (the ``extras`` dict).
        """
        # if allow_field_overrides is True keys from TargetConfigs take precedence
        override_configs_json: dict[str, Any] = {}
        for field_name, field in fields.items():
            if model is not None:
                # get actual value from model
                field_value = getattr(model, field_name, None)
                # override the name with alias so dbt parser can recognize the keyword;
                # e.g. schema_ -> schema, returns the original name if no alias is set
                # prioritize serialization_alias since we're generating output
                if field.serialization_alias:
                    field_name = field.serialization_alias
                elif field.alias:
                    field_name = field.alias
            else:
                # extras path: the mapping value *is* the config value.
                field_value = field
            if field_value is None or field_name == "allow_field_overrides":
                # do not add to configs json if no value or default is set
                continue
            if isinstance(field_value, BaseModel):
                # Nested credentials/config block: recurse into its fields.
                configs_json = self._populate_configs_json(
                    configs_json, field_value.model_fields, model=field_value
                )
            elif field_name == "extras":
                configs_json = self._populate_configs_json(
                    configs_json,
                    field_value,
                )
                override_configs_json.update(configs_json)
            else:
                if field_name in configs_json.keys() and not self.allow_field_overrides:
                    raise ValueError(
                        f"The keyword, {field_name}, has already been provided in "
                        f"TargetConfigs; remove duplicated keywords to continue"
                    )
                # Unwrap pydantic secrets and stringify paths for YAML output.
                if hasattr(field_value, "get_secret_value"):
                    field_value = field_value.get_secret_value()
                elif isinstance(field_value, Path):
                    field_value = str(field_value)
                configs_json[field_name] = field_value
                # NOTE(review): this parses as
                # "(self.allow_field_overrides and model is self) or model is None"
                # -- confirm the "or model is None" arm is intended to apply
                # even when allow_field_overrides is False.
                if self.allow_field_overrides and model is self or model is None:
                    override_configs_json[field_name] = field_value
        # Re-apply the override set last so those keys win.
        configs_json.update(override_configs_json)
        return configs_json
    def get_configs(self) -> dict[str, Any]:
        """
        Returns the dbt configs, likely used eventually for writing to profiles.yml.
        Returns:
            A configs JSON.
        """
        return self._populate_configs_json({}, type(self).model_fields, model=self)
| DbtConfigs |
python | kamyu104__LeetCode-Solutions | Python/minimize-max-distance-to-gas-station.py | {
"start": 47,
"end": 593
} | class ____(object):
def minmaxGasDist(self, stations, K):
"""
:type stations: List[int]
:type K: int
:rtype: float
"""
def check(x):
return sum(int(math.ceil((stations[i+1]-stations[i])/x))-1 for i in xrange(len(stations)-1)) <= K
left, right = 0, stations[-1]-stations[0]
while right-left > 1e-6:
mid = left + (right-left)/2.0
if check(mid):
right = mid
else:
left = mid
return left
| Solution |
python | tensorflow__tensorflow | tensorflow/core/function/integration_test/side_inputs_manual_api_test.py | {
"start": 912,
"end": 6933
} | class ____(parameterized.TestCase):
@unittest.skip("Feature not implemented")
@parameterized.parameters(
(1, tf.constant, 2, tf.constant),
(1.0, tf.constant, 2.0, tf.constant),
(1, int, 2, int),
(1.0, float, 2.0, float),
(1, int, 2, tf.constant),
(1, tf.constant, 2, int))
def test_direct_capture(self, val_before, type_before, val_after, type_after):
def f():
return tf.func.experimental.capture(lambda: x) + 1
tf_f = tf.function(f)
x = type_before(val_before)
self.assertEqual(f(), tf_f())
x = type_after(val_after)
self.assertEqual(f(), tf_f())
@unittest.skip("Feature not implemented")
def test_direct_capture_mutation(self):
def f():
cglob = tf.func.experimental.capture(lambda: glob)
return cglob[-1] + tf.constant(0)
tf_f = tf.function(f)
glob = [tf.constant(1), tf.constant(2)]
self.assertEqual(f(), tf_f())
glob.append(tf.constant(3))
self.assertEqual(f(), tf_f())
@unittest.skip("Feature not implemented")
@parameterized.parameters(
tf.constant,
int)
def test_dict_capture_mutation_with_tensor_and_non_tensor(self, capture_type):
def f():
cd = tf.func.experimental.capture(lambda: d)
return cd["val"]
tf_f = tf.function(f)
d = {"int": 1, "tensor": tf.constant(2), "val": capture_type(3)}
self.assertEqual(f(), tf_f())
d["val"] = capture_type(4)
self.assertEqual(f(), tf_f())
@unittest.skip("Feature not implemented")
@parameterized.parameters(tf.constant, int)
def test_capture_with_duplicate_usage(self, capture_type):
def f():
cx = tf.func.experimental.capture(lambda: x)
return cx + cx # should capture x just once.
tf_f = tf.function(f)
x = capture_type(1) # pylint: disable=unused-variable
self.assertEqual(f(), tf_f())
self.assertLen(tf_f._variable_creation_config._captures_container, 1)
@unittest.skip("Feature not implemented")
def test_local_capture(self):
def f():
x = tf.constant(0)
def g():
return tf.func.experimental.capture(lambda: x)
return g()
tf_f = tf.function(f)
x = tf.constant(100) # pylint: disable=unused-variable
# a = f()
a = 100
b = tf_f()
self.assertEqual(a, b)
x = tf.constant(200)
self.assertEqual(f(), tf_f())
@unittest.skip("Feature not implemented")
@parameterized.parameters(
tf.constant,
int)
def test_capture_by_nested_function(self, capture_type):
def f():
def g():
cx = tf.func.experimental.capture(lambda: x)
return cx
return g()
tf_f = tf.function(f)
x = capture_type(1) # pylint: disable=unused-variable
self.assertEqual(f(), tf_f())
x = capture_type(2)
self.assertEqual(f(), tf_f())
@unittest.skip("Feature not implemented")
@parameterized.parameters(tf.constant, int)
def test_outer_capture_with_function_call(self, capture_type):
def g():
cx = tf.func.experimental.capture(lambda: x)
return cx
def f():
return g()
tf_f = tf.function(f)
x = capture_type(1) # pylint: disable=unused-variable
self.assertEqual(f(), tf_f())
x = capture_type(2)
self.assertEqual(f(), tf_f())
@unittest.skip("Feature not implemented")
@parameterized.parameters(tf.constant, int)
def test_outer_capture_with_nested_function_call(self, capture_type):
x = capture_type(1) # pylint: disable=unused-variable
def g_factory():
def g():
cx = tf.func.experimental.capture(lambda: x)
return cx
return g()
def f():
h = g_factory
return h()
tf_f = tf.function(f)
self.assertEqual(f(), tf_f())
x = capture_type(2)
self.assertEqual(f(), tf_f())
@unittest.skip("Feature not implemented")
@parameterized.parameters(tf.constant, int)
def test_capture_within_function_argument(self, capture_type):
def g():
cx = tf.func.experimental.capture(lambda: x)
return cx
def f(h):
return h()
tf_f = tf.function(f)
x = capture_type(1) # pylint: disable=unused-variable
self.assertEqual(f(g), tf_f(g))
x = capture_type(2)
self.assertEqual(f(g), tf_f(g))
@unittest.skip("Feature not implemented")
def test_inner_nested_tf_function_raise_error(self):
@tf.function
def tf_f():
@tf.function
def tf_g():
cx = tf.func.experimental.capture(lambda: x)
return cx
return tf_g()
x = tf.constant(0) # pylint: disable=unused-variable
with self.assertRaisesRegex(
NotImplementedError, "Manual side input usage for inner nested"):
tf_f()
@unittest.skip("Feature not implemented")
@parameterized.parameters(
tf.constant,
int)
def test_outer_nested_tf_function_with_global_capture(self, capture_type):
@tf.function
def tf_f():
@tf.function
def tf_g(x):
return x
cx = tf.func.experimental.capture(lambda: x)
return tf_g(cx)
x = capture_type(0) # pylint: disable=unused-variable
self.assertEqual(tf_f(), tf.constant(0))
x = capture_type(1)
self.assertEqual(tf_f(), tf.constant(1))
@unittest.skip("Feature not implemented")
def test_non_callable_function_raise_error(self):
def f():
return tf.func.experimental.capture(x) + 1
tf_f = tf.function(f)
x = 1
with self.assertRaises(TypeError):
_ = tf_f()
x = tf.constant(1)
with self.assertRaises(TypeError):
_ = tf_f()
@unittest.skip("Feature not implemented")
@parameterized.parameters(
(1, tf.constant, 2, tf.constant),
(1, int, 2, int))
def test_call_by_value(self, val_before, type_before, val_after, type_after):
def f():
return tf.func.experimental.capture(lambda: x, by_ref=False)
tf_f = tf.function(f)
x = type_before(val_before)
self.assertEqual(tf_f(), val_before)
x = type_after(val_after)
self.assertEqual(tf_f(), val_before)
if __name__ == "__main__":
unittest.main()
| SideInputsTest |
python | sqlalchemy__sqlalchemy | test/engine/test_reflection.py | {
"start": 68748,
"end": 73997
} | class ____(fixtures.RemovesEvents, fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
to_reflect = Table(
"to_reflect",
metadata,
Column("x", sa.Integer, primary_key=True, autoincrement=False),
Column("y", sa.Integer),
test_needs_fk=True,
)
Table(
"related",
metadata,
Column("q", sa.Integer, sa.ForeignKey("to_reflect.x")),
test_needs_fk=True,
)
sa.Index("some_index", to_reflect.c.y)
def _do_test(
self, connection, col, update, assert_, tablename="to_reflect"
):
# load the actual Table class, not the test
# wrapper
from sqlalchemy.schema import Table
m = MetaData()
def column_reflect(insp, table, column_info):
if column_info["name"] == col:
column_info.update(update)
t = Table(
tablename,
m,
autoload_with=connection,
listeners=[("column_reflect", column_reflect)],
)
assert_(t)
m = MetaData()
self.event_listen(Table, "column_reflect", column_reflect)
t2 = Table(tablename, m, autoload_with=connection)
assert_(t2)
def test_override_key(self, connection):
def assertions(table):
eq_(table.c.YXZ.name, "x")
eq_(set(table.primary_key), {table.c.YXZ})
self._do_test(connection, "x", {"key": "YXZ"}, assertions)
def test_override_index(self, connection):
def assertions(table):
idx = list(table.indexes)[0]
eq_(idx.columns, [table.c.YXZ])
self._do_test(connection, "y", {"key": "YXZ"}, assertions)
def test_override_key_fk(self, connection):
m = MetaData()
def column_reflect(insp, table, column_info):
if column_info["name"] == "q":
column_info["key"] = "qyz"
elif column_info["name"] == "x":
column_info["key"] = "xyz"
to_reflect = Table(
"to_reflect",
m,
autoload_with=connection,
listeners=[("column_reflect", column_reflect)],
)
related = Table(
"related",
m,
autoload_with=connection,
listeners=[("column_reflect", column_reflect)],
)
assert related.c.qyz.references(to_reflect.c.xyz)
def test_override_type(self, connection):
def assert_(table):
assert isinstance(table.c.x.type, sa.String)
self._do_test(connection, "x", {"type": sa.String}, assert_)
def test_override_info(self, connection):
self._do_test(
connection,
"x",
{"info": {"a": "b"}},
lambda table: eq_(table.c.x.info, {"a": "b"}),
)
def test_override_server_default_fetchedvalue(self, connection):
my_default = FetchedValue()
self._do_test(
connection,
"x",
{"default": my_default},
lambda table: eq_(table.c.x.server_default, my_default),
)
def test_override_server_default_default_clause(self, connection):
my_default = DefaultClause("1")
self._do_test(
connection,
"x",
{"default": my_default},
lambda table: eq_(table.c.x.server_default, my_default),
)
def test_override_server_default_plain_text(self, connection):
my_default = "1"
def assert_text_of_one(table):
is_true(
isinstance(
table.c.x.server_default.arg, sql.elements.TextClause
)
)
eq_(str(table.c.x.server_default.arg), "1")
self._do_test(
connection, "x", {"default": my_default}, assert_text_of_one
)
def test_override_server_default_textclause(self, connection):
my_default = sa.text("1")
def assert_text_of_one(table):
is_true(
isinstance(
table.c.x.server_default.arg, sql.elements.TextClause
)
)
eq_(str(table.c.x.server_default.arg), "1")
self._do_test(
connection, "x", {"default": my_default}, assert_text_of_one
)
def test_listen_metadata_obj(self, connection):
m1 = MetaData()
m2 = MetaData()
canary = []
@event.listens_for(m1, "column_reflect")
def go(insp, table, info):
canary.append(info["name"])
Table("related", m1, autoload_with=connection)
Table("related", m2, autoload_with=connection)
eq_(canary, ["q", "x", "y"])
def test_listen_metadata_cls(self, connection):
m1 = MetaData()
m2 = MetaData()
canary = []
def go(insp, table, info):
canary.append(info["name"])
self.event_listen(MetaData, "column_reflect", go)
Table("related", m1, autoload_with=connection)
Table("related", m2, autoload_with=connection)
eq_(canary, ["q", "x", "y", "q", "x", "y"])
| ColumnEventsTest |
python | has2k1__plotnine | plotnine/geoms/geom_bin_2d.py | {
"start": 77,
"end": 662
} | class ____(geom_rect):
"""
Heatmap of 2d bin counts
{usage}
Divides the plane into rectangles, counts the number of
cases in each rectangle, and then (by default) maps the number
of cases to the rectangle's fill. This is a useful alternative
to geom_point in the presence of overplotting.
Parameters
----------
{common_parameters}
See Also
--------
plotnine.stat_bin_2d : The default stat for this `geom`.
"""
DEFAULT_PARAMS = {"stat": "bin_2d", "position": "identity", "na_rm": False}
geom_bin2d = geom_bin_2d
| geom_bin_2d |
python | arrow-py__arrow | tests/test_arrow.py | {
"start": 20589,
"end": 22674
} | class ____:
def test_not_attr(self):
with pytest.raises(ValueError):
arrow.Arrow.utcnow().replace(abc=1)
def test_replace(self):
arw = arrow.Arrow(2013, 5, 5, 12, 30, 45)
assert arw.replace(year=2012) == arrow.Arrow(2012, 5, 5, 12, 30, 45)
assert arw.replace(month=1) == arrow.Arrow(2013, 1, 5, 12, 30, 45)
assert arw.replace(day=1) == arrow.Arrow(2013, 5, 1, 12, 30, 45)
assert arw.replace(hour=1) == arrow.Arrow(2013, 5, 5, 1, 30, 45)
assert arw.replace(minute=1) == arrow.Arrow(2013, 5, 5, 12, 1, 45)
assert arw.replace(second=1) == arrow.Arrow(2013, 5, 5, 12, 30, 1)
def test_replace_tzinfo(self):
arw = arrow.Arrow.utcnow().to("US/Eastern")
result = arw.replace(tzinfo=ZoneInfo("US/Pacific"))
assert result == arw.datetime.replace(tzinfo=ZoneInfo("US/Pacific"))
def test_replace_fold(self):
before = arrow.Arrow(2017, 11, 5, 1, tzinfo="America/New_York")
after = before.replace(fold=1)
assert before.fold == 0
assert after.fold == 1
assert before == after
assert before.utcoffset() != after.utcoffset()
def test_replace_fold_and_other(self):
arw = arrow.Arrow(2013, 5, 5, 12, 30, 45)
assert arw.replace(fold=1, minute=50) == arrow.Arrow(2013, 5, 5, 12, 50, 45)
assert arw.replace(minute=50, fold=1) == arrow.Arrow(2013, 5, 5, 12, 50, 45)
def test_replace_week(self):
with pytest.raises(ValueError):
arrow.Arrow.utcnow().replace(week=1)
def test_replace_quarter(self):
with pytest.raises(ValueError):
arrow.Arrow.utcnow().replace(quarter=1)
def test_replace_quarter_and_fold(self):
with pytest.raises(AttributeError):
arrow.utcnow().replace(fold=1, quarter=1)
with pytest.raises(AttributeError):
arrow.utcnow().replace(quarter=1, fold=1)
def test_replace_other_kwargs(self):
with pytest.raises(AttributeError):
arrow.utcnow().replace(abc="def")
| TestArrowReplace |
python | ray-project__ray | python/ray/data/tests/test_partitioning.py | {
"start": 12345,
"end": 37807
} | class ____:
def test_read_single_file(self, tmp_path, block_type, ray_start_regular_shared):
path = os.path.join(tmp_path, "1970", "fr", "data.csv")
write_csv({"number": [1, 2, 3]}, path)
ds = read_csv(
path,
partitioning=Partitioning(
"dir", field_names=["year", "country"], base_dir=tmp_path
),
block_type=block_type,
)
df = ds.to_pandas()
assert list(df.columns) == ["number", "year", "country"]
assert list(df["number"]) == [1, 2, 3]
assert list(df["year"]) == ["1970", "1970", "1970"]
assert list(df["country"]) == ["fr", "fr", "fr"]
def test_read_single_file_with_null_field(
self, tmp_path, block_type, ray_start_regular_shared
):
path = os.path.join(tmp_path, "1970", "data", "data.csv")
write_csv({"number": [1, 2, 3]}, path)
ds = read_csv(
path,
partitioning=Partitioning(
"dir", field_names=["year", None], base_dir=tmp_path
),
block_type=block_type,
)
df = ds.to_pandas()
assert list(df.columns) == ["number", "year"]
assert list(df["number"]) == [1, 2, 3]
assert list(df["year"]) == ["1970", "1970", "1970"]
def test_read_single_file_with_missing_field(
self, tmp_path, block_type, ray_start_regular_shared
):
path = os.path.join(tmp_path, "1970", "data.csv")
write_csv({"number": [0, 0, 0]}, path)
# `read_csv` should error because `path` is missing the `country` field.
with pytest.raises(ValueError):
read_csv(
path,
partitioning=Partitioning(
"dir", field_names=["year", "country"], base_dir=tmp_path
),
block_type=block_type,
).schema()
@pytest.mark.parametrize(
"relative_path", ["1970/data.csv", "1970/us/94704/data.csv"]
)
def test_read_single_file_with_invalid_field_names(
self, relative_path, tmp_path, block_type, ray_start_regular_shared
):
path = os.path.join(tmp_path, relative_path)
write_csv({"number": [0, 0, 0]}, path)
with pytest.raises(ValueError):
read_csv(
path,
partitioning=Partitioning(
"dir", field_names=["year", "country"], base_dir=tmp_path
),
block_type=block_type,
).schema()
def test_read_files_with_conflicting_key(
self, tmp_path, block_type, ray_start_regular_shared
):
path = os.path.join(tmp_path, "01", "data.csv")
write_csv({"month": [1, 2, 3]}, path)
with pytest.raises(ValueError):
# `read_csv` should error because `month` is a field in both the CSV and
# the path, and the data is different.
read_csv(
path,
partitioning=Partitioning(
"dir", field_names=["month"], base_dir=tmp_path
),
block_type=block_type,
).schema()
@pytest.mark.parametrize("data", [[1, 1, 1], [1, None, 1]])
def test_read_files_with_legally_conflicting_key(
self, data, tmp_path, block_type, ray_start_regular_shared
):
path = os.path.join(tmp_path, "01", "data.csv")
write_csv({"month": data}, path)
# `month` is a field in both the path and the CSV, but because the data is
# identical, we don't raise an error.
ds = read_csv(
path,
partitioning=Partitioning("dir", field_names=["month"], base_dir=tmp_path),
block_type=block_type,
)
df = ds.to_pandas()
assert list(df.columns) == ["month"]
assert list(df["month"]) == [1, 1, 1]
def test_read_multiple_files(self, tmp_path, block_type, ray_start_regular_shared):
path1 = os.path.join(tmp_path, "1970", "fr", "data.csv")
write_csv({"number": [1, 2, 3]}, path1)
path2 = os.path.join(tmp_path, "1971", "ir", "data.csv")
write_csv({"number": [4, 5, 6]}, path2)
ds = read_csv(
[path1, path2],
partitioning=Partitioning(
"dir", field_names=["year", "country"], base_dir=tmp_path
),
block_type=block_type,
)
df = ds.to_pandas()
assert list(df.columns) == ["number", "year", "country"]
assert list(df[df["year"] == "1970"]["number"]) == [1, 2, 3]
assert list(df[df["year"] == "1970"]["country"]) == ["fr", "fr", "fr"]
assert list(df[df["year"] == "1971"]["number"]) == [4, 5, 6]
assert list(df[df["year"] == "1971"]["country"]) == ["ir", "ir", "ir"]
def _verify_resolved_paths_and_filesystem(scheme: Partitioning):
assert scheme.base_dir is not None
assert scheme.normalized_base_dir is not None
paths, expected_fs = _resolve_paths_and_filesystem(
scheme.base_dir,
scheme.filesystem,
)
path = paths[0]
expected_path = f"{path}/" if path and not path.endswith("/") else path
assert scheme.normalized_base_dir == expected_path
assert isinstance(scheme.resolved_filesystem, type(expected_fs))
def test_partition_style_serde_round_trip():
for style in PartitionStyle:
serialized = json.dumps(style)
deserialized = PartitionStyle(json.loads(serialized))
assert deserialized == style
def test_path_partition_base_properties():
style = PartitionStyle.DIRECTORY
base_dir = "/foo/bar"
field_names = ["baz", "qux"]
scheme = Partitioning(style, base_dir, field_names, None)
assert scheme.style == style
assert scheme.base_dir == base_dir
assert scheme.field_names == field_names
_verify_resolved_paths_and_filesystem(scheme)
scheme = Partitioning(style, None, field_names, None)
assert scheme.style == style
assert scheme.base_dir == ""
assert scheme.field_names == field_names
_verify_resolved_paths_and_filesystem(scheme)
def test_path_partition_encoder_errors():
# no field names for default HIVE path partitioning
with pytest.raises(ValueError):
PathPartitionEncoder.of()
# explicit no field names for HIVE path partitioning
with pytest.raises(ValueError):
PathPartitionEncoder.of(style=PartitionStyle.HIVE, field_names=[])
# invalid path partitioning style
with pytest.raises(ValueError):
PathPartitionEncoder.of(style=None)
# partition field name and field value length mismatch
for style in [PartitionStyle.HIVE, PartitionStyle.DIRECTORY]:
path_partition_encoder = PathPartitionEncoder.of(
style,
field_names=["foo", "bar"],
)
with pytest.raises(TypeError):
path_partition_encoder(None)
with pytest.raises(AssertionError):
path_partition_encoder([])
with pytest.raises(AssertionError):
path_partition_encoder(["1"])
with pytest.raises(AssertionError):
path_partition_encoder(["1", "2", "3"])
@pytest.mark.parametrize(
"fs,base_dir",
[
(None, None),
(lazy_fixture("local_fs"), lazy_fixture("local_path")),
(lazy_fixture("s3_fs"), lazy_fixture("s3_path")),
(
lazy_fixture("s3_fs_with_special_chars"),
lazy_fixture("s3_path_with_special_chars"),
),
],
)
def test_path_partition_encoder_hive(fs, base_dir):
field_names = ["foo", "bar"]
path_partition_encoder = PathPartitionEncoder.of(
field_names=field_names,
base_dir=base_dir,
filesystem=fs,
)
_verify_resolved_paths_and_filesystem(path_partition_encoder.scheme)
partition_values = ["1", "2"]
partition_path = path_partition_encoder(partition_values)
assert partition_path == posixpath.join(
path_partition_encoder.scheme.normalized_base_dir,
"foo=1",
"bar=2",
)
if fs is not None:
file_info = fs.get_file_info(partition_path)
assert file_info.type == FileType.NotFound
fs.create_dir(partition_path)
file_info = fs.get_file_info(partition_path)
assert file_info.type == FileType.Directory
@pytest.mark.parametrize(
"fs,base_dir",
[
(None, None),
(lazy_fixture("local_fs"), lazy_fixture("local_path")),
(lazy_fixture("s3_fs"), lazy_fixture("s3_path")),
(
lazy_fixture("s3_fs_with_special_chars"),
lazy_fixture("s3_path_with_special_chars"),
),
],
)
def test_path_partition_encoder_directory(fs, base_dir):
path_partition_encoder = PathPartitionEncoder.of(
style=PartitionStyle.DIRECTORY,
field_names=["foo", "bar"],
base_dir=base_dir,
filesystem=fs,
)
_verify_resolved_paths_and_filesystem(path_partition_encoder.scheme)
partition_values = ["1", "2"]
partition_path = path_partition_encoder(partition_values)
assert partition_path == posixpath.join(
path_partition_encoder.scheme.normalized_base_dir,
*partition_values,
)
if fs is not None:
file_info = fs.get_file_info(partition_path)
assert file_info.type == FileType.NotFound
fs.create_dir(partition_path)
file_info = fs.get_file_info(partition_path)
assert file_info.type == FileType.Directory
path_partition_encoder = PathPartitionEncoder.of(
style=PartitionStyle.DIRECTORY,
base_dir=base_dir,
filesystem=fs,
)
partition_path = path_partition_encoder([])
assert partition_path == path_partition_encoder.scheme.normalized_base_dir
partition_path = path_partition_encoder(partition_values)
assert partition_path == posixpath.join(
path_partition_encoder.scheme.normalized_base_dir,
*partition_values,
)
def test_path_partition_parser_errors():
# no field names for DIRECTORY path partitioning
with pytest.raises(ValueError):
PathPartitionParser.of(style=PartitionStyle.DIRECTORY)
# explicit no field names for DIRECTORY path partitioning
with pytest.raises(ValueError):
PathPartitionParser.of(style=PartitionStyle.DIRECTORY, field_names=[])
# invalid path partitioning style
with pytest.raises(ValueError):
PathPartitionParser.of(style=None)
# HIVE partition field name and field value length or order mismatch
path_partition_parser = PathPartitionParser.of(
style=PartitionStyle.HIVE,
field_names=["foo", "bar"],
)
with pytest.raises(ValueError):
path_partition_parser("foo=1/")
with pytest.raises(ValueError):
path_partition_parser("bar=1/foo=2/")
with pytest.raises(ValueError):
path_partition_parser("foo=1/bar=2/qux=3/")
# ensure HIVE partition base directory is not considered a partition
path_partition_parser = PathPartitionParser.of(
style=PartitionStyle.HIVE,
base_dir="foo=1",
field_names=["foo", "bar"],
)
with pytest.raises(ValueError):
path_partition_parser("foo=1/bar=2/")
# DIRECTORY partition field name and field value length mismatch
path_partition_parser = PathPartitionParser.of(
style=PartitionStyle.DIRECTORY,
field_names=["foo", "bar"],
)
with pytest.raises(ValueError):
path_partition_parser("1/")
with pytest.raises(ValueError):
path_partition_parser("1/2/3/")
# ensure DIRECTORY partition base directory is not considered a partition
path_partition_parser = PathPartitionParser.of(
style=PartitionStyle.DIRECTORY,
base_dir="1",
field_names=["foo", "bar"],
)
with pytest.raises(ValueError):
path_partition_parser("1/2/")
@pytest.mark.parametrize(
"fs,base_dir",
[
(None, None),
(lazy_fixture("local_fs"), lazy_fixture("local_path")),
(lazy_fixture("s3_fs"), lazy_fixture("s3_path")),
(
lazy_fixture("s3_fs_with_special_chars"),
lazy_fixture("s3_path_with_special_chars"),
),
],
)
def test_path_partition_parser_hive(fs, base_dir):
partition_parser = PathPartitionParser.of(base_dir=base_dir, filesystem=fs)
_verify_resolved_paths_and_filesystem(partition_parser.scheme)
base_dir = partition_parser.scheme.normalized_base_dir
# parse unpartitioned paths...
partition_kvs = partition_parser("")
assert partition_kvs == {}
unpartitioned_paths = [
"",
"foo/1",
"bar/2",
"baz/3",
posixpath.join(base_dir, "test.txt"),
posixpath.join(base_dir, "foo/test.txt"),
posixpath.join(base_dir, "foo/bar/qux=3"),
posixpath.join(base_dir, "test=1.txt"),
]
for path in unpartitioned_paths:
assert partition_parser(path) == {}
partitioned_path = posixpath.join(base_dir, "foo=1/test.txt")
assert partition_parser(partitioned_path) == {"foo": "1"}
partitioned_path = posixpath.join(base_dir, " foo = 1 /test.txt")
assert partition_parser(partitioned_path) == {" foo ": " 1 "}
partitioned_path = posixpath.join(base_dir, "foo/bar=2/test.txt")
assert partition_parser(partitioned_path) == {"bar": "2"}
partitioned_path = posixpath.join(base_dir, "bar=2/foo=1/test")
assert partition_parser(partitioned_path) == {"foo": "1", "bar": "2"}
partitioned_path = posixpath.join(base_dir, "foo/bar/qux=3/")
assert partition_parser(partitioned_path) == {"qux": "3"}
@pytest.mark.parametrize(
"path, expected_partitions",
[
# '%2F' should decode to '/'
("bucket/key=partition%2Fvalue/file.txt", {"key": "partition/value"}),
# '+' must remain literal when decoding path components. See
# https://github.com/ray-project/ray/pull/57625#discussion_r2441360523.
("bucket/key=foo+bar/file.txt", {"key": "foo+bar"}),
# '%2B' should decode to '+'
("bucket/key=foo%2Bbar/file.txt", {"key": "foo+bar"}),
],
)
def test_path_partition_parser_decodes_special_characters(
path: str, expected_partitions: Dict[str, str]
):
partition_parser = PathPartitionParser.of(base_dir="bucket")
assert partition_parser(path) == expected_partitions
@pytest.mark.parametrize(
"fs,base_dir",
[
(None, None),
(lazy_fixture("local_fs"), lazy_fixture("local_path")),
(lazy_fixture("s3_fs"), lazy_fixture("s3_path")),
(
lazy_fixture("s3_fs_with_special_chars"),
lazy_fixture("s3_path_with_special_chars"),
),
],
)
def test_path_partition_parser_dir(fs, base_dir):
partition_parser = PathPartitionParser.of(
PartitionStyle.DIRECTORY,
base_dir=base_dir,
field_names=["foo", "bar"],
filesystem=fs,
)
_verify_resolved_paths_and_filesystem(partition_parser.scheme)
base_dir = partition_parser.scheme.normalized_base_dir
# parse unpartitioned paths...
partition_kvs = partition_parser("")
assert partition_kvs == {}
if base_dir:
unpartitioned_paths = [
"",
"foo/1",
"bar/2",
"baz/3",
posixpath.join(base_dir, "test.txt"),
]
for path in unpartitioned_paths:
assert partition_parser(path) == {}
partitioned_path = posixpath.join(base_dir, "1/2/test.txt")
assert partition_parser(partitioned_path) == {"foo": "1", "bar": "2"}
partitioned_path = posixpath.join(base_dir, " 1 / t w o /test.txt")
assert partition_parser(partitioned_path) == {"foo": " 1 ", "bar": " t w o "}
partitioned_path = posixpath.join(base_dir, "2/1/test.txt")
assert partition_parser(partitioned_path) == {"foo": "2", "bar": "1"}
partitioned_path = posixpath.join(base_dir, "1/2/")
assert partition_parser(partitioned_path) == {"foo": "1", "bar": "2"}
partitioned_path = posixpath.join(base_dir, "1/2/3")
assert partition_parser(partitioned_path) == {"foo": "1", "bar": "2"}
partition_parser = PathPartitionParser.of(
PartitionStyle.DIRECTORY,
base_dir=base_dir,
field_names=["bar", "foo"],
filesystem=fs,
)
partitioned_path = posixpath.join(base_dir, "1/2/test")
assert partition_parser(partitioned_path) == {"bar": "1", "foo": "2"}
partitioned_path = posixpath.join(base_dir, "2/1/test")
assert partition_parser(partitioned_path) == {"bar": "2", "foo": "1"}
partition_parser = PathPartitionParser.of(
PartitionStyle.DIRECTORY,
base_dir=base_dir,
field_names=["year", None, "country"],
filesystem=fs,
)
partitioned_path = posixpath.join(base_dir, "1970/countries/fr/products.csv")
assert partition_parser(partitioned_path) == {"year": "1970", "country": "fr"}
@pytest.mark.parametrize(
"fs,base_dir",
[
(None, None),
(lazy_fixture("local_fs"), lazy_fixture("local_path")),
(lazy_fixture("s3_fs"), lazy_fixture("s3_path")),
(
lazy_fixture("s3_fs_with_special_chars"),
lazy_fixture("s3_path_with_special_chars"),
),
],
)
def test_path_partition_filter_hive(fs, base_dir):
pass_through = PathPartitionFilter.of(None, base_dir=base_dir, filesystem=fs)
_verify_resolved_paths_and_filesystem(pass_through.parser.scheme)
paths = pass_through([])
assert paths == []
paths = pass_through(["foo/1", "bar/2", "baz/3"])
assert paths == ["foo/1", "bar/2", "baz/3"]
filter_unpartitioned = PathPartitionFilter.of(
base_dir=base_dir,
filesystem=fs,
filter_fn=lambda d: bool(d),
)
_verify_resolved_paths_and_filesystem(filter_unpartitioned.parser.scheme)
base_dir = filter_unpartitioned.parser.scheme.normalized_base_dir
test_paths = [
posixpath.join(base_dir, "test.txt"),
posixpath.join(base_dir, "foo/test.txt"),
posixpath.join(base_dir, "foo=1/test.txt"),
posixpath.join(base_dir, "foo/bar=2/test.txt"),
posixpath.join(base_dir, "foo=1/bar=2/test"),
posixpath.join(base_dir, "foo/bar/qux=3/"),
posixpath.join(base_dir, "foo/bar/qux=3"),
posixpath.join(base_dir, "test=1.txt"),
]
if base_dir:
test_paths.extend(["test.txt", "foo=1/test.txt"])
paths = filter_unpartitioned(test_paths)
assert paths == [
posixpath.join(base_dir, "foo=1/test.txt"),
posixpath.join(base_dir, "foo/bar=2/test.txt"),
posixpath.join(base_dir, "foo=1/bar=2/test"),
posixpath.join(base_dir, "foo/bar/qux=3/"),
]
filter_values = PathPartitionFilter.of(
base_dir=base_dir,
filesystem=fs,
filter_fn=lambda d: d
and (d.get("qux") == "3" or (d.get("foo") == "1" and d.get("bar") == "2")),
)
_verify_resolved_paths_and_filesystem(filter_values.parser.scheme)
paths = filter_values(test_paths)
assert paths == [
posixpath.join(base_dir, "foo=1/bar=2/test"),
posixpath.join(base_dir, "foo/bar/qux=3/"),
]
filter_field_name_values = PathPartitionFilter.of(
base_dir=base_dir,
field_names=["foo", "bar"],
filesystem=fs,
filter_fn=lambda d: d and d.get("foo") == "1" and d.get("bar") == "2",
)
test_paths = [
posixpath.join(base_dir, "foo=1/bar=2/test"),
posixpath.join(base_dir, "prefix/foo=1/padding/bar=2/test"),
]
paths = filter_field_name_values(test_paths)
assert paths == test_paths
@pytest.mark.parametrize(
"fs,base_dir",
[
(None, None),
(lazy_fixture("local_fs"), lazy_fixture("local_path")),
(lazy_fixture("s3_fs"), lazy_fixture("s3_path")),
(
lazy_fixture("s3_fs_with_special_chars"),
lazy_fixture("s3_path_with_special_chars"),
),
],
)
def test_path_partition_filter_directory(fs, base_dir):
pass_through = PathPartitionFilter.of(
None,
style=PartitionStyle.DIRECTORY,
base_dir=base_dir,
field_names=["foo", "bar"],
filesystem=fs,
)
paths = pass_through([])
assert paths == []
paths = pass_through(["foo/1", "bar/2", "baz/3"])
assert paths == ["foo/1", "bar/2", "baz/3"]
filter_unpartitioned = PathPartitionFilter.of(
style=PartitionStyle.DIRECTORY,
base_dir=base_dir,
field_names=["foo", "bar"],
filesystem=fs,
filter_fn=lambda d: bool(d),
)
_verify_resolved_paths_and_filesystem(filter_unpartitioned.parser.scheme)
base_dir = filter_unpartitioned.parser.scheme.normalized_base_dir
test_paths = [
posixpath.join(base_dir, "test.txt"),
posixpath.join(base_dir, "1/2/test.txt"),
posixpath.join(base_dir, "1/2/"),
posixpath.join(base_dir, "2/1/"),
posixpath.join(base_dir, "1/2/3"),
]
if base_dir:
# files outside of the base directory are implicitly unpartitioned
test_paths.extend(["test.txt", "1/2/test.txt"])
paths = filter_unpartitioned(test_paths)
assert paths == [
posixpath.join(base_dir, "1/2/test.txt"),
posixpath.join(base_dir, "1/2/"),
posixpath.join(base_dir, "2/1/"),
posixpath.join(base_dir, "1/2/3"),
]
filter_values = PathPartitionFilter.of(
style=PartitionStyle.DIRECTORY,
base_dir=base_dir,
field_names=["foo", "bar"],
filesystem=fs,
filter_fn=lambda d: d and d["foo"] == "1" and d["bar"] == "2",
)
_verify_resolved_paths_and_filesystem(filter_values.parser.scheme)
paths = filter_values(test_paths)
assert paths == [
posixpath.join(base_dir, "1/2/test.txt"),
posixpath.join(base_dir, "1/2/"),
posixpath.join(base_dir, "1/2/3"),
]
@pytest.mark.parametrize(
"partition_value,expected_type",
[
("1", int),
("1.0", float),
("spam", str),
("true", bool),
],
)
def test_field_types(partition_value, expected_type):
partitioning = Partitioning(style="hive", field_types={"key": expected_type})
parse = PathPartitionParser(partitioning)
partitions = parse(f"key={partition_value}/data.parquet")
assert set(partitions.keys()) == {"key"}
assert isinstance(partitions["key"], expected_type)
@pytest.mark.parametrize(
"path,predicate,expected_result,description",
[
# Simple equality matches
("country=US/file.parquet", col("country") == "US", True, "Exact match"),
("country=US/file.parquet", col("country") == "UK", False, "No match"),
# AND predicates
(
"country=US/year=2020/file.parquet",
(col("country") == "US") & (col("year") == "2020"),
True,
"AND both match",
),
(
"country=US/year=2020/file.parquet",
(col("country") == "US") & (col("year") == "2021"),
False,
"AND one doesn't match",
),
# OR predicates
(
"country=US/file.parquet",
(col("country") == "US") | (col("country") == "UK"),
True,
"OR first matches",
),
(
"country=FR/file.parquet",
(col("country") == "US") | (col("country") == "UK"),
False,
"OR neither matches",
),
# Comparison operators
("year=2020/file.parquet", col("year") > "2019", True, "Greater than"),
("year=2020/file.parquet", col("year") < "2019", False, "Less than"),
# NOT operator
("country=US/file.parquet", ~(col("country") == "UK"), True, "NOT false"),
("country=US/file.parquet", ~(col("country") == "US"), False, "NOT true"),
# IS_IN operator
(
"country=US/file.parquet",
col("country").is_in(["US", "UK"]),
True,
"IS_IN matches",
),
(
"country=FR/file.parquet",
col("country").is_in(["US", "UK"]),
False,
"IS_IN no match",
),
(
"year=2020/file.parquet",
col("year").is_in(["2019", "2020", "2021"]),
True,
"IS_IN with multiple values",
),
],
)
@pytest.mark.skipif(
get_pyarrow_version() < parse_version("14.0.0"),
reason="Partition predicate evaluation requires pyarrow >= 14.0.0",
)
def test_evaluate_predicate_on_partition(path, predicate, expected_result, description):
"""Test partition predicate evaluation for automatic partition pruning."""
parser = PathPartitionParser(Partitioning("hive"))
result = parser.evaluate_predicate_on_partition(path, predicate)
assert (
result == expected_result
), f"{description}: Expected {expected_result}, got {result}"
def test_evaluate_predicate_on_unpartitioned_file():
"""Test that unpartitioned files are conservatively included."""
parser = PathPartitionParser(Partitioning("hive"))
# Unpartitioned file should return False when filtering on partition columns
# (we can't determine if it matches the predicate without partition values)
result = parser.evaluate_predicate_on_partition(
"data.parquet", col("country") == "US"
)
assert result is False
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| TestReadDirPartitionedFiles |
python | apache__airflow | providers/standard/tests/unit/standard/operators/test_python.py | {
"start": 81695,
"end": 82556
} | class ____(BaseTestBranchPythonVirtualenvOperator):
opcls = BranchPythonVirtualenvOperator
@staticmethod
def default_kwargs(*, python_version=DEFAULT_PYTHON_VERSION, **kwargs):
if "do_not_use_caching" in kwargs:
kwargs.pop("do_not_use_caching")
else:
# Caching by default makes the tests run faster except few cases we want to test with regular venv
if "venv_cache_path" not in kwargs:
kwargs["venv_cache_path"] = venv_cache_path
return kwargs
# when venv tests are run in parallel to other test they create new processes and this might take
# quite some time in shared docker environment and get some contention even between different containers
# therefore we have to extend timeouts for those tests
@pytest.mark.external_python_operator
| TestBranchPythonVirtualenvOperator |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 27468,
"end": 27676
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("APPROVED", "CHANGES_REQUESTED", "REVIEW_REQUIRED")
| PullRequestReviewDecision |
python | walkccc__LeetCode | solutions/1745. Palindrome Partitioning IV/1745-2.py | {
"start": 0,
"end": 528
} | class ____:
def checkPartitioning(self, s: str) -> bool:
n = len(s)
# dp[i][j] := true if s[i..j] is a palindrome
dp = [[False] * (n + 1) for _ in range(n + 1)]
for i in range(n):
dp[i][i] = True
for d in range(1, n):
for i in range(n - d):
j = i + d
if s[i] == s[j]:
dp[i][j] = i + 1 > j - 1 or dp[i + 1][j - 1]
for i, j in itertools.combinations(range(n), 2):
if dp[0][i] and dp[i + 1][j] and dp[j + 1][n - 1]:
return True
return False
| Solution |
python | ray-project__ray | python/ray/dashboard/modules/log/log_manager.py | {
"start": 1298,
"end": 17162
} | class ____:
def __init__(self, data_source_client: StateDataSourceClient):
self.client = data_source_client
@property
def data_source_client(self) -> StateDataSourceClient:
return self.client
async def ip_to_node_id(self, node_ip: Optional[str]) -> Optional[str]:
"""Resolve the node id in hex from a given node ip.
Args:
node_ip: The node ip.
Returns:
node_id if there's a node id that matches the given node ip and is alive.
None otherwise.
"""
return await self.client.ip_to_node_id(node_ip)
async def list_logs(
self, node_id: str, timeout: int, glob_filter: str = "*"
) -> Dict[str, List[str]]:
"""Return a list of log files on a given node id filtered by the glob.
Args:
node_id: The node id where log files present.
timeout: The timeout of the API.
glob_filter: The glob filter to filter out log files.
Returns:
Dictionary of {component_name -> list of log files}
Raises:
ValueError: If a source is unresponsive.
"""
reply = await self.client.list_logs(node_id, glob_filter, timeout=timeout)
return self._categorize_log_files(reply.log_files)
async def stream_logs(
self,
options: GetLogOptions,
get_actor_fn: Callable[[ActorID], Awaitable[Optional[ActorTableData]]],
) -> AsyncIterable[bytes]:
"""Generate a stream of logs in bytes.
Args:
options: The option for streaming logs.
Return:
Async generator of streamed logs in bytes.
"""
node_id = options.node_id
if node_id is None:
node_id = await self.ip_to_node_id(options.node_ip)
res = await self.resolve_filename(
node_id=node_id,
log_filename=options.filename,
actor_id=options.actor_id,
task_id=options.task_id,
attempt_number=options.attempt_number,
pid=options.pid,
get_actor_fn=get_actor_fn,
timeout=options.timeout,
suffix=options.suffix,
submission_id=options.submission_id,
)
keep_alive = options.media_type == "stream"
stream = await self.client.stream_log(
node_id=res.node_id,
log_file_name=res.filename,
keep_alive=keep_alive,
lines=options.lines,
interval=options.interval,
# If we keepalive logs connection, we shouldn't have timeout
# otherwise the stream will be terminated forcefully
# after the deadline is expired.
timeout=options.timeout if not keep_alive else None,
start_offset=res.start_offset,
end_offset=res.end_offset,
)
async for streamed_log in stream:
yield streamed_log.data
async def _resolve_job_filename(self, sub_job_id: str) -> Tuple[str, str]:
"""Return the log file name and node id for a given job submission id.
Args:
sub_job_id: The job submission id.
Returns:
The log file name and node id.
"""
job_infos = await self.client.get_job_info(timeout=DEFAULT_RPC_TIMEOUT)
target_job = None
for job_info in job_infos:
if job_info.submission_id == sub_job_id:
target_job = job_info
break
if target_job is None:
logger.info(f"Submission job ID {sub_job_id} not found.")
return None, None
node_id = job_info.driver_node_id
if node_id is None:
raise ValueError(
f"Job {sub_job_id} has no driver node id info. "
"This is likely a bug. Please file an issue."
)
log_filename = JOB_LOGS_PATH_TEMPLATE.format(submission_id=sub_job_id)
return node_id, log_filename
async def _resolve_worker_file(
self,
node_id_hex: str,
worker_id_hex: Optional[str],
pid: Optional[int],
suffix: str,
timeout: int,
) -> Optional[str]:
"""Resolve worker log file."""
if worker_id_hex is not None and pid is not None:
raise ValueError(
f"Only one of worker id({worker_id_hex}) or pid({pid}) should be"
"provided."
)
if worker_id_hex is not None:
log_files = await self.list_logs(
node_id_hex, timeout, glob_filter=f"*{worker_id_hex}*{suffix}"
)
else:
log_files = await self.list_logs(
node_id_hex, timeout, glob_filter=f"*{pid}*{suffix}"
)
# Find matching worker logs.
for filename in [*log_files["worker_out"], *log_files["worker_err"]]:
# Worker logs look like worker-[worker_id]-[job_id]-[pid].out
if worker_id_hex is not None:
worker_id_from_filename = WORKER_LOG_PATTERN.match(filename).group(1)
if worker_id_from_filename == worker_id_hex:
return filename
else:
worker_pid_from_filename = int(
WORKER_LOG_PATTERN.match(filename).group(3)
)
if worker_pid_from_filename == pid:
return filename
return None
async def _resolve_actor_filename(
self,
actor_id: ActorID,
get_actor_fn: Callable[[ActorID], Awaitable[Optional[ActorTableData]]],
suffix: str,
timeout: int,
):
"""Resolve actor log file.
Args:
actor_id: The actor id.
get_actor_fn: The function to get actor information.
suffix: The suffix of the log file.
timeout: Timeout in seconds.
Returns:
The log file name and node id.
Raises:
ValueError: If actor data is not found or get_actor_fn is not provided.
"""
if get_actor_fn is None:
raise ValueError("get_actor_fn needs to be specified for actor_id")
actor_data = await get_actor_fn(actor_id)
if actor_data is None:
raise ValueError(f"Actor ID {actor_id} not found.")
# TODO(sang): Only the latest worker id can be obtained from
# actor information now. That means, if actors are restarted,
# there's no way for us to get the past worker ids.
worker_id_binary = actor_data.address.worker_id
if not worker_id_binary:
raise ValueError(
f"Worker ID for Actor ID {actor_id} not found. "
"Actor is not scheduled yet."
)
worker_id = WorkerID(worker_id_binary)
node_id_binary = actor_data.address.node_id
if not node_id_binary:
raise ValueError(
f"Node ID for Actor ID {actor_id} not found. "
"Actor is not scheduled yet."
)
node_id = NodeID(node_id_binary)
log_filename = await self._resolve_worker_file(
node_id_hex=node_id.hex(),
worker_id_hex=worker_id.hex(),
pid=None,
suffix=suffix,
timeout=timeout,
)
return node_id.hex(), log_filename
async def _resolve_task_filename(
self, task_id: str, attempt_number: int, suffix: str, timeout: int
):
"""Resolve log file for a task.
Args:
task_id: The task id.
attempt_number: The attempt number.
suffix: The suffix of the log file, e.g. out or err.
timeout: Timeout in seconds.
Returns:
The log file name, node id, the start and end offsets of the
corresponding task log in the file.
Raises:
FileNotFoundError: If the log file is not found.
ValueError: If the suffix is not out or err.
"""
log_filename = None
node_id = None
start_offset = None
end_offset = None
if suffix not in ["out", "err"]:
raise ValueError(f"Suffix {suffix} is not supported.")
reply = await self.client.get_all_task_info(
filters=[("task_id", "=", task_id)], timeout=timeout
)
# Check if the task is found.
if len(reply.events_by_task) == 0:
raise FileNotFoundError(
f"Could not find log file for task: {task_id}"
f" (attempt {attempt_number}) with suffix: {suffix}"
)
task_event = None
for t in reply.events_by_task:
if t.attempt_number == attempt_number:
task_event = t
break
if task_event is None:
raise FileNotFoundError(
"Could not find log file for task attempt:"
f"{task_id}({attempt_number})"
)
# Get the worker id and node id.
task = protobuf_to_task_state_dict(task_event)
worker_id = task.get("worker_id", None)
node_id = task.get("node_id", None)
log_info = task.get("task_log_info", None)
actor_id = task.get("actor_id", None)
if node_id is None:
raise FileNotFoundError(
"Could not find log file for task attempt."
f"{task_id}({attempt_number}) due to missing node info."
)
if log_info is None and actor_id is not None:
# This is a concurrent actor task. The logs will be interleaved.
# So we return the log file of the actor instead.
raise FileNotFoundError(
f"For actor task, please query actor log for "
f"actor({actor_id}): e.g. ray logs actor --id {actor_id} . Or "
"set RAY_ENABLE_RECORD_ACTOR_TASK_LOGGING=1 in actor's runtime env "
"or when starting the cluster. Recording actor task's log could be "
"expensive, so Ray turns it off by default."
)
elif log_info is None:
raise FileNotFoundError(
"Could not find log file for task attempt:"
f"{task_id}({attempt_number})."
f"Worker id = {worker_id}, node id = {node_id},"
f"log_info = {log_info}"
)
filename_key = "stdout_file" if suffix == "out" else "stderr_file"
log_filename = log_info.get(filename_key, None)
if log_filename is None:
raise FileNotFoundError(
f"Missing log filename info in {log_info} for task {task_id},"
f"attempt {attempt_number}"
)
start_offset = log_info.get(f"std{suffix}_start", None)
end_offset = log_info.get(f"std{suffix}_end", None)
return node_id, log_filename, start_offset, end_offset
async def resolve_filename(
self,
*,
node_id: Optional[str] = None,
log_filename: Optional[str] = None,
actor_id: Optional[str] = None,
task_id: Optional[str] = None,
attempt_number: Optional[int] = None,
pid: Optional[str] = None,
get_actor_fn: Optional[
Callable[[ActorID], Awaitable[Optional[ActorTableData]]]
] = None,
timeout: int = DEFAULT_RPC_TIMEOUT,
suffix: str = "out",
submission_id: Optional[str] = None,
) -> ResolvedStreamFileInfo:
"""Return the file name given all options.
Args:
node_id: The node's id from which logs are resolved.
log_filename: Filename of the log file.
actor_id: Id of the actor that generates the log file.
task_id: Id of the task that generates the log file.
pid: Id of the worker process that generates the log file.
get_actor_fn: Callback to get the actor's data by id.
timeout: Timeout for the gRPC to listing logs on the node
specified by `node_id`.
suffix: Log suffix if no `log_filename` is provided, when
resolving by other ids'. Default to "out".
submission_id: The submission id for a submission job.
"""
start_offset = None
end_offset = None
if suffix not in ["out", "err"]:
raise ValueError(f"Suffix {suffix} is not supported. ")
# TODO(rickyx): We should make sure we do some sort of checking on the log
# filename
if actor_id:
node_id, log_filename = await self._resolve_actor_filename(
ActorID.from_hex(actor_id), get_actor_fn, suffix, timeout
)
elif task_id:
(
node_id,
log_filename,
start_offset,
end_offset,
) = await self._resolve_task_filename(
task_id, attempt_number, suffix, timeout
)
elif submission_id:
node_id, log_filename = await self._resolve_job_filename(submission_id)
elif pid:
if node_id is None:
raise ValueError(
"Node id needs to be specified for resolving"
f" filenames of pid {pid}"
)
log_filename = await self._resolve_worker_file(
node_id_hex=node_id,
worker_id_hex=None,
pid=pid,
suffix=suffix,
timeout=timeout,
)
if log_filename is None:
raise FileNotFoundError(
"Could not find a log file. Please make sure the given "
"option exists in the cluster.\n"
f"\tnode_id: {node_id}\n"
f"\tfilename: {log_filename}\n"
f"\tactor_id: {actor_id}\n"
f"\ttask_id: {task_id}\n"
f"\tpid: {pid}\n"
f"\tsuffix: {suffix}\n"
f"\tsubmission_id: {submission_id}\n"
f"\tattempt_number: {attempt_number}\n"
)
res = ResolvedStreamFileInfo(
node_id=node_id,
filename=log_filename,
start_offset=start_offset,
end_offset=end_offset,
)
logger.info(f"Resolved log file: {res}")
return res
def _categorize_log_files(self, log_files: List[str]) -> Dict[str, List[str]]:
"""Categorize the given log files after filterieng them out using a given glob.
Returns:
Dictionary of {component_name -> list of log files}
"""
result = defaultdict(list)
for log_file in log_files:
if "worker" in log_file and (log_file.endswith(".out")):
result["worker_out"].append(log_file)
elif "worker" in log_file and (log_file.endswith(".err")):
result["worker_err"].append(log_file)
elif "core-worker" in log_file and log_file.endswith(".log"):
result["core_worker"].append(log_file)
elif "core-driver" in log_file and log_file.endswith(".log"):
result["driver"].append(log_file)
elif "raylet." in log_file:
result["raylet"].append(log_file)
elif "gcs_server." in log_file:
result["gcs_server"].append(log_file)
elif "log_monitor" in log_file:
result["internal"].append(log_file)
elif "monitor" in log_file:
result["autoscaler"].append(log_file)
elif "agent." in log_file:
result["agent"].append(log_file)
elif "dashboard." in log_file:
result["dashboard"].append(log_file)
else:
result["internal"].append(log_file)
return result
| LogsManager |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/polymorphic_function_test_cpu_only.py | {
"start": 995,
"end": 1715
} | class ____(test.TestCase, parameterized.TestCase):
"""Test that jit_compile=True correctly throws an exception if XLA is not available.
This test should only be run without `--config=cuda`, as that implicitly links
in XLA JIT.
"""
def testJitCompileRaisesExceptionWhenXlaIsUnsupported(self):
if test.is_built_with_rocm() or test_util.is_xla_enabled():
return
with self.assertRaisesRegex(errors.UnimplementedError,
'support for that platform linked in'):
@polymorphic_function.function(jit_compile=True)
def fn(x):
return x + x
fn([1, 1, 2, 3])
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| FunctionCpuOnlyTest |
python | pytorch__pytorch | test/quantization/fx/test_quantize_fx.py | {
"start": 31701,
"end": 275505
} | class ____(QuantizationTestCase):
def test_pattern_match(self):
""" test MatchAllNode with
conv - bn - add - relu pattern
"""
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
self.relu = nn.ReLU()
def forward(self, x, y):
x = self.conv(x)
x = self.bn(x)
x = x + y
x = self.relu(x)
return x
pattern = (nn.ReLU, (operator.add, (nn.BatchNorm2d, nn.Conv2d), MatchAllNode))
m = torch.fx.symbolic_trace(M())
modules = dict(m.named_modules())
for n in m.graph.nodes:
if n.op == 'call_module' and type(modules[n.target]) is nn.ReLU:
self.assertTrue(_is_match(modules, n, pattern))
def test_pattern_match_constant(self):
class M(torch.nn.Module):
def forward(self, x):
x, _ = torch.ops.aten.max_pool2d_with_indices.default(x)
return x
pattern = (operator.getitem, torch.ops.aten.max_pool2d_with_indices.default, 0)
m = torch.fx.symbolic_trace(M())
# eliminate the code that get the second output of maxpool, so that the pattern
# can be matched
m.graph.eliminate_dead_code()
modules = dict(m.named_modules())
for n in m.graph.nodes:
if n.op == "call_function" and n.target == operator.getitem:
self.assertTrue(_is_match(modules, n, pattern))
def test_fused_module_qat_swap(self):
class Tmp(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.tmp = torch.nn.Linear(5, 5)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.tmp(x)
return self.relu(x)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mods1 = torch.nn.Sequential(Tmp(), torch.nn.Linear(5, 5))
self.mods2 = torch.nn.Linear(5, 5)
def forward(self, x):
a = self.mods1(x)
x = torch.add(x, 5)
x = self.mods2(x)
x = torch.add(x, 5)
return a, x
model = M().train()
qconfig_dict = {
"": None,
"object_type": [
(torch.nn.Linear, default_qat_qconfig),
(torch.nn.ReLU, default_qat_qconfig),
],
}
prepared = prepare_qat_fx(model, qconfig_dict, example_inputs=(torch.randn(1, 5),))
self.assertTrue(isinstance(getattr(prepared.mods1, "0").tmp, torch.ao.nn.intrinsic.qat.LinearReLU))
def _get_conv_linear_test_cases(self, is_reference):
""" Returns a list of test cases, with format:
is_dynamic, ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_op
"""
class FunctionalConv1d(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
self.stride = 1
self.padding = 0
self.dilation = 1
self.groups = 1
def forward(self, x):
return F.conv1d(x, self.weight, None, self.stride, self.padding, self.dilation, self.groups)
class Conv1d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.conv = torch.nn.Conv1d(*args)
def forward(self, x):
return self.conv(x)
conv1d_input = torch.rand(1, 3, 224)
conv1d_weight = torch.rand(3, 3, 3)
conv1d_module_args = (3, 3, 3)
class FunctionalConv2d(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
self.stride = (1, 1)
self.padding = (0, 0)
self.dilation = (1, 1)
self.groups = 1
def forward(self, x):
return F.conv2d(x, self.weight, None, self.stride, self.padding, self.dilation, self.groups)
class Conv2d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.conv = torch.nn.Conv2d(*args)
def forward(self, x):
return self.conv(x)
conv2d_input = torch.rand(1, 3, 224, 224)
conv2d_weight = torch.rand(3, 3, 3, 3)
conv2d_module_args = (3, 3, 3)
class FunctionalConv3d(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
self.stride = (1, 1, 1)
self.padding = (0, 0, 0)
self.dilation = (1, 1, 1)
self.groups = 1
def forward(self, x):
return F.conv3d(
x,
self.weight,
None,
self.stride,
self.padding,
self.dilation,
self.groups,
)
class Conv3d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.conv = torch.nn.Conv3d(*args)
def forward(self, x):
return self.conv(x)
conv3d_input = torch.rand(1, 3, 32, 224, 224)
conv3d_weight = torch.rand(3, 3, 3, 3, 3)
conv3d_module_args = (3, 3, 3)
class Linear(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
def forward(self, x):
return F.linear(x, self.weight)
linear_input = torch.rand(8, 5)
linear_weight = torch.rand(10, 5)
class LinearModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 10)
def forward(self, x):
return self.linear(x)
linear_module_input = torch.rand(8, 5)
# is_dynamic, ModuleClass, module_constructor_inputs,
# inputs, quantized_node, weight_prepack_node
tests = [
(
False,
FunctionalConv1d,
(conv1d_weight,),
(conv1d_input,),
ns.call_function(torch.nn.functional.conv1d if is_reference else torch.ops.quantized.conv1d) ,
ns.call_function(torch.ops.quantized.conv1d_prepack),
),
(
False,
FunctionalConv2d,
(conv2d_weight,),
(conv2d_input,),
ns.call_function(torch.nn.functional.conv2d if is_reference else torch.ops.quantized.conv2d),
ns.call_function(torch.ops.quantized.conv2d_prepack),
),
(
False,
FunctionalConv3d,
(conv3d_weight,),
(conv3d_input,),
ns.call_function(torch.nn.functional.conv3d if is_reference else torch.ops.quantized.conv3d),
ns.call_function(torch.ops.quantized.conv3d_prepack),
),
(
False,
Conv1d,
conv1d_module_args,
(conv1d_input,),
ns.call_module(nnqr.Conv1d if is_reference else nnq.Conv1d),
None
),
(
False,
Conv2d,
conv2d_module_args,
(conv2d_input,),
ns.call_module(nnqr.Conv2d if is_reference else nnq.Conv2d),
None
),
(
False,
Conv3d,
conv3d_module_args,
(conv3d_input,),
ns.call_module(nnqr.Conv3d if is_reference else nnq.Conv3d),
None
),
(
True,
Linear,
(linear_weight,),
(linear_input,),
None if is_reference else ns.call_function(torch.ops.quantized.linear_dynamic),
ns.call_function(torch.ops.quantized.linear_prepack),
),
(
False,
Linear,
(linear_weight,),
(linear_input,),
ns.call_function(torch.nn.functional.linear if is_reference else torch.ops.quantized.linear),
ns.call_function(torch.ops.quantized.linear_prepack),
),
(
True,
LinearModule,
(),
(linear_module_input,),
ns.call_module(nnqr.Linear) if is_reference else ns.call_module(nnqd.Linear),
None,
),
(
False,
LinearModule,
(),
(linear_module_input,),
ns.call_module(nnqr.Linear if is_reference else nnq.Linear),
None,
),
]
return tests
@skipIfNoFBGEMM
def test_conv_linear_not_reference(self):
""" Test quantizing conv and linear
"""
tests = self._get_conv_linear_test_cases(is_reference=False)
for (is_dynamic, ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_node) in tests:
quant_type = QuantType.DYNAMIC if is_dynamic else QuantType.STATIC
node_occurrence = {}
if weight_prepack_node:
node_occurrence[weight_prepack_node] = 0
self.checkGraphModeFxOp(
ModuleClass(*module_constructor_inputs),
inputs, quant_type,
expected_node=quantized_node,
expected_node_occurrence=node_occurrence,
is_reference=False)
@skipIfNoFBGEMM
def test_conv_linear_reference(self):
""" Test quantizing functional conv and linear with reference option
"""
tests = self._get_conv_linear_test_cases(is_reference=True)
def _get_keys(prefix, is_dynamic):
all_keys = [prefix + "." + k for k in ["weight_qscheme", "weight_dtype"]]
if not is_dynamic:
all_keys.extend([prefix + "." + k for k in ["weight_scale", "weight_zero_point"]])
return all_keys
for (is_dynamic, ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_node) in tests:
quant_type = QuantType.DYNAMIC if is_dynamic else QuantType.STATIC
node_occurrence = {}
if weight_prepack_node:
node_occurrence[weight_prepack_node] = 0
result_dict = self.checkGraphModeFxOp(
ModuleClass(*module_constructor_inputs),
inputs, quant_type,
expected_node=quantized_node,
expected_node_occurrence=node_occurrence,
is_reference=True)
qr = result_dict["quantized_reference"]
def checkWeightQParams(model):
for module_name in ("linear", "conv"):
if hasattr(model, module_name):
self.assertTrue(hasattr(qr.get_submodule(module_name), "weight_qscheme"))
self.assertTrue(hasattr(qr.get_submodule(module_name), "weight_scale"))
self.assertTrue(hasattr(qr.get_submodule(module_name), "weight_zero_point"))
self.assertTrue("Reference" in qr.get_submodule(module_name)._get_name())
def checkSerDeser(model, is_dynamic):
for module_name in ("linear", "conv"):
if hasattr(model, module_name):
# make sure serialization works
state_dict = copy.deepcopy(model.state_dict())
all_keys = _get_keys(module_name, is_dynamic)
for key in all_keys:
self.assertTrue(key in state_dict)
# check load_state_dict restores states
module = getattr(model, module_name)
prev_scale = module.weight_scale
module.weight_scale = None
model.load_state_dict(state_dict)
module = getattr(model, module_name)
self.assertTrue(torch.equal(prev_scale, module.weight_scale))
checkWeightQParams(qr)
qr = copy.deepcopy(qr)
# make sure the qparams are preserved after copy
checkWeightQParams(qr)
checkSerDeser(qr, is_dynamic)
def _get_conv_transpose_test_cases(self, use_relu, is_reference):
""" Returns a list of test cases, with format:
is_dynamic, ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_op
"""
class FunctionalConvTranspose1d(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
self.stride = 1
self.padding = 0
self.output_padding = 0
self.dilation = 1
self.groups = 1
def forward(self, x):
y = F.conv_transpose1d(
x,
self.weight,
None,
self.stride,
self.padding,
self.output_padding,
self.groups,
self.dilation
)
if use_relu:
y = F.relu(y)
return y
class ConvTranspose1d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.deconv = torch.nn.ConvTranspose1d(*args)
self.relu = torch.nn.ReLU()
def forward(self, x):
y = self.deconv(x)
if use_relu:
y = self.relu(y)
return y
conv_transpose1d_input = torch.rand(1, 3, 224)
conv_transpose1d_weight = torch.rand(3, 3, 3)
conv_transpose1d_module_args = (3, 3, 3)
class FunctionalConvTranspose2d(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
self.stride = (1, 1)
self.padding = (0, 0)
self.output_padding = (0, 0)
self.dilation = (1, 1)
self.groups = 1
def forward(self, x):
y = F.conv_transpose2d(
x,
self.weight,
None,
self.stride,
self.padding,
self.output_padding,
self.groups,
self.dilation
)
if use_relu:
y = F.relu(y)
return y
class ConvTranspose2d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.deconv = torch.nn.ConvTranspose2d(*args)
self.relu = torch.nn.ReLU()
def forward(self, x):
y = self.deconv(x)
if use_relu:
y = self.relu(y)
return y
conv_transpose2d_input = torch.rand(1, 3, 224, 224)
conv_transpose2d_weight = torch.rand(3, 3, 3, 3)
conv_transpose2d_module_args = (3, 3, 3)
class FunctionalConvTranspose3d(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = torch.nn.Parameter(weight)
self.stride = (1, 1, 1)
self.padding = (0, 0, 0)
self.output_padding = (0, 0, 0)
self.dilation = (1, 1, 1)
self.groups = 1
def forward(self, x):
y = F.conv_transpose3d(
x,
self.weight,
None,
self.stride,
self.padding,
self.output_padding,
self.groups,
self.dilation
)
if use_relu:
y = F.relu(y)
return y
class ConvTranspose3d(torch.nn.Module):
def __init__(self, *args):
super().__init__()
self.deconv = torch.nn.ConvTranspose3d(*args)
self.relu = torch.nn.ReLU()
def forward(self, x):
y = self.deconv(x)
if use_relu:
y = self.relu(y)
return y
conv_transpose3d_input = torch.rand(1, 3, 32, 224, 224)
conv_transpose3d_weight = torch.rand(3, 3, 3, 3, 3)
conv_transpose3d_module_args = (3, 3, 3)
# is_dynamic, ModuleClass, module_constructor_inputs,
# inputs, quantized_node, weight_prepack_node
tests = [
(
False,
FunctionalConvTranspose1d,
(conv_transpose1d_weight,),
(conv_transpose1d_input,),
ns.call_function(
torch.nn.functional.conv_transpose1d if is_reference else torch.ops.quantized.conv_transpose1d
),
ns.call_function(torch.ops.quantized.conv_transpose1d_prepack),
),
(
False,
FunctionalConvTranspose2d,
(conv_transpose2d_weight,),
(conv_transpose2d_input,),
ns.call_function(
torch.nn.functional.conv_transpose2d if is_reference else torch.ops.quantized.conv_transpose2d
),
ns.call_function(torch.ops.quantized.conv_transpose2d_prepack),
),
(
False,
FunctionalConvTranspose3d,
(conv_transpose3d_weight,),
(conv_transpose3d_input,),
ns.call_function(
torch.nn.functional.conv_transpose3d if is_reference else torch.ops.quantized.conv_transpose3d),
ns.call_function(torch.ops.quantized.conv_transpose3d_prepack),
),
(
False,
ConvTranspose1d,
conv_transpose1d_module_args,
(conv_transpose1d_input,),
ns.call_module(nnqr.ConvTranspose1d if is_reference else nnq.ConvTranspose1d),
None
),
(
False,
ConvTranspose2d,
conv_transpose2d_module_args,
(conv_transpose2d_input,),
ns.call_module(nnqr.ConvTranspose2d if is_reference else nnq.ConvTranspose2d),
None
),
(
False,
ConvTranspose3d,
conv_transpose3d_module_args,
(conv_transpose3d_input,),
ns.call_module(nnqr.ConvTranspose3d if is_reference else nnq.ConvTranspose3d),
None
),
]
return tests
@skipIfNoFBGEMM
def test_conv_transpose_not_reference(self):
""" Test quantizing transposed conv
"""
tests = self._get_conv_transpose_test_cases(use_relu=False, is_reference=False)
for (is_dynamic, ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_node) in tests:
quant_type = QuantType.DYNAMIC if is_dynamic else QuantType.STATIC
node_occurrence = {}
if weight_prepack_node:
node_occurrence[weight_prepack_node] = 0
self.checkGraphModeFxOp(
ModuleClass(*module_constructor_inputs),
inputs, quant_type,
expected_node=quantized_node,
expected_node_occurrence=node_occurrence,
is_reference=False)
@skipIfNoFBGEMM
def test_conv_transpose_reference(self):
""" Test quantizing transposed conv with reference option
"""
tests = self._get_conv_transpose_test_cases(use_relu=False, is_reference=True)
def _get_keys(prefix, is_dynamic):
all_keys = [prefix + "." + k for k in ["weight_qscheme", "weight_dtype"]]
if not is_dynamic:
all_keys.extend([prefix + "." + k for k in ["weight_scale", "weight_zero_point"]])
return all_keys
for (is_dynamic, ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_node) in tests:
quant_type = QuantType.DYNAMIC if is_dynamic else QuantType.STATIC
node_occurrence = {}
if weight_prepack_node:
node_occurrence[weight_prepack_node] = 0
result_dict = self.checkGraphModeFxOp(
ModuleClass(*module_constructor_inputs),
inputs, quant_type,
expected_node=quantized_node,
expected_node_occurrence=node_occurrence,
is_reference=True)
qr = result_dict["quantized_reference"]
def checkWeightQParams(model):
module_name = "deconv"
if hasattr(model, module_name):
self.assertTrue(hasattr(qr.get_submodule(module_name), "weight_qscheme"))
self.assertTrue(hasattr(qr.get_submodule(module_name), "weight_scale"))
self.assertTrue(hasattr(qr.get_submodule(module_name), "weight_zero_point"))
self.assertTrue("Reference" in qr.get_submodule(module_name)._get_name())
def checkSerDeser(model, is_dynamic):
module_name = "deconv"
if hasattr(model, module_name):
# make sure serialization works
state_dict = copy.deepcopy(model.state_dict())
all_keys = _get_keys(module_name, is_dynamic)
for key in all_keys:
self.assertTrue(key in state_dict)
# check load_state_dict restores states
module = getattr(model, module_name)
prev_scale = module.weight_scale
module.weight_scale = None
model.load_state_dict(state_dict)
module = getattr(model, module_name)
self.assertTrue(torch.equal(prev_scale, module.weight_scale))
checkWeightQParams(qr)
qr = copy.deepcopy(qr)
# make sure the qparams are preserved after copy
checkWeightQParams(qr)
checkSerDeser(qr, is_dynamic)
def test_conv_transpose_relu_not_reference(self):
""" Test quantizing transposed conv + relu
Fusion with relu is not supported.
"""
tests = self._get_conv_transpose_test_cases(use_relu=True, is_reference=False)
for (is_dynamic, ModuleClass, module_constructor_inputs,
inputs, quantized_node, weight_prepack_node) in tests:
quant_type = QuantType.DYNAMIC if is_dynamic else QuantType.STATIC
node_occurrence = {}
if weight_prepack_node:
node_occurrence[weight_prepack_node] = 0
if quantized_node.op == 'call_module':
node_occurrence[ns.call_module(nn.ReLU)] = 1
else:
node_occurrence[ns.call_function(F.relu)] = 1
self.checkGraphModeFxOp(
ModuleClass(*module_constructor_inputs),
inputs, quant_type,
expected_node=quantized_node,
expected_node_occurrence=node_occurrence,
is_reference=False)
@skipIfNoFBGEMM
def test_conv_transpose_relu_reference(self):
    """ Test quantizing transposed conv with reference option
    Fusion with relu is not supported.

    Also checks that the reference module keeps its weight qparams across
    deepcopy and state_dict round-trips.
    """
    tests = self._get_conv_transpose_test_cases(use_relu=True, is_reference=True)

    def _get_keys(prefix, is_dynamic):
        # state_dict keys a reference module must serialize; scale and
        # zero_point only exist for static quantization
        all_keys = [prefix + "." + k for k in ["weight_qscheme", "weight_dtype"]]
        if not is_dynamic:
            all_keys.extend([prefix + "." + k for k in ["weight_scale", "weight_zero_point"]])
        return all_keys

    for (is_dynamic, ModuleClass, module_constructor_inputs,
            inputs, quantized_node, weight_prepack_node) in tests:
        quant_type = QuantType.DYNAMIC if is_dynamic else QuantType.STATIC
        node_occurrence = {}
        if weight_prepack_node:
            node_occurrence[weight_prepack_node] = 0
        # relu is not fused with transposed conv, so one relu node remains
        if quantized_node.op == 'call_module':
            node_occurrence[ns.call_module(nn.ReLU)] = 1
        else:
            node_occurrence[ns.call_function(F.relu)] = 1
        result_dict = self.checkGraphModeFxOp(
            ModuleClass(*module_constructor_inputs),
            inputs, quant_type,
            expected_node=quantized_node,
            expected_node_occurrence=node_occurrence,
            is_reference=True)
        qr = result_dict["quantized_reference"]

        def checkWeightQParams(model):
            # Verify the reference module carries weight qparam attributes.
            # Fixed: inspect the `model` argument instead of the closed-over
            # `qr`, so the check is valid for whichever model is passed in.
            module_name = "deconv"
            if hasattr(model, module_name):
                submodule = model.get_submodule(module_name)
                self.assertTrue(hasattr(submodule, "weight_qscheme"))
                self.assertTrue(hasattr(submodule, "weight_scale"))
                self.assertTrue(hasattr(submodule, "weight_zero_point"))
                self.assertTrue("Reference" in submodule._get_name())

        def checkSerDeser(model, is_dynamic):
            module_name = "deconv"
            if hasattr(model, module_name):
                # make sure serialization works
                state_dict = copy.deepcopy(model.state_dict())
                all_keys = _get_keys(module_name, is_dynamic)
                for key in all_keys:
                    self.assertTrue(key in state_dict)
                # check load_state_dict restores states
                module = getattr(model, module_name)
                prev_scale = module.weight_scale
                module.weight_scale = None
                model.load_state_dict(state_dict)
                module = getattr(model, module_name)
                self.assertTrue(torch.equal(prev_scale, module.weight_scale))

        checkWeightQParams(qr)
        qr = copy.deepcopy(qr)
        # make sure the qparams are preserved after copy
        checkWeightQParams(qr)

        checkSerDeser(qr, is_dynamic)
@skipIfNoFBGEMM
def test_dynamic_quant_weight_observer(self):
    '''Verify the weight observer is actually run during the convert step:
    the qparams recorded on the converted model must match qparams computed
    by a fresh observer over the same weight.'''
    class M(torch.nn.Module):
        def __init__(self, weight):
            super().__init__()
            self.weight = torch.nn.Parameter(weight)

        def forward(self, x):
            return F.linear(x, self.weight)

    model = M(torch.rand(1, 1)).eval()
    qconfig = default_dynamic_qconfig
    example_inputs = (torch.rand(1, 1),)
    prepared = prepare_fx(model, {'': qconfig}, example_inputs=example_inputs)
    quantized = convert_to_reference_fx(prepared)
    qparams = (quantized._scale_0, quantized._zero_point_0)
    # Run an independent weight observer over the converted weight.
    weight_obs = qconfig.weight()
    weight_obs(quantized.weight)
    # Extract the scalar values to avoid tensor size mismatch error, torch.Size([]) vs torch.Size([1])
    scale, zero_point = weight_obs.calculate_qparams()
    ref_qparams = (scale.item(), zero_point.item())
    self.assertEqual(qparams, ref_qparams)
def test_conv_bn_relu(self):
    """ Tests fusion and quantization for "Conv - Bn" and "Conv - Bn - ReLU"

    For each spatial dim (1/2/3), with and without relu, and for each static
    quant type, checks the FX-quantized graph contains the expected fused
    module, then compares numerics against eager-mode quantization.
    """
    # dim -> float / quantized module class lookup tables
    convs = {
        1: nn.Conv1d,
        2: nn.Conv2d,
        3: nn.Conv3d,
    }
    bns = {
        1: nn.BatchNorm1d,
        2: nn.BatchNorm2d,
        3: nn.BatchNorm3d,
    }
    quantized_convs = {
        1: nnq.Conv1d,
        2: nnq.Conv2d,
        3: nnq.Conv3d,
    }
    quantized_conv_relus = {
        1: nniq.ConvReLU1d,
        2: nniq.ConvReLU2d,
        3: nniq.ConvReLU3d,
    }

    class M(torch.nn.Module):
        def __init__(self, dim, has_relu):
            super().__init__()
            self.conv = convs[dim](3, 3, 3)
            self.bn = bns[dim](3)
            # Identity placeholder keeps the module structure uniform
            self.relu = nn.ReLU() if has_relu else nn.Identity()
            self.has_relu = has_relu
            self.quant = QuantStub()
            self.dequant = DeQuantStub()

        def forward(self, x):
            x = self.quant(x)
            x = self.conv(x)
            x = self.bn(x)
            if self.has_relu:
                x = self.relu(x)
            x = self.dequant(x)
            return x

    options = itertools.product([1, 2, 3], [True, False], self.static_quant_types)
    for dim, has_relu, quant_type in options:
        # conv+bn(+relu) should fuse into a single quantized module
        expected_node = ns.call_module(
            quantized_conv_relus[dim] if has_relu
            else quantized_convs[dim])
        m = M(dim, has_relu)
        m_eager = copy.deepcopy(m)
        result_dict = self.checkGraphModeFxOp(
            m,
            self.img_data_dict[dim],
            quant_type,
            expected_node=expected_node,
        )
        result = result_dict["quantized_output"]

        # check numerics
        qengine = torch.backends.quantized.engine
        if quant_type == QuantType.STATIC:
            m_eager.eval()
            qconfig = get_default_qconfig(qengine)
            prepare_fn = prepare
            is_qat = False
        else:
            # QAT path: train mode, QAT qconfig, QAT fuse/prepare
            m_eager.train()
            qconfig = get_default_qat_qconfig(qengine)
            prepare_fn = prepare_qat
            is_qat = True

        fuse_list = ["conv", "bn"]
        if has_relu:
            fuse_list.append("relu")
        if is_qat:
            fuse_modules_qat(m_eager, fuse_list, inplace=True)
        else:
            fuse_modules(m_eager, fuse_list, inplace=True)
        m_eager.qconfig = qconfig
        m_eager = prepare_fn(m_eager)
        # NOTE(review): prepared_fx is never used below — looks like leftover
        prepared_fx = result_dict["prepared"]

        # calibrate (or run one QAT step), convert, and compare outputs
        m_eager(*self.img_data_dict[dim][0])
        m_eager = convert(m_eager)
        result_eager = m_eager(*self.img_data_dict[dim][0])
        self.assertEqual(result, result_eager)
def test_linear_bn(self):
    """Linear followed by BatchNorm1d should fuse into a single quantized
    Linear, and FX numerics must match eager-mode quantization."""
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = nn.Linear(4, 4)
            self.bn = nn.BatchNorm1d(4)
            self.quant = QuantStub()
            self.dequant = DeQuantStub()

        def forward(self, x):
            x = self.quant(x)
            x = self.linear(x)
            x = self.bn(x)
            x = self.dequant(x)
            return x

    data = (torch.randn(4, 4),)
    for quant_type in self.static_quant_types:
        # linear+bn fuses, so only a plain quantized Linear is expected
        expected_node = ns.call_module(nnq.Linear)
        m = M()
        m_eager = copy.deepcopy(m)
        result_dict = self.checkGraphModeFxOp(m, data, quant_type, expected_node=expected_node)
        result = result_dict["quantized_output"]

        # check numerics vs eager mode
        fuse_list = ["linear", "bn"]
        qengine = torch.backends.quantized.engine
        if quant_type == QuantType.STATIC:
            m_eager.eval()
            qconfig = get_default_qconfig(qengine)
            prepare_fn = prepare
            fuse_modules(m_eager, fuse_list, inplace=True)
        else:
            # QAT path
            m_eager.train()
            qconfig = get_default_qat_qconfig(qengine)
            prepare_fn = prepare_qat
            fuse_modules_qat(m_eager, fuse_list, inplace=True)
        m_eager.qconfig = qconfig
        m_eager = prepare_fn(m_eager)
        m_eager(*data)
        m_eager = convert(m_eager)
        result_eager = m_eager(*data)
        self.assertEqual(result, result_eager)
@skipIfNoFBGEMM
def test_dynamic_quant_fp16(self):
    """Dynamic fp16 quantization of a functional linear and an nn.Linear:
    after convert, the fp16 weight-prepack node must be folded away."""
    with override_quantized_engine('fbgemm'):
        # functional linear with an explicit weight Parameter
        class Linear(torch.nn.Module):
            def __init__(self, weight):
                super().__init__()
                self.weight = torch.nn.Parameter(weight)

            def forward(self, x):
                return F.linear(x, self.weight)

        linear_input = torch.rand(8, 5)
        linear_weight = torch.rand(10, 5)

        # module linear
        class LinearModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 10)

            def forward(self, x):
                return self.linear(x)

        linear_module_input = torch.rand(8, 5)

        # (ModuleClass, ctor args, inputs, expected quantized node, prepack node)
        tests = [
            (Linear, (linear_weight,), (linear_input,),
             ns.call_function(torch.ops.quantized.linear_dynamic_fp16),
             ns.call_function(torch.ops.quantized.linear_prepack_fp16)),
            (LinearModule, (), (linear_module_input,),
             ns.call_module(nnqd.Linear),
             None),
        ]
        for (ModuleClass, module_constructor_inputs,
             inputs, quantized_node, weight_prepack_node) in tests:
            # NOTE(review): quantized_node is unpacked but never asserted on;
            # only the prepack-node occurrence is checked below
            for is_reference in [True, False]:
                node_occurrence = {}
                if weight_prepack_node:
                    # prepack must not appear in the converted graph
                    node_occurrence[weight_prepack_node] = 0
                m = ModuleClass(*module_constructor_inputs).eval()
                qconfig_dict = {"": float16_dynamic_qconfig}
                m = prepare_fx(m, qconfig_dict, example_inputs=inputs)
                convert_fn = convert_to_reference_fx if is_reference else convert_fx
                m = convert_fn(m)
                self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@override_qengines
def test_qat_prepare_device_affinity(self):
    """
    Tests that FX QAT prepare pass respects device affinity

    After prepare_qat_fx on a CUDA model, every parameter and buffer
    (including inserted observers/fake-quants) must live on the same device.
    """
    class Model(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = nn.Conv2d(1, 1, 1)
            self.bn = nn.BatchNorm2d(1)
            self.relu = nn.ReLU()

        def forward(self, x):
            x = self.conv(x)
            x = self.bn(x)
            x = self.relu(x)
            return x

    model = Model()
    qengine = torch.backends.quantized.engine
    qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig(qengine)}
    device = torch.device('cuda:0')
    model.to(device)

    example_inputs = (torch.randn(4, 1, 4, 4, device=device),)
    # QAT prepare
    model = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)

    # ensure that running an input on CUDA works without any needed changes
    model(*example_inputs)

    # ensure all buffers and parameters are on the device we expect
    model_devices = {p.device for p in model.parameters()} | \
        {p.device for p in model.buffers()}
    self.assertEqual(len(model_devices), 1)
    model_device = next(iter(model_devices))
    self.assertEqual(model_device, device)
@skipIfNoFBGEMM
def test_dict_output(self):
    """Quantization must run end-to-end on a model whose forward takes
    and returns dictionaries instead of plain tensors."""
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)

        def forward(self, x):
            return {"output": self.conv(x["input"])}

    example_inputs = ({"input": torch.randn(1, 1, 1, 1)},)
    model = M().eval()
    prepared = prepare_fx(model, {"": default_qconfig}, example_inputs=example_inputs)
    prepared(*example_inputs)   # calibration
    quantized = convert_fx(prepared)
    quantized(*example_inputs)  # smoke-check the converted model runs
@override_qengines
def test_attention(self):
    """ Make sure quantization runs for a corner case in attention module

    The forward mixes chunk/view/transpose, a torch._assert, and mm with a
    selective qconfig (only Conv2d quantized, global qconfig None).
    """
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv(x)
            # split into query/key/value along the batch dim
            q, k, v = x.chunk(3, dim=0)
            q = q.contiguous().view(-1, 1).transpose(0, 1)
            k = k.contiguous().view(-1, 1).transpose(0, 1)
            v = v.contiguous().view(-1, 1).transpose(0, 1)
            # torch._assert must survive FX tracing/quantization
            torch._assert(
                k.size(1) == 1, "key size should be equal to 1"
            )
            r = torch.mm(k, v)
            return q * k + r

    example_inputs = (torch.randn(3, 1, 1, 1),)
    m = M().eval()
    # only Conv2d gets a qconfig; everything else stays float
    qconfig_dict = {
        "": None,
        "object_type": [
            (nn.Conv2d, default_qconfig),
        ]
    }
    # make sure it runs
    m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    m(*example_inputs)
    m = convert_fx(m)
    m(*example_inputs)
def _test_standalone_module(
        self,
        interface_config,
        prepare_count_check,
        standalone_prepare_count_check,
        convert_count_check,
        standalone_convert_count_check):
    """ Test standalone module with different quantized input/quantized output
    configurations

    Args:
        interface_config: standalone-module interface spec (which input/output
            indices are quantized)
        prepare_count_check: expected node occurrences in the parent module
            after prepare
        standalone_prepare_count_check: expected node occurrences inside the
            standalone submodule after prepare
        convert_count_check: expected node occurrences in the parent module
            after convert
        standalone_convert_count_check: expected node occurrences inside the
            standalone submodule after convert

    Also checks numerics against a flat reference model with the same weights.
    """
    class StandaloneModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)

        def forward(self, x):
            return self.conv(x)

    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)
            self.standalone = StandaloneModule()

        def forward(self, x):
            x = self.conv(x)
            x = self.standalone(x)
            return x

    # flat reference model: two convs, no standalone submodule
    class RefM(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = torch.nn.Conv2d(1, 1, 1)
            self.conv2 = torch.nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv1(x)
            x = self.conv2(x)
            return x

    example_inputs = (torch.randn(1, 1, 1, 1),)
    # instantiate M and RefM and align the parameters
    original_m = M().eval()
    original_ref_m = RefM().eval()
    original_ref_m.conv1.weight = torch.nn.Parameter(original_m.conv.weight.detach())
    original_ref_m.conv1.bias = torch.nn.Parameter(original_m.conv.bias.detach())
    original_ref_m.conv2.weight = torch.nn.Parameter(original_m.standalone.conv.weight.detach())
    original_ref_m.conv2.bias = torch.nn.Parameter(original_m.standalone.conv.bias.detach())

    # exercise both ways of designating a standalone module: by name and by class
    for is_name in [True, False]:
        sm_example_inputs = example_inputs
        if is_name:
            prepare_config = {
                "standalone_module_name": [("standalone", None, sm_example_inputs, interface_config, None)]
            }
        else:
            prepare_config = {
                "standalone_module_class": [(StandaloneModule, None, sm_example_inputs, interface_config, None)]
            }

        original_m_copy = copy.deepcopy(original_m)
        original_ref_m_copy = copy.deepcopy(original_ref_m)

        qconfig_dict = {"": default_qconfig}
        # check prepared model
        m = prepare_fx(
            original_m_copy,
            qconfig_dict,
            example_inputs=example_inputs,
            prepare_custom_config=prepare_config)
        # calibration
        m(*example_inputs)
        self.checkGraphModuleNodes(m, expected_node_occurrence=prepare_count_check)
        self.checkGraphModuleNodes(m.standalone, expected_node_occurrence=standalone_prepare_count_check)

        # check converted/quantized model
        m = convert_fx(m)
        self.checkGraphModuleNodes(m, expected_node_occurrence=convert_count_check)
        self.checkGraphModuleNodes(m.standalone, expected_node_occurrence=standalone_convert_count_check)
        res = m(*example_inputs)

        # quantize the reference model
        ref_m = prepare_fx(
            original_ref_m_copy,
            qconfig_dict,
            example_inputs=example_inputs,
        )
        ref_m(*example_inputs)
        ref_m = convert_fx(ref_m)
        ref_res = ref_m(*example_inputs)
        self.assertEqual(res, ref_res)
def test_standalone_module_float_interface(self):
    """Standalone module with a float input/output interface: the standalone
    module quantizes/dequantizes internally."""
    float_interface_config = {
        "input_quantized_idxs": [],  # float input
        "output_quantized_idxs": [],  # float output
    }
    interface_config = float_interface_config
    # input and output of first conv, observer for standalone module
    # will be inserted in the standalone module itself
    prepare_count_check = {
        ns.call_module(torch.ao.quantization.MinMaxObserver): 2
    }
    # for input and output of conv in the standalone module
    standalone_prepare_count_check = {
        ns.call_module(torch.ao.quantization.MinMaxObserver): 2
    }
    convert_count_check = {
        ns.call_function(torch.quantize_per_tensor) : 1,
        ns.call_module(nnq.Conv2d) : 1,
        ns.call_method("dequantize") : 1,
    }
    standalone_convert_count_check = {
        # standalone module will take float as input and output
        # so we'll see quantize and dequantize in the modoule
        ns.call_function(torch.quantize_per_tensor) : 1,
        ns.call_module(nnq.Conv2d): 1,
        ns.call_method("dequantize") : 1,
    }
    self._test_standalone_module(
        interface_config,
        prepare_count_check,
        standalone_prepare_count_check,
        convert_count_check,
        standalone_convert_count_check)
def test_standalone_module_quantized_interface(self):
    """Standalone module with a quantized input/output interface: the parent
    module handles quantize/dequantize at the boundary."""
    quantized_interface_config = {
        "input_quantized_idxs": [0],  # quantized input
        "output_quantized_idxs": [0],  # quantized output
    }
    interface_config = quantized_interface_config
    # observer for input and output of first conv
    prepare_count_check = {
        ns.call_module(torch.ao.quantization.MinMaxObserver): 2
    }
    # for output of conv in the standalone module
    standalone_prepare_count_check = {
        ns.call_module(torch.ao.quantization.MinMaxObserver): 1
    }
    convert_count_check = {
        # quantizing input for conv
        ns.call_function(torch.quantize_per_tensor) : 1,
        ns.call_module(nnq.Conv2d) : 1,
        # dequantizing output of standalone module
        ns.call_method("dequantize") : 1,
    }
    standalone_convert_count_check = {
        # quantization of input happens in parent module
        # quantization of output happens in the quantized conv module
        ns.call_function(torch.quantize_per_tensor) : 0,
        ns.call_module(nnq.Conv2d): 1,
        # dequantization for output happens in parent module
        ns.call_method("dequantize") : 0,
    }
    self._test_standalone_module(
        interface_config,
        prepare_count_check,
        standalone_prepare_count_check,
        convert_count_check,
        standalone_convert_count_check)
@skipIfNoFBGEMM
def test_qconfig_none(self):
    """A `module_name` entry with qconfig None exempts that submodule:
    conv1 is quantized, conv2 stays a float module."""
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(1, 1, 1)
            self.conv2 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            out = self.conv1(x)
            out = self.conv2(out)
            return out

    model = M().eval()
    qconfig_dict = {
        "": default_qconfig,
        "module_name": [("conv2", None)],
    }
    example_inputs = (torch.randn(1, 1, 1, 1),)
    model = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
    model(*example_inputs)
    model = convert_fx(model)
    model(*example_inputs)
    # conv1 is quantized; conv2 (qconfig None) remains nn.Conv2d after a dequantize
    expected_node_list = [
        ns.call_function(torch.quantize_per_tensor),
        ns.call_module(nnq.Conv2d),
        ns.call_method("dequantize"),
        ns.call_module(nn.Conv2d),
    ]
    self.checkGraphModuleNodes(model, expected_node_list=expected_node_list)
def test_qconfig_module_type(self):
    """An `object_type` entry for Conv2d quantizes the conv but leaves the
    Linear (no matching entry) as a float module."""
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = nn.Conv2d(1, 1, 1)
            self.linear = nn.Linear(9, 3)

        def forward(self, x):
            x = self.conv(x)
            x = x.reshape((1, -1))
            x = self.linear(x)
            return x

    m = M().eval()
    qconfig_dict = {"object_type": [(torch.nn.Conv2d, default_qconfig)]}
    example_inputs = (torch.randn(1, 1, 3, 3),)
    m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    m(*example_inputs)
    m = convert_fx(m)
    m(*example_inputs)
    # conv is quantized, linear is not quantized
    node_list = [
        ns.call_function(torch.quantize_per_tensor),
        ns.call_module(nnq.Conv2d),
        ns.call_method("dequantize"),
        ns.call_module(nn.Linear),
    ]
    self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_qconfig_qat_module_type(self):
    """QAT with `object_type` qconfigs: Linear+ReLU fuses into LinearReLU,
    the standalone Linear quantizes on its own."""
    class LinearRelu(nn.Sequential):
        def __init__(self) -> None:
            super().__init__(
                nn.Linear(5, 5),
                nn.ReLU(),
            )

    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.lin_relu = LinearRelu()
            self.linear = nn.Linear(5, 5)

        def forward(self, x):
            x = self.lin_relu(x)
            x = self.linear(x)
            return x

    model = M().train()

    # global qconfig None; only Linear and ReLU object types get QAT qconfigs
    qconfig_dict = {
        "": None,
        "object_type": [
            (torch.nn.Linear, default_qat_qconfig),
            (torch.nn.ReLU, default_qat_qconfig),
        ],
    }
    example_inputs = (torch.rand(5, 5),)
    m = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)
    m(*example_inputs)
    m = convert_fx(m)
    m(*example_inputs)
    node_list = [
        ns.call_function(torch.quantize_per_tensor),
        ns.call_module(nniq.LinearReLU),
        ns.call_module(nnq.Linear),
        ns.call_method("dequantize"),
    ]
    self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_qconfig_function(self):
    """An `object_type` qconfig entry applies to functional ops
    (operator.add here), producing a quantized add in the graph."""
    class M(torch.nn.Module):
        def forward(self, x, y):
            return x + y

    model = M().eval()
    qconfig_dict = {"object_type": [(operator.add, default_qconfig)]}
    tensor = torch.randn(1, 1, 1, 1)
    example_inputs = (tensor, tensor)
    model = prepare_fx(model, qconfig_dict, example_inputs)
    model(*example_inputs)
    model = convert_fx(model)
    model(*example_inputs)
    # the add is quantized: quantize -> quantized.add -> dequantize
    expected_node_list = [
        ns.call_function(torch.quantize_per_tensor),
        ns.call_function(torch.ops.quantized.add),
        ns.call_method("dequantize"),
    ]
    self.checkGraphModuleNodes(model, expected_node_list=expected_node_list)
def test_qconfig_module_name_regex(self):
    """A `module_name_regex` entry quantizes every module whose name
    matches the pattern (both conv1 and conv2 here)."""
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(1, 1, 1)
            self.conv2 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            out = self.conv1(x)
            out = self.conv2(out)
            return out

    model = M().eval()
    qconfig_dict = {"module_name_regex": [("conv*", default_qconfig)]}
    example_inputs = (torch.randn(1, 1, 1, 1),)
    model = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
    model(*example_inputs)
    model = convert_fx(model)
    model(*example_inputs)
    # both convs match the regex, so both are quantized back to back
    expected_node_list = [
        ns.call_function(torch.quantize_per_tensor),
        ns.call_module(nnq.Conv2d),
        ns.call_module(nnq.Conv2d),
        ns.call_method("dequantize"),
    ]
    self.checkGraphModuleNodes(model, expected_node_list=expected_node_list)
def test_qconfig_precedence(self):
    """qconfig_dict precedence: global < object_type < module_name_regex
    < module_name; each submodule below hits one level of the chain."""
    for device in get_supported_device_types():
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = nn.Linear(1, 1)
                self.conv = nn.Conv2d(1, 1, 1)
                self.module_conv1 = nn.Conv2d(1, 1, 1)
                self.module_conv2 = nn.Conv2d(1, 1, 1)

            def forward(self, x):
                # global
                x = self.linear(x)
                # global + object_type --> object_type
                x = self.conv(x)
                # global + object_type + module_name_regex --> module_name_regex
                x = self.module_conv1(x)
                # global + object_type + module_name_regex + module_name --> module_name
                x = self.module_conv2(x)
                return x

        m = M().to(device).eval()

        global_qconfig = default_qconfig
        object_type_qconfig = default_dynamic_qconfig
        module_name_regex_qconfig = float16_dynamic_qconfig
        module_name_qconfig = default_qat_qconfig
        qconfig_dict = {
            "": global_qconfig,
            "object_type": [(nn.Conv2d, object_type_qconfig)],
            "module_name_regex": [("module_conv*", module_name_regex_qconfig)],
            "module_name": [("module_conv2", module_name_qconfig)]}
        m_prep = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 1),))
        # compare observer factory functions (.p.func) to identify which
        # qconfig won for each submodule
        self.assertEqual(m_prep.linear.qconfig.activation.p.func, global_qconfig.activation.p.func)
        self.assertEqual(m_prep.linear.qconfig.weight.p.func, global_qconfig.weight.p.func)
        self.assertEqual(m_prep.conv.qconfig.activation.p.func, object_type_qconfig.activation.p.func)
        self.assertEqual(m_prep.conv.qconfig.weight.p.func, object_type_qconfig.weight.p.func)
        self.assertEqual(m_prep.module_conv1.qconfig.activation.p.func, module_name_regex_qconfig.activation.p.func)
        self.assertEqual(m_prep.module_conv1.qconfig.weight.p.func, module_name_regex_qconfig.weight.p.func)
        self.assertEqual(m_prep.module_conv2.qconfig.activation.p.func, module_name_qconfig.activation.p.func)
        self.assertEqual(m_prep.module_conv2.qconfig.weight.p.func, module_name_qconfig.weight.p.func)
def test_qconfig_module_name_object_type_order(self):
    """`module_name_object_type_order` qconfig entries: for a given module
    FQN and op type, only the call at the listed index is quantized.
    Also checks that a None entry at an index overrides the global qconfig."""
    class M1(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.fc1 = nn.Linear(1, 1)
            self.fc2 = nn.Linear(1, 1)

        def forward(self, x):
            x = self.fc1(x)
            x = self.fc2(x)
            x = torch.add(x, x)
            x = torch.add(x, x)
            return x

    class M2(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.fc1 = nn.Linear(1, 1)
            self.fc2 = nn.Linear(1, 1)
            self.m1 = M1()

        def forward(self, x):
            x = self.fc1(x)
            x = self.fc2(x)
            x = torch.add(x, x)
            x = torch.add(x, x)
            x = self.m1(x)
            return x

    class M3(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.fc1 = nn.Linear(1, 1)
            self.fc2 = nn.Linear(1, 1)
            self.m2 = M2()

        def forward(self, x):
            x = self.fc1(x)
            x = self.fc2(x)
            x = torch.add(x, x)
            x = torch.add(x, x)
            x = self.m2(x)
            return x

    m = M3().eval()
    qconfig_dict = {
        "module_name_object_type_order": [
            # test various FQNs: global, single child, multiple children
            ("", nn.Linear, 0, torch.ao.quantization.default_qconfig),
            ("", torch.add, 0, torch.ao.quantization.default_qconfig),
            ("m2", nn.Linear, 1, torch.ao.quantization.default_qconfig),
            ("m2", torch.add, 1, torch.ao.quantization.default_qconfig),
            ("m2.m1", nn.Linear, 0, torch.ao.quantization.default_qconfig),
            ("m2.m1", torch.add, 0, torch.ao.quantization.default_qconfig),
        ],
    }
    example_inputs = (torch.randn(1, 1, 1, 1),)
    m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    m(*example_inputs)
    m = convert_fx(m)
    m(*example_inputs)

    # per-scope expectations: index 0 quantized in m3 and m2.m1,
    # index 1 quantized in m2, the other index stays float
    node_list = [
        # m3
        ns.call_function(torch.quantize_per_tensor),
        ns.call_module(nnq.Linear),
        ns.call_method("dequantize"),
        ns.call_module(nn.Linear),
        ns.call_function(torch.quantize_per_tensor),
        ns.call_function(torch.ops.quantized.add),
        ns.call_method("dequantize"),
        ns.call_function(torch.add),
        # m2
        ns.call_module(nn.Linear),
        ns.call_function(torch.quantize_per_tensor),
        ns.call_module(nnq.Linear),
        ns.call_method("dequantize"),
        ns.call_function(torch.add),
        ns.call_function(torch.quantize_per_tensor),
        ns.call_function(torch.ops.quantized.add),
        # m1
        ns.call_module(nnq.Linear),
        ns.call_method("dequantize"),
        ns.call_module(nn.Linear),
        ns.call_function(torch.quantize_per_tensor),
        ns.call_function(torch.ops.quantized.add),
        ns.call_method("dequantize"),
        ns.call_function(torch.add),
    ]
    self.checkGraphModuleNodes(m, expected_node_list=node_list)

    # test that function order overrides global qconfig
    class M4(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.fc1 = nn.Linear(1, 1)
            self.fc2 = nn.Linear(1, 1)

        def forward(self, x):
            x = self.fc1(x)
            x = self.fc2(x)
            x = torch.add(x, x)
            x = torch.add(x, x)
            return x

    m = M4().eval()
    qconfig_dict = {
        "": torch.ao.quantization.default_qconfig,
        "module_name_object_type_order": [
            # None at index 1 exempts the second Linear and second add
            ("", nn.Linear, 1, None),
            ("", torch.add, 1, None),
        ],
    }
    example_inputs = (torch.randn(1, 1, 1, 1),)
    m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    m(*example_inputs)
    m = convert_fx(m)
    m(*example_inputs)

    node_list = [
        ns.call_function(torch.quantize_per_tensor),
        ns.call_module(nnq.Linear),
        ns.call_method("dequantize"),
        ns.call_module(nn.Linear),
        ns.call_function(torch.quantize_per_tensor),
        ns.call_function(torch.ops.quantized.add),
        ns.call_method("dequantize"),
        ns.call_function(torch.add),
    ]
    self.checkGraphModuleNodes(m, expected_node_list=node_list)
@override_qengines
def test_qconfig_dict_with_fused_modules(self):
    """prepare_fx with a default QConfigMapping must not crash on models
    containing fusable patterns (linear/conv + bn + relu in module,
    functional, and builtin relu forms). Regression test for
    https://github.com/pytorch/pytorch/issues/75825."""
    class LinearReLUModel(torch.nn.Module):
        def __init__(self, relu):
            super().__init__()
            self.linear = torch.nn.Linear(3, 3)
            self.relu = relu

        def forward(self, x):
            x = self.linear(x)
            x = self.relu(x)
            return x

    class ConvReLUModel(torch.nn.Module):
        def __init__(self, relu):
            super().__init__()
            self.conv = torch.nn.Conv1d(3, 3, 3)
            self.relu = relu

        def forward(self, x):
            x = self.conv(x)
            x = self.relu(x)
            return x

    class ConvBnReLUModel(torch.nn.Module):
        def __init__(self, relu):
            super().__init__()
            self.conv = torch.nn.Conv1d(3, 3, 3)
            self.bn = torch.nn.BatchNorm1d(3)
            self.relu = relu

        def forward(self, x):
            x = self.conv(x)
            x = self.bn(x)
            x = self.relu(x)
            return x

    for model in [LinearReLUModel, ConvReLUModel, ConvBnReLUModel]:
        for relu in [torch.nn.ReLU(), torch.nn.functional.relu, torch.relu]:
            m = model(relu).eval()
            qengine = torch.backends.quantized.engine
            qconfig_dict = torch.ao.quantization.get_default_qconfig_mapping(qengine)
            # should not crash as in https://github.com/pytorch/pytorch/issues/75825
            prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 3, 3, 3),))
# TODO: move QConfigMapping tests to test/quantization/core
def test_qconfig_mapping_set_global(self):
    """QConfigMapping.set_global stores the global qconfig (default None)."""
    qconfig = get_default_qconfig()
    mapping = QConfigMapping()
    self.assertEqual(mapping.global_qconfig, None)
    mapping.set_global(qconfig)
    self.assertEqual(mapping.global_qconfig, qconfig)
def test_qconfig_mapping_set_object_type(self):
    """QConfigMapping.set_object_type: insertion, override of an existing
    key, and lookup through _get_object_type_qconfig."""
    qconfig1 = get_default_qconfig()
    qconfig2 = get_default_qconfig()
    qconfig3 = get_default_qconfig()
    # separately constructed QConfig instances compare unequal
    self.assertNotEqual(qconfig1, qconfig2)
    self.assertNotEqual(qconfig1, qconfig3)
    qconfig_mapping = QConfigMapping()
    self.assertEqual(len(qconfig_mapping.object_type_qconfigs), 0)

    # Insert some entries
    qconfig_mapping.set_object_type(torch.nn.Linear, qconfig1)
    qconfig_mapping.set_object_type(torch.nn.ReLU, qconfig2)
    self.assertEqual(len(qconfig_mapping.object_type_qconfigs), 2)
    self.assertEqual(qconfig_mapping.object_type_qconfigs[torch.nn.Linear], qconfig1)
    self.assertEqual(qconfig_mapping.object_type_qconfigs[torch.nn.ReLU], qconfig2)

    # Override existing key
    qconfig_mapping.set_object_type(torch.nn.Linear, qconfig3)
    self.assertEqual(qconfig_mapping.object_type_qconfigs[torch.nn.Linear], qconfig3)
    self.assertEqual(qconfig_mapping.object_type_qconfigs[torch.nn.ReLU], qconfig2)
    # lookup helper returns the fallback (None) on a miss
    self.assertEqual(_get_object_type_qconfig(qconfig_mapping, torch.nn.Linear, None), qconfig3)
    self.assertEqual(_get_object_type_qconfig(qconfig_mapping, torch.nn.ReLU, None), qconfig2)
    self.assertEqual(_get_object_type_qconfig(qconfig_mapping, "nomatch", None), None)
def test_qconfig_mapping_set_module_name_regex(self):
    """QConfigMapping.set_module_name_regex: insertion, override, and
    first-matching-pattern lookup semantics."""
    qconfig1 = get_default_qconfig()
    qconfig2 = get_default_qconfig()
    qconfig3 = get_default_qconfig()
    # separately constructed QConfig instances compare unequal
    self.assertNotEqual(qconfig1, qconfig2)
    self.assertNotEqual(qconfig1, qconfig3)
    qconfig_mapping = QConfigMapping()
    self.assertEqual(len(qconfig_mapping.module_name_regex_qconfigs), 0)

    # Insert some entries
    qconfig_mapping.set_module_name_regex("foo.*bar", qconfig1)
    qconfig_mapping.set_module_name_regex("foo.*", qconfig2)
    self.assertEqual(len(qconfig_mapping.module_name_regex_qconfigs), 2)
    self.assertEqual(qconfig_mapping.module_name_regex_qconfigs["foo.*bar"], qconfig1)
    self.assertEqual(qconfig_mapping.module_name_regex_qconfigs["foo.*"], qconfig2)

    # Override existing key
    qconfig_mapping.set_module_name_regex("foo.*bar", qconfig3)
    self.assertEqual(qconfig_mapping.module_name_regex_qconfigs["foo.*bar"], qconfig3)
    self.assertEqual(qconfig_mapping.module_name_regex_qconfigs["foo.*"], qconfig2)
    # the first regex (in insertion order) that matches wins
    self.assertEqual(_get_module_name_regex_qconfig(qconfig_mapping, "foo123bar", None), qconfig3)
    self.assertEqual(_get_module_name_regex_qconfig(qconfig_mapping, "foobar", None), qconfig3)
    self.assertEqual(_get_module_name_regex_qconfig(qconfig_mapping, "foobaz", None), qconfig2)
    self.assertEqual(_get_module_name_regex_qconfig(qconfig_mapping, "foo", None), qconfig2)
    self.assertEqual(_get_module_name_regex_qconfig(qconfig_mapping, "nomatch", None), None)
def test_qconfig_mapping_set_module_name(self):
    """QConfigMapping.set_module_name inserts per-module qconfigs and
    overrides an existing key in place."""
    qconfig1 = get_default_qconfig()
    qconfig2 = get_default_qconfig()
    qconfig3 = get_default_qconfig()
    self.assertNotEqual(qconfig1, qconfig2)
    self.assertNotEqual(qconfig1, qconfig3)
    mapping = QConfigMapping()
    self.assertEqual(len(mapping.module_name_qconfigs), 0)

    # insert two distinct entries
    mapping.set_module_name("mod1", qconfig1)
    mapping.set_module_name("mod2", qconfig2)
    self.assertEqual(len(mapping.module_name_qconfigs), 2)
    self.assertEqual(mapping.module_name_qconfigs["mod1"], qconfig1)
    self.assertEqual(mapping.module_name_qconfigs["mod2"], qconfig2)

    # setting an existing key replaces its qconfig
    mapping.set_module_name("mod1", qconfig3)
    self.assertEqual(mapping.module_name_qconfigs["mod1"], qconfig3)
    self.assertEqual(mapping.module_name_qconfigs["mod2"], qconfig2)
    # the lookup helper honors the mapping and returns the fallback on a miss
    self.assertEqual(_get_module_name_qconfig(mapping, "mod1", None), qconfig3)
    self.assertEqual(_get_module_name_qconfig(mapping, "mod2", None), qconfig2)
    self.assertEqual(_get_module_name_qconfig(mapping, "nomatch", None), None)
def test_qconfig_mapping_set_module_name_object_type_order(self):
    """QConfigMapping.set_module_name_object_type_order: insertion keeps
    ordering, overriding a key preserves its position, and lookups fall
    back to None when (name, type, index) does not match."""
    qconfig1 = get_default_qconfig()
    qconfig2 = get_default_qconfig()
    qconfig3 = get_default_qconfig()
    # separately constructed QConfig instances compare unequal
    self.assertNotEqual(qconfig1, qconfig2)
    self.assertNotEqual(qconfig1, qconfig3)
    qconfig_mapping = QConfigMapping()
    self.assertEqual(len(qconfig_mapping.module_name_object_type_order_qconfigs), 0)

    # Insert some entries
    qconfig_mapping.set_module_name_object_type_order("mod1", torch.nn.Linear, 0, qconfig1)
    qconfig_mapping.set_module_name_object_type_order("mod2", torch.nn.ReLU, 1, qconfig2)
    self.assertEqual(len(qconfig_mapping.module_name_object_type_order_qconfigs), 2)
    # entries are keyed by (module name, op type, call index)
    key1 = ("mod1", torch.nn.Linear, 0)
    key2 = ("mod2", torch.nn.ReLU, 1)
    self.assertEqual(next(iter(qconfig_mapping.module_name_object_type_order_qconfigs)), key1)
    self.assertEqual(list(qconfig_mapping.module_name_object_type_order_qconfigs)[1], key2)
    self.assertEqual(qconfig_mapping.module_name_object_type_order_qconfigs[key1], qconfig1)
    self.assertEqual(qconfig_mapping.module_name_object_type_order_qconfigs[key2], qconfig2)
    self.assertEqual(_maybe_adjust_qconfig_for_module_name_object_type_order(
        qconfig_mapping, "mod1", torch.nn.Linear, 0, None), qconfig1)
    self.assertEqual(_maybe_adjust_qconfig_for_module_name_object_type_order(
        qconfig_mapping, "mod2", torch.nn.ReLU, 1, None), qconfig2)

    # Override existing key
    qconfig_mapping.set_module_name_object_type_order("mod1", torch.nn.Linear, 0, qconfig3)
    self.assertEqual(len(qconfig_mapping.module_name_object_type_order_qconfigs), 2)
    self.assertEqual(next(iter(qconfig_mapping.module_name_object_type_order_qconfigs)), key1)
    self.assertEqual(list(qconfig_mapping.module_name_object_type_order_qconfigs)[1], key2)
    self.assertEqual(qconfig_mapping.module_name_object_type_order_qconfigs[key1], qconfig3)
    self.assertEqual(qconfig_mapping.module_name_object_type_order_qconfigs[key2], qconfig2)
    self.assertEqual(_maybe_adjust_qconfig_for_module_name_object_type_order(
        qconfig_mapping, "mod1", torch.nn.Linear, 0, None), qconfig3)
    self.assertEqual(_maybe_adjust_qconfig_for_module_name_object_type_order(
        qconfig_mapping, "mod2", torch.nn.ReLU, 1, None), qconfig2)

    # No match
    self.assertEqual(_maybe_adjust_qconfig_for_module_name_object_type_order(
        qconfig_mapping, "mod123", torch.nn.Linear, 0, None), None)
    self.assertEqual(_maybe_adjust_qconfig_for_module_name_object_type_order(
        qconfig_mapping, "mod1", torch.nn.Linear, 35, None), None)
    self.assertEqual(_maybe_adjust_qconfig_for_module_name_object_type_order(
        qconfig_mapping, "mod2", torch.nn.Conv2d, 1, None), None)
def _get_qconfig_dict_for_qconfig_mapping_test(self, global_qconfig, qconfig1, qconfig2):
"""
Return a dummy qconfig_dict to test QConfigMapping's to_dict and from_dict methods.
"""
return {
_GLOBAL_DICT_KEY: global_qconfig,
_OBJECT_TYPE_DICT_KEY: [
(torch.nn.Linear, qconfig1),
(torch.nn.ReLU, qconfig2),
],
_MODULE_NAME_REGEX_DICT_KEY: [
("foo.*bar", qconfig1),
("foo.*", qconfig2),
],
_MODULE_NAME_DICT_KEY: [
("bazbaz", qconfig1),
("borbor", qconfig2),
],
_MODULE_NAME_OBJECT_TYPE_ORDER_DICT_KEY: [
("bazbaz", torch.nn.Linear, 0, qconfig1),
("foofoo", torch.nn.ReLU, 1, qconfig2),
],
}
with self.assertRaises(ValueError) as context:
m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 3, 3, 3),)) # noqa: F821
self.assertTrue(
'Expected qconfig_dict to have the following keys:' in str(context.exception)
)
self.assertTrue('But found \'object_typo\' instead.' in str(context.exception))
    def test_qconfig_mapping_from_dict(self):
        """QConfigMapping.from_dict should populate all supported sections and
        tolerate an unrecognized top-level key without raising."""
        global_qconfig = QConfig(123, "global")
        qconfig1 = QConfig(1, "one")
        qconfig2 = QConfig(2, "two")
        qconfig_dict = self._get_qconfig_dict_for_qconfig_mapping_test(global_qconfig, qconfig1, qconfig2)
        # An unknown key is injected on purpose; from_dict is still expected
        # to succeed and fill in the known sections below.
        qconfig_dict["undefined_dict_key"] = [(123, qconfig1), (234, qconfig2)]
        qconfig_mapping = QConfigMapping.from_dict(qconfig_dict)
        self.assertEqual(qconfig_mapping.global_qconfig, global_qconfig)
        self.assertEqual(qconfig_mapping.object_type_qconfigs, OrderedDict({
            torch.nn.Linear: qconfig1,
            torch.nn.ReLU: qconfig2,
        }))
        self.assertEqual(qconfig_mapping.module_name_regex_qconfigs, OrderedDict({
            "foo.*bar": qconfig1,
            "foo.*": qconfig2,
        }))
        self.assertEqual(qconfig_mapping.module_name_qconfigs, OrderedDict({
            "bazbaz": qconfig1,
            "borbor": qconfig2,
        }))
        self.assertEqual(qconfig_mapping.module_name_object_type_order_qconfigs, OrderedDict({
            ("bazbaz", torch.nn.Linear, 0): qconfig1,
            ("foofoo", torch.nn.ReLU, 1): qconfig2,
        }))
    def test_qconfig_mapping_to_dict(self):
        """Building a QConfigMapping via its fluent setters and calling
        to_dict should reproduce the reference dummy qconfig_dict."""
        global_qconfig = QConfig(123, "global")
        qconfig1 = QConfig(1, "one")
        qconfig2 = QConfig(2, "two")
        qconfig_mapping = QConfigMapping().set_global(global_qconfig) \
            .set_object_type(torch.nn.Linear, qconfig1) \
            .set_object_type(torch.nn.ReLU, qconfig2) \
            .set_module_name_regex("foo.*bar", qconfig1) \
            .set_module_name_regex("foo.*", qconfig2) \
            .set_module_name("bazbaz", qconfig1) \
            .set_module_name("borbor", qconfig2) \
            .set_module_name_object_type_order("bazbaz", torch.nn.Linear, 0, qconfig1) \
            .set_module_name_object_type_order("foofoo", torch.nn.ReLU, 1, qconfig2)
        qconfig_dict = self._get_qconfig_dict_for_qconfig_mapping_test(global_qconfig, qconfig1, qconfig2)
        self.assertEqual(qconfig_mapping.to_dict(), qconfig_dict)
def test_qconfig_mapping_repr(self):
self.assertTrue(isinstance(get_default_qconfig_mapping().__repr__(), str))
    def test_default_qconfig_mapping_override_global(self):
        """Overriding the global qconfig on the default mapping should make
        prepare_fx insert observers of the overridden type (MinMaxObserver)
        instead of the original default (HistogramObserver)."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(1, 1, 1)
            def forward(self, x):
                return self.conv(x)
        m = M().eval()
        my_qconfig = QConfig(activation=MinMaxObserver, weight=default_weight_observer)
        qconfig_mapping = get_default_qconfig_mapping()
        # Override global qconfig
        old_global_qconfig = qconfig_mapping.global_qconfig
        qconfig_mapping.set_global(my_qconfig)
        # Verify the correct qconfig was used
        example_inputs = (torch.randn(1, 1, 1, 1),)
        m = prepare_fx(m, qconfig_mapping, example_inputs)
        # Sanity check: the old and new global qconfigs produce different
        # observer types, so the attribute check below is meaningful.
        self.assertTrue(isinstance(old_global_qconfig.activation(), HistogramObserver))
        self.assertTrue(isinstance(my_qconfig.activation(), MinMaxObserver))
        self.assertTrue(hasattr(m, "activation_post_process_0"))
        self.assertTrue(hasattr(m, "activation_post_process_1"))
        self.assertTrue(isinstance(m.activation_post_process_0, MinMaxObserver))
        self.assertTrue(isinstance(m.activation_post_process_1, MinMaxObserver))
    # Dummy classes for PrepareCustomConfig testing
    class _DummyStandaloneModule:
        # placeholder standalone-module type used as a config key below
        pass
    class _DummyFloatModule:
        # placeholder float module for float->observed mapping tests
        pass
    class _DummyObservedModule:
        # placeholder observed module for float->observed / observed->quantized tests
        pass
    class _DummyQuantizedModule:
        # placeholder quantized module for observed->quantized mapping tests
        pass
    class _DummyNonTraceableModule1:
        # placeholder class for non-traceable module class lists
        pass
    class _DummyNonTraceableModule2:
        # placeholder class for non-traceable module class lists
        pass
    def test_prepare_custom_config_set_standalone_module_name(self):
        """set_standalone_module_name should record a StandaloneModuleConfigEntry
        keyed by the given module name."""
        qconfig_mapping = QConfigMapping()
        example_inputs = (torch.randn(3),)
        child_prepare_custom_config = PrepareCustomConfig()
        backend_config = BackendConfig("my_backend")
        # Expected entry, built from the same arguments passed to the setter
        config_entry = StandaloneModuleConfigEntry(
            qconfig_mapping, example_inputs, child_prepare_custom_config, backend_config)
        prepare_custom_config = PrepareCustomConfig()
        self.assertEqual(len(prepare_custom_config.standalone_module_names), 0)
        prepare_custom_config.set_standalone_module_name(
            "module1", qconfig_mapping, example_inputs, child_prepare_custom_config, backend_config)
        self.assertEqual(list(prepare_custom_config.standalone_module_names.keys()), ["module1"])
        self.assertEqual(prepare_custom_config.standalone_module_names["module1"], config_entry)
    def test_prepare_custom_config_set_standalone_module_class(self):
        """set_standalone_module_class should record a StandaloneModuleConfigEntry
        keyed by the given module class."""
        qconfig_mapping = QConfigMapping()
        example_inputs = (torch.randn(3),)
        child_prepare_custom_config = PrepareCustomConfig()
        backend_config = BackendConfig("my_backend")
        # Expected entry, built from the same arguments passed to the setter
        config_entry = StandaloneModuleConfigEntry(
            qconfig_mapping, example_inputs, child_prepare_custom_config, backend_config)
        prepare_custom_config = PrepareCustomConfig()
        self.assertEqual(len(prepare_custom_config.standalone_module_classes), 0)
        prepare_custom_config.set_standalone_module_class(
            self._DummyStandaloneModule, qconfig_mapping, example_inputs, child_prepare_custom_config, backend_config)
        self.assertEqual(len(prepare_custom_config.standalone_module_classes), 1)
        self.assertTrue(self._DummyStandaloneModule in prepare_custom_config.standalone_module_classes)
        self.assertEqual(prepare_custom_config.standalone_module_classes[self._DummyStandaloneModule], config_entry)
    def test_prepare_custom_config_set_float_to_observed_mapping(self):
        """set_float_to_observed_mapping should store the float->observed class
        pair under the given quant type (STATIC here)."""
        prepare_custom_config = PrepareCustomConfig()
        self.assertEqual(len(prepare_custom_config.float_to_observed_mapping), 0)
        prepare_custom_config.set_float_to_observed_mapping(self._DummyFloatModule, self._DummyObservedModule, QuantType.STATIC)
        self.assertEqual(len(prepare_custom_config.float_to_observed_mapping), 1)
        self.assertEqual(list(prepare_custom_config.float_to_observed_mapping.keys()), [QuantType.STATIC])
        self.assertEqual(len(prepare_custom_config.float_to_observed_mapping[QuantType.STATIC]), 1)
        self.assertTrue(self._DummyFloatModule in prepare_custom_config.float_to_observed_mapping[QuantType.STATIC])
        self.assertEqual(prepare_custom_config.float_to_observed_mapping[QuantType.STATIC][self._DummyFloatModule],
                         self._DummyObservedModule)
def test_prepare_custom_config_set_non_traceable_module_names(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.non_traceable_module_names), 0)
prepare_custom_config.set_non_traceable_module_names(["module1", "module2"])
self.assertEqual(prepare_custom_config.non_traceable_module_names, ["module1", "module2"])
def test_prepare_custom_config_set_non_traceable_module_classes(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.non_traceable_module_classes), 0)
prepare_custom_config.set_non_traceable_module_classes([self._DummyNonTraceableModule1, self._DummyNonTraceableModule2])
self.assertEqual(prepare_custom_config.non_traceable_module_classes,
[self._DummyNonTraceableModule1, self._DummyNonTraceableModule2])
def test_prepare_custom_config_set_input_quantized_indexes(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.input_quantized_indexes), 0)
prepare_custom_config.set_input_quantized_indexes([0, 1])
self.assertEqual(prepare_custom_config.input_quantized_indexes, [0, 1])
def test_prepare_custom_config_set_output_quantized_indexes(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.output_quantized_indexes), 0)
prepare_custom_config.set_output_quantized_indexes([0, 1])
self.assertEqual(prepare_custom_config.output_quantized_indexes, [0, 1])
def test_prepare_custom_config_set_preserved_attributes(self):
prepare_custom_config = PrepareCustomConfig()
self.assertEqual(len(prepare_custom_config.preserved_attributes), 0)
prepare_custom_config.set_preserved_attributes(["attr1", "attr2"])
self.assertEqual(prepare_custom_config.preserved_attributes, ["attr1", "attr2"])
    def _get_dummy_prepare_custom_config_dict(self):
        """
        Return a dummy prepare_custom_config_dict to test PrepareCustomConfig's to_dict and from_dict methods.

        Covers every supported key: standalone module (by name and class),
        float-to-observed mapping, non-traceable modules (by name and class),
        input/output quantized indexes, and preserved attributes.
        """
        return {
            STANDALONE_MODULE_NAME_DICT_KEY: [(
                "module1",
                QConfigMapping(),
                (torch.randn(3),),
                PrepareCustomConfig(),
                BackendConfig("my_backend"),
            )],
            STANDALONE_MODULE_CLASS_DICT_KEY: [(
                self._DummyStandaloneModule,
                QConfigMapping(),
                (torch.randn(10),),
                PrepareCustomConfig(),
                BackendConfig("my_backend"),
            )],
            FLOAT_TO_OBSERVED_DICT_KEY: {
                "static": {
                    self._DummyFloatModule: self._DummyObservedModule
                },
            },
            NON_TRACEABLE_MODULE_NAME_DICT_KEY: ["module2", "module3"],
            NON_TRACEABLE_MODULE_CLASS_DICT_KEY: [self._DummyNonTraceableModule1, self._DummyNonTraceableModule2],
            INPUT_QUANTIZED_INDEXES_DICT_KEY: [0, 1],
            OUTPUT_QUANTIZED_INDEXES_DICT_KEY: [0, 1],
            PRESERVED_ATTRIBUTES_DICT_KEY: ["attr1", "attr2"]
        }
    def test_prepare_custom_config_from_dict(self):
        """PrepareCustomConfig.from_dict should faithfully reconstruct every
        section of the dummy prepare_custom_config_dict."""
        prepare_custom_config_dict = self._get_dummy_prepare_custom_config_dict()
        # Unpack the expected standalone-module entries from the raw dict
        (sm_name, qm1, ei1, pcc1, bcd1) = prepare_custom_config_dict[STANDALONE_MODULE_NAME_DICT_KEY][0]
        (sm_class, qm2, ei2, pcc2, bcd2) = prepare_custom_config_dict[STANDALONE_MODULE_CLASS_DICT_KEY][0]
        sm_config_entry1 = StandaloneModuleConfigEntry(qm1, ei1, pcc1, bcd1)
        sm_config_entry2 = StandaloneModuleConfigEntry(qm2, ei2, pcc2, bcd2)
        prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config_dict)
        # Standalone modules
        self.assertEqual(len(prepare_custom_config.standalone_module_names), 1)
        self.assertTrue(sm_name in prepare_custom_config.standalone_module_names)
        self.assertEqual(prepare_custom_config.standalone_module_names[sm_name], sm_config_entry1)
        self.assertEqual(len(prepare_custom_config.standalone_module_classes), 1)
        self.assertTrue(sm_class in prepare_custom_config.standalone_module_classes)
        self.assertEqual(prepare_custom_config.standalone_module_classes[sm_class], sm_config_entry2)
        # Float to observed mapping
        self.assertEqual(len(prepare_custom_config.float_to_observed_mapping), 1)
        self.assertEqual(list(prepare_custom_config.float_to_observed_mapping.keys()), [QuantType.STATIC])
        self.assertEqual(len(prepare_custom_config.float_to_observed_mapping[QuantType.STATIC]), 1)
        self.assertTrue(self._DummyFloatModule in prepare_custom_config.float_to_observed_mapping[QuantType.STATIC])
        self.assertEqual(prepare_custom_config.float_to_observed_mapping[QuantType.STATIC][self._DummyFloatModule],
                         self._DummyObservedModule)
        # Other
        self.assertEqual(prepare_custom_config.non_traceable_module_names, ["module2", "module3"])
        self.assertEqual(prepare_custom_config.non_traceable_module_classes,
                         [self._DummyNonTraceableModule1, self._DummyNonTraceableModule2])
        self.assertEqual(prepare_custom_config.input_quantized_indexes, [0, 1])
        self.assertEqual(prepare_custom_config.output_quantized_indexes, [0, 1])
        self.assertEqual(prepare_custom_config.preserved_attributes, ["attr1", "attr2"])
    def test_prepare_custom_config_to_dict(self):
        """Building a PrepareCustomConfig via its fluent setters and calling
        to_dict should reproduce the dummy dict (with nested configs
        themselves converted to dicts)."""
        prepare_custom_config_dict = self._get_dummy_prepare_custom_config_dict()
        (sm_name, qm1, ei1, pcc1, bcd1) = prepare_custom_config_dict[STANDALONE_MODULE_NAME_DICT_KEY][0]
        (sm_class, qm2, ei2, pcc2, bcd2) = prepare_custom_config_dict[STANDALONE_MODULE_CLASS_DICT_KEY][0]
        prepare_custom_config = PrepareCustomConfig() \
            .set_standalone_module_name(sm_name, qm1, ei1, pcc1, bcd1) \
            .set_standalone_module_class(sm_class, qm2, ei2, pcc2, bcd2) \
            .set_float_to_observed_mapping(self._DummyFloatModule, self._DummyObservedModule) \
            .set_non_traceable_module_names(["module2", "module3"]) \
            .set_non_traceable_module_classes([self._DummyNonTraceableModule1, self._DummyNonTraceableModule2]) \
            .set_input_quantized_indexes([0, 1]) \
            .set_output_quantized_indexes([0, 1]) \
            .set_preserved_attributes(["attr1", "attr2"])
        # PrepareCustomConfig.to_dict also converts internal QConfigMappings and PrepareCustomConfigs to dicts
        prepare_custom_config_dict[STANDALONE_MODULE_NAME_DICT_KEY][0] = (sm_name, qm1.to_dict(), ei1, pcc1.to_dict(), bcd1)
        prepare_custom_config_dict[STANDALONE_MODULE_CLASS_DICT_KEY][0] = (sm_class, qm2.to_dict(), ei2, pcc2.to_dict(), bcd2)
        self.assertEqual(prepare_custom_config.to_dict(), prepare_custom_config_dict)
    def test_convert_custom_config_set_observed_to_quantized_mapping(self):
        """set_observed_to_quantized_mapping should store the observed->quantized
        class pair under the given quant type (STATIC here)."""
        convert_custom_config = ConvertCustomConfig()
        self.assertEqual(len(convert_custom_config.observed_to_quantized_mapping), 0)
        convert_custom_config.set_observed_to_quantized_mapping(
            self._DummyObservedModule, self._DummyQuantizedModule, QuantType.STATIC)
        self.assertEqual(len(convert_custom_config.observed_to_quantized_mapping), 1)
        self.assertEqual(list(convert_custom_config.observed_to_quantized_mapping.keys()), [QuantType.STATIC])
        self.assertTrue(self._DummyObservedModule in convert_custom_config.observed_to_quantized_mapping[QuantType.STATIC])
        self.assertEqual(convert_custom_config.observed_to_quantized_mapping[QuantType.STATIC][self._DummyObservedModule],
                         self._DummyQuantizedModule)
def test_convert_custom_config_set_preserved_attributes(self):
convert_custom_config = ConvertCustomConfig()
self.assertEqual(len(convert_custom_config.preserved_attributes), 0)
convert_custom_config.set_preserved_attributes(["attr1", "attr2"])
self.assertEqual(convert_custom_config.preserved_attributes, ["attr1", "attr2"])
    def _get_dummy_convert_custom_config_dict(self):
        """
        Return a dummy convert_custom_config_dict to test ConvertCustomConfig's to_dict and from_dict methods.

        Covers the observed-to-quantized mapping (static only) and the
        preserved-attributes list.
        """
        return {
            OBSERVED_TO_QUANTIZED_DICT_KEY: {
                "static": {
                    self._DummyObservedModule: self._DummyQuantizedModule
                },
            },
            PRESERVED_ATTRIBUTES_DICT_KEY: ["attr1", "attr2"]
        }
    def test_convert_custom_config_from_dict(self):
        """ConvertCustomConfig.from_dict should reconstruct the observed->quantized
        mapping and preserved attributes from the dummy dict."""
        convert_custom_config_dict = self._get_dummy_convert_custom_config_dict()
        convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config_dict)
        self.assertEqual(len(convert_custom_config.observed_to_quantized_mapping), 1)
        self.assertEqual(list(convert_custom_config.observed_to_quantized_mapping.keys()), [QuantType.STATIC])
        self.assertEqual(len(convert_custom_config.observed_to_quantized_mapping[QuantType.STATIC]), 1)
        self.assertTrue(self._DummyObservedModule in convert_custom_config.observed_to_quantized_mapping[QuantType.STATIC])
        self.assertEqual(convert_custom_config.observed_to_quantized_mapping[QuantType.STATIC][self._DummyObservedModule],
                         self._DummyQuantizedModule)
        self.assertEqual(convert_custom_config.preserved_attributes, ["attr1", "attr2"])
def test_convert_custom_config_to_dict(self):
convert_custom_config = ConvertCustomConfig() \
.set_observed_to_quantized_mapping(self._DummyObservedModule, self._DummyQuantizedModule) \
.set_preserved_attributes(["attr1", "attr2"])
self.assertEqual(convert_custom_config.to_dict(), self._get_dummy_convert_custom_config_dict())
def test_fuse_custom_config_set_preserved_attributes(self):
fuse_custom_config = FuseCustomConfig()
self.assertEqual(len(fuse_custom_config.preserved_attributes), 0)
fuse_custom_config.set_preserved_attributes(["attr1", "attr2"])
self.assertEqual(fuse_custom_config.preserved_attributes, ["attr1", "attr2"])
def test_fuse_custom_config_from_dict(self):
fuse_custom_config_dict = {PRESERVED_ATTRIBUTES_DICT_KEY: ["attr1", "attr2"]}
fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config_dict)
self.assertEqual(fuse_custom_config.preserved_attributes, ["attr1", "attr2"])
def test_fuse_custom_config_to_dict(self):
fuse_custom_config_dict = {PRESERVED_ATTRIBUTES_DICT_KEY: ["attr1", "attr2"]}
fuse_custom_config = FuseCustomConfig().set_preserved_attributes(["attr1", "attr2"])
self.assertEqual(fuse_custom_config.to_dict(), fuse_custom_config_dict)
    def test_remove_qconfig(self):
        """After convert_fx, no submodule should still carry a `qconfig`
        attribute (it is attached during prepare and must be cleaned up)."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.avg_pool = torch.nn.AvgPool2d(1)
            def forward(self, x):
                return self.avg_pool(x)
        m = M().eval()
        qconfig_dict = {'': default_qconfig}
        example_inputs = (torch.randn(1, 1, 1, 1),)
        m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
        # calibrate, then convert and run once to make sure the model works
        m(*example_inputs)
        m = convert_fx(m)
        m(*example_inputs)
        for name, module in m.named_modules():
            self.assertFalse(hasattr(module, 'qconfig'),
                             'qconfig is not removed for ' + name)
def test_return_none(self):
class M(torch.nn.Module):
def forward(self, x):
pass
m = M().eval()
qconfig_dict = {'': torch.ao.quantization.default_qconfig}
m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1),))
m = convert_fx(m)
def test_default_quant_after_none_qconfig(self):
""" Make sure default quant is inserted properly"""
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = torch.nn.Conv2d(1, 1, 1)
self.conv2 = torch.nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = x.transpose(1, 2)
x = self.conv2(x)
m = M().eval()
qconfig_dict = {
"": default_qconfig,
"module_name": [
("conv1", None)
]
}
m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 1, 1, 1),))
m = convert_fx(m)
    def test_qconfig_for_call_method(self):
        """Check that per-module-name qconfigs control quantization of call_method
        nodes (here `transpose`) at submodule boundaries, in both directions:
        parent quantized / child not, and child quantized / parent not."""
        class Sub(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(1, 1, 1)
            def forward(self, x):
                x = x.transpose(2, 3)
                x = self.conv(x)
                return x.transpose(2, 3)
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub = Sub()
                self.conv1 = torch.nn.Conv2d(1, 1, 1)
                self.conv2 = torch.nn.Conv2d(1, 1, 1)
            def forward(self, x):
                x = self.conv1(x)
                x = self.sub(x)
                x = self.conv2(x)
                return x.transpose(2, 3)
        qconfig_dict1 = {"": default_qconfig, "module_name": [("sub", None)]}
        # since sub is configured to have qconfig None, we should dequantize the output
        # of self.conv1 and quantize the input of self.conv2
        # dequantize after conv2 should happen after transpose since
        # it is configured with default_qconfig
        # nodes in Sub module instance is not quantized
        node_list1 = [
            ns.call_function(torch.quantize_per_tensor),
            ns.call_module(nnq.Conv2d),
            ns.call_method("dequantize"),
            ns.call_method("transpose"),
            ns.call_module(nn.Conv2d),
            ns.call_method("transpose"),
            ns.call_function(torch.quantize_per_tensor),
            ns.call_module(nnq.Conv2d),
            ns.call_method("transpose"),
            ns.call_method("dequantize")
        ]
        qconfig_dict2 = {"": None, "module_name": [("sub", default_qconfig)]}
        # Only nodes in Sub module instance are quantized
        # the first transpose is not quantized because the input is not quantized
        node_list2 = [
            ns.call_module(nn.Conv2d),
            ns.call_function(torch.quantize_per_tensor),
            ns.call_method("transpose"),
            ns.call_module(nnq.Conv2d),
            ns.call_method("transpose"),
            ns.call_method("dequantize"),
            ns.call_module(nn.Conv2d),
            ns.call_method("transpose"),
        ]
        # Run both configurations and check the exact node sequence produced
        for qconfig_dict, node_list in [
                (qconfig_dict1, node_list1),
                (qconfig_dict2, node_list2)
        ]:
            example_inputs = (torch.randn(2, 1, 3, 3),)
            m = M().eval()
            m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
            m(torch.randn(2, 1, 3, 3))
            m = convert_fx(m)
            self.checkGraphModuleNodes(m, expected_node_list=node_list)
            # make sure it runs
            m(*example_inputs)
    def test_qconfig_for_call_func(self):
        """Check that a per-module-name qconfig of None leaves the functional
        linear in that submodule unquantized, while the rest of the model is
        quantized (quantized.linear calls followed by dequantize)."""
        class Linear(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.w = torch.ones(5, 5)
                self.b = torch.zeros(5)
            def forward(self, x):
                return torch.nn.functional.linear(x, self.w, self.b)
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.mods1 = torch.nn.Sequential(
                    Linear(),
                    Linear()
                )
                self.mods2 = Linear()
            def forward(self, x):
                x = self.mods1(x)
                x = self.mods2(x)
                return x
        model = M().eval()
        example_inputs = (torch.rand(5, 5),)
        # mods2 gets qconfig None, so its functional linear stays float
        qconfig_dict = {"": default_qconfig, "module_name": [("mods2", None)]}
        m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
        m(*example_inputs)
        m = convert_fx(m)
        node_list = [
            ns.call_function(torch.quantize_per_tensor),
            ns.call_function(torch.ops.quantized.linear),
            ns.call_function(torch.ops.quantized.linear),
            ns.call_method('dequantize'),
            ns.call_function(torch.nn.functional.linear)
        ]
        self.checkGraphModuleNodes(m, expected_node_list=node_list)
        # make sure the converted model runs
        m(torch.rand(5, 5))
    def test_preserve_attributes(self):
        """Attributes listed under "preserved_attributes" should survive both
        prepare_fx and convert_fx."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(1, 1, 1)
            def forward(self, x):
                return self.conv(x)
        m = M()
        m.eval()
        m.preserved_attr = 3
        prepare_custom_config_dict = {
            "preserved_attributes": ["preserved_attr"]
        }
        example_inputs = (torch.randn(1, 1, 1, 1),)
        m = prepare_fx(
            m,
            {"": default_qconfig},
            example_inputs=example_inputs,
            prepare_custom_config=prepare_custom_config_dict)
        # helper asserting the attribute and its value are still present
        def assertAttrPreserved(m):
            self.assertTrue(hasattr(m, "preserved_attr"))
            self.assertEqual(m.preserved_attr, 3)
        assertAttrPreserved(m)
        convert_custom_config_dict = {
            "preserved_attributes": ["preserved_attr"]
        }
        m = convert_fx(m, convert_custom_config=convert_custom_config_dict)
        assertAttrPreserved(m)
    @skipIfNoFBGEMM
    def test_qat_and_script(self):
        """A QAT-prepared model should be scriptable, and the fake-quant /
        observer enable/disable hooks should work on the scripted model."""
        model = LinearModelWithSubmodule().train()
        qengine = torch.backends.quantized.engine
        qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig(qengine)}
        x = torch.randn(5, 5)
        example_inputs = (x,)
        model = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)
        # ensure scripting works
        scripted = torch.jit.script(model)
        # run one round to make sure model runs
        scripted(x)
        FileCheck().check_count('FakeQuantize = prim::GetAttr[name="', 4, exactly=True) \
                   .run(scripted.graph)
        # disable fake_quant and observer
        for epoch in range(3):
            if epoch == 1:
                scripted.apply(torch.ao.quantization.disable_observer)
            if epoch == 2:
                scripted.apply(torch.ao.quantization.disable_fake_quant)
        # ensure the fake_quant and observer have been disabled.
        matches = ['.fake_quant_enabled', '.observer_enabled']
        for key, v in scripted.state_dict().items():
            if any(x in key for x in matches):
                self.assertEqual(v, torch.tensor([0], dtype=torch.int64))
        # enable them back
        scripted.apply(torch.ao.quantization.enable_fake_quant)
        scripted.apply(torch.ao.quantization.enable_observer)
        for key, v in scripted.state_dict().items():
            if any(x in key for x in matches):
                self.assertEqual(v, torch.tensor([1], dtype=torch.int64))
    @skipIfNoFBGEMM
    def test_save_observer_state_dict(self):
        """Observer statistics saved via get_observer_state_dict and loaded
        into a freshly prepared copy should yield an identical quantized
        model (same outputs), with both weights_only load modes."""
        orig = LinearModelWithSubmodule().eval()
        model = orig
        qconfig_dict = {'': torch.ao.quantization.get_default_qconfig('fbgemm')}
        x = torch.randn(5, 5)
        model = prepare_fx(model, qconfig_dict, example_inputs=(x,))
        # run it through input
        model(x)
        # save state_dict of model
        obs_dict = torch.ao.quantization.get_observer_state_dict(model)
        quant = convert_fx(model)
        b = io.BytesIO()
        torch.save(obs_dict, b)
        # Load the stats into new model
        for weights_only in [True, False]:
            b.seek(0)
            model_2 = orig
            model_2 = prepare_fx(model_2, qconfig_dict, example_inputs=(x,))
            loaded_dict = torch.load(b, weights_only=weights_only)
            torch.ao.quantization.load_observer_state_dict(model_2, loaded_dict)
            quant_2 = convert_fx(model_2)
            # Verify that loaded state dict produces same results.
            self.assertEqual(quant(x), quant_2(x))
    @skipIfNoFBGEMM
    def test_custom_module_class(self):
        """End-to-end custom-module flow: a CustomModule is swapped to an
        ObservedCustomModule during prepare and to a quantized custom module
        during convert, for both static and dynamic quantization; the result
        must match an equivalent reference model quantized normally."""
        class CustomModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(3, 3)
            def forward(self, x):
                return self.linear(x)
        class ObservedCustomModule(torch.nn.Module):
            def __init__(self, linear):
                super().__init__()
                self.linear = linear
            def forward(self, x):
                return self.linear(x)
            @classmethod
            def from_float(cls, float_module):
                # prepare-time swap: wrap the float linear, carry the qconfig
                assert hasattr(float_module, 'qconfig')
                observed = cls(float_module.linear)
                observed.qconfig = float_module.qconfig
                return observed
        class StaticQuantCustomModule(torch.nn.Module):
            def __init__(self, linear):
                super().__init__()
                self.linear = linear
            def forward(self, x):
                return self.linear(x)
            @classmethod
            def from_observed(cls, observed_module):
                # convert-time swap: move the observer onto the linear, then quantize it
                assert hasattr(observed_module, 'qconfig')
                assert hasattr(observed_module, 'activation_post_process')
                observed_module.linear.activation_post_process = \
                    observed_module.activation_post_process
                quantized = cls(nnq.Linear.from_float(observed_module.linear))
                return quantized
        class DynamicQuantCustomModule(torch.nn.Module):
            def __init__(self, linear):
                super().__init__()
                self.linear = linear
            def forward(self, x):
                return self.linear(x)
            @classmethod
            def from_observed(cls, observed_module):
                # dynamic quant needs the qconfig on the linear, no observer
                assert hasattr(observed_module, 'qconfig')
                observed_module.linear.qconfig = observed_module.qconfig
                quantized = cls(nnqd.Linear.from_float(observed_module.linear))
                return quantized
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(3, 3)
                self.custom = CustomModule()
            def forward(self, x):
                x = self.linear(x)
                x = self.custom(x)
                return x
        class RefM(torch.nn.Module):
            # reference model: two plain linears, equivalent to M
            def __init__(self) -> None:
                super().__init__()
                self.linear1 = torch.nn.Linear(3, 3)
                self.linear2 = torch.nn.Linear(3, 3)
            def forward(self, x):
                x = self.linear1(x)
                x = self.linear2(x)
                return x
        # instantiate M and RefM and align the parameters
        original_m = M().eval()
        original_ref_m = RefM().eval()
        original_ref_m.linear1.weight = torch.nn.Parameter(original_m.linear.weight.detach())
        original_ref_m.linear1.bias = torch.nn.Parameter(original_m.linear.bias.detach())
        original_ref_m.linear2.weight = torch.nn.Parameter(original_m.custom.linear.weight.detach())
        original_ref_m.linear2.bias = torch.nn.Parameter(original_m.custom.linear.bias.detach())
        a16_qconfig = QConfig(
            activation=MinMaxObserver.with_args(dtype=torch.qint32, quant_min=0, quant_max=65536),
            weight=default_weight_observer,
        )
        # key -> (qconfig, expected quantized module class, expected observer count)
        test_configs = {
            "static": (default_qconfig, StaticQuantCustomModule, 3),
            "static_a16": (a16_qconfig, StaticQuantCustomModule, 3),
            "dynamic": (default_dynamic_qconfig, DynamicQuantCustomModule, 0)
        }
        for quant_type in [QuantType.STATIC, QuantType.DYNAMIC]:
            key = _get_quant_type_to_str(quant_type)
            qconfig, quantized_module_class, num_observers = test_configs[key]
            qconfig_dict = {"": qconfig}
            if key == "static":
                prepare_custom_config_dict = {
                    "float_to_observed_custom_module_class": {
                        "static": {
                            CustomModule: ObservedCustomModule
                        }
                    }
                }
                convert_custom_config_dict = {
                    "observed_to_quantized_custom_module_class": {
                        "static": {
                            ObservedCustomModule: quantized_module_class
                        }
                    }
                }
            else:
                # dynamic path: keep the custom module untraced instead of observing it
                prepare_custom_config_dict = {
                    "non_traceable_module_class": [
                        CustomModule
                    ]
                }
                convert_custom_config_dict = {
                    "observed_to_quantized_custom_module_class": {
                        "dynamic": {
                            CustomModule: quantized_module_class
                        }
                    }
                }
            example_inputs = (torch.randn(3, 3),)
            # check prepared model
            m = prepare_fx(
                copy.deepcopy(original_m),
                qconfig_dict,
                example_inputs=example_inputs,
                prepare_custom_config=prepare_custom_config_dict)
            # calibration
            m(*example_inputs)
            # all activation observers are inserted in the top level module
            count_check = {
                ns.call_module(torch.ao.quantization.MinMaxObserver): num_observers
            }
            self.checkGraphModuleNodes(m, expected_node_occurrence=count_check)
            # check converted/quantized model
            m = convert_fx(
                m,
                convert_custom_config=convert_custom_config_dict)
            if quant_type == QuantType.STATIC:
                count_check = {
                    ns.call_function(torch.quantize_per_tensor) : 1,
                    ns.call_module(nnq.Linear) : 1,
                    ns.call_method('dequantize') : 1,
                }
                self.checkGraphModuleNodes(m, expected_node_occurrence=count_check)
            self.assertEqual(type(m.custom), quantized_module_class)
            res = m(*example_inputs)
            # quantize the reference model
            ref_m = prepare_fx(
                copy.deepcopy(original_ref_m), qconfig_dict, example_inputs=example_inputs)
            ref_m(*example_inputs)
            ref_m = convert_fx(ref_m)
            ref_res = ref_m(*example_inputs)
            self.assertEqual(res, ref_res)
    @skipIfNoFBGEMM
    def test_custom_module_class_input_has_multiple_users(self):
        """ Tests that the flow still works when the input of custom module
        has multiple users
        """
        class CustomModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(3, 3)
            def forward(self, x):
                return self.linear(x)
        class ObservedCustomModule(torch.nn.Module):
            def __init__(self, linear):
                super().__init__()
                self.linear = linear
            def forward(self, x):
                return self.linear(x)
            @classmethod
            def from_float(cls, float_module):
                # prepare-time swap: wrap the float linear, carry the qconfig
                assert hasattr(float_module, 'qconfig')
                observed = cls(float_module.linear)
                observed.qconfig = float_module.qconfig
                return observed
        class StaticQuantCustomModule(torch.nn.Module):
            def __init__(self, linear):
                super().__init__()
                self.linear = linear
            def forward(self, x):
                return self.linear(x)
            @classmethod
            def from_observed(cls, observed_module):
                # convert-time swap: move the observer onto the linear, then quantize it
                assert hasattr(observed_module, 'qconfig')
                assert hasattr(observed_module, 'activation_post_process')
                observed_module.linear.activation_post_process = \
                    observed_module.activation_post_process
                quantized = cls(nnq.Linear.from_float(observed_module.linear))
                return quantized
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(3, 3)
                self.custom = CustomModule()
            def forward(self, x0):
                # x0 feeds both the custom module and the plain linear
                x1 = self.custom(x0)
                x2 = self.linear(x0)
                return x1 + x2
        prepare_custom_config_dict = {
            "float_to_observed_custom_module_class": {
                "static": {
                    CustomModule: ObservedCustomModule
                }
            }
        }
        convert_custom_config_dict = {
            "observed_to_quantized_custom_module_class": {
                "static": {
                    ObservedCustomModule: StaticQuantCustomModule
                }
            }
        }
        m = M().eval()
        example_inputs = (torch.randn(3, 3),)
        m = prepare_fx(
            m,
            {"": default_qconfig},
            example_inputs=example_inputs,
            prepare_custom_config=prepare_custom_config_dict)
        # make sure it works
        m = convert_fx(
            m,
            convert_custom_config=convert_custom_config_dict)
        # make sure it runs
        m(*example_inputs)
    @skipIfNoFBGEMM
    def test_custom_module_class_input_has_duplicate_nodes(self):
        """ Tests that the flow still works when the graph has
        multiple nodes with the same custom module target.
        """
        class CustomModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(3, 3)
            def forward(self, x):
                return self.linear(x)
        class ObservedCustomModule(torch.nn.Module):
            def __init__(self, linear):
                super().__init__()
                self.linear = linear
            def forward(self, x):
                return self.linear(x)
            @classmethod
            def from_float(cls, float_module):
                # prepare-time swap: wrap the float linear, carry the qconfig
                assert hasattr(float_module, 'qconfig')
                observed = cls(float_module.linear)
                observed.qconfig = float_module.qconfig
                return observed
        class StaticQuantCustomModule(torch.nn.Module):
            def __init__(self, linear):
                super().__init__()
                self.linear = linear
            def forward(self, x):
                return self.linear(x)
            @classmethod
            def from_observed(cls, observed_module):
                # convert-time swap: move the observer onto the linear, then quantize it
                assert hasattr(observed_module, 'qconfig')
                assert hasattr(observed_module, 'activation_post_process')
                observed_module.linear.activation_post_process = \
                    observed_module.activation_post_process
                quantized = cls(nnq.Linear.from_float(observed_module.linear))
                return quantized
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.custom = CustomModule()
            def forward(self, x0):
                # the same custom module target is called twice in the graph
                x1 = self.custom(x0)
                x2 = self.custom(x0)
                return x1 + x2
        prepare_custom_config_dict = {
            "float_to_observed_custom_module_class": {
                "static": {
                    CustomModule: ObservedCustomModule
                }
            }
        }
        convert_custom_config_dict = {
            "observed_to_quantized_custom_module_class": {
                "static": {
                    ObservedCustomModule: StaticQuantCustomModule
                }
            }
        }
        m = M().eval()
        example_inputs = (torch.randn(3, 3),)
        m = prepare_fx(
            m,
            {"": default_qconfig},
            example_inputs=example_inputs,
            prepare_custom_config=prepare_custom_config_dict)
        # make sure it works
        m = convert_fx(
            m,
            convert_custom_config=convert_custom_config_dict)
        # make sure it runs
        m(*example_inputs)
    @skipIfNoFBGEMM
    def test_non_traceable_module(self):
        """Modules marked non-traceable (by name or by class) should be left
        as opaque call_module nodes by prepare_fx instead of being traced."""
        class NonTraceable(torch.nn.Module):
            def forward(self, x):
                # dict iteration is not symbolically traceable
                for k in x:
                    print(x[k])
                return x
        class NonTraceable2(torch.nn.Module):
            def forward(self, x):
                # data dependent control flow is not traceable
                for i in x:
                    print(i)
                return x
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.m1 = NonTraceable()
                self.m2 = NonTraceable2()
            def forward(self, x):
                x = self.m1(x)
                x = self.m2(x)
                return x
        m = M().eval()
        qconfig_dict = {"": default_qconfig}
        # m1 is excluded by name, NonTraceable2 by class
        prepare_custom_config_dict = {
            "non_traceable_module_name": [
                "m1"
            ],
            "non_traceable_module_class": [
                NonTraceable2
            ]
        }
        m = prepare_fx(
            m, qconfig_dict,
            example_inputs=({"key": torch.randn(1)},),
            prepare_custom_config=prepare_custom_config_dict)
        node_occurrence = {
            ns.call_module(NonTraceable) : 1,
            ns.call_module(NonTraceable2) : 1,
        }
        # make sure these modules are not traced
        self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
    def test_prepared_model_deepcopy(self):
        """Ensures that copy.deepcopy works correctly on a prepared model.
        """
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(1, 1, 1)
                # extra attributes (one "private") to go through deepcopy
                self._foobar = 'foobar'
                self.foobar2 = 'foobar2'
            def forward(self, x):
                x = self.conv(x)
                return x
        m = M()
        m.eval()
        qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        example_inputs = (torch.randn(4, 1, 4, 4),)
        prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
        # calibrate
        prepared(*example_inputs)
        # copy
        prepared_copy = copy.deepcopy(prepared)
        # quantize, should run with no errors
        quantized = convert_fx(prepared_copy)
def test_quantized_model_type(self):
    """ Test state_dict and deepcopy works properly in the quantized model
    """
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(5, 5)

        def forward(self, x):
            return self.linear(x)

    example_inputs = (torch.rand(8, 5),)
    m = M().eval()
    m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
    m = convert_fx(m)
    # test deepcopy: copy must produce the same outputs as the original
    m_copy = copy.deepcopy(m)
    self.assertEqual(m_copy(*example_inputs), m(*example_inputs))
    # test state_dict: loading into a freshly-quantized model of the same
    # architecture must reproduce the original outputs
    state_dict = m.state_dict()
    m_new = M().eval()
    m_new = prepare_fx(m_new, {"": default_qconfig}, example_inputs=example_inputs)
    m_new = convert_fx(m_new)
    m_new.load_state_dict(state_dict)
    self.assertEqual(m_new(*example_inputs), m(*example_inputs))
def test_dequantize(self):
    r""" Test to make sure dequantize node are placed before
    non-quantizable node
    """
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)
            # GELU has no quantized kernel here, so it must run in fp32
            self.act = torch.nn.GELU()

        def forward(self, x):
            x = self.conv(x)
            return self.act(x)

    data = torch.rand(5, 1, 3, 3, dtype=torch.float)
    for quant_type in self.static_quant_types:
        # expected order: quantized conv -> dequantize -> fp32 GELU
        node_list = [
            ns.call_module(nnq.Conv2d),
            ns.call_method("dequantize"),
            ns.call_module(nn.GELU),
        ]
        self.checkGraphModeFxOp(
            M().eval(), (data,), quant_type, expected_node_list=node_list)
def test_sequential(self):
    """Both convs inside an nn.Sequential should be quantized back to back,
    with no dequantize between them."""
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.convs = torch.nn.Sequential(
                torch.nn.Conv2d(1, 1, 1),
                torch.nn.Conv2d(1, 1, 1)
            )

        def forward(self, x):
            x = self.convs(x)
            return x

    data = torch.rand(5, 1, 3, 3, dtype=torch.float)
    for quant_type in self.static_quant_types:
        node_list = [
            ns.call_module(nnq.Conv2d),
            ns.call_module(nnq.Conv2d),
        ]
        self.checkGraphModeFxOp(
            M().eval(), (data,), quant_type, expected_node_list=node_list)
def _test_quantized_inputs_outputs(
        self, prepare_custom_config_dict, prepare_count_check,
        convert_count_check):
    """
    Test the option to have inputs and outputs of the graph quantized

    Args:
        prepare_custom_config_dict: config passed to prepare_fx, carrying
            input_quantized_idxs / output_quantized_idxs
        prepare_count_check: expected node occurrence after prepare
        convert_count_check: expected node occurrence after convert
    """
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = torch.nn.Conv2d(1, 1, 1)
            self.conv2 = torch.nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv1(x)
            x = self.conv2(x)
            return x

    # quantized input, quantized output
    m = M()
    qconfig_dict = {'': torch.ao.quantization.default_qconfig}
    example_inputs = (torch.randn(1, 1, 4, 4),)
    m.eval()
    mp = torch.ao.quantization.quantize_fx.prepare_fx(
        m, qconfig_dict,
        example_inputs=example_inputs,
        prepare_custom_config=prepare_custom_config_dict)
    self.checkGraphModuleNodes(mp, expected_node_occurrence=prepare_count_check)
    # calibrate before converting
    mp(*example_inputs)
    mq = torch.ao.quantization.quantize_fx.convert_fx(mp)
    self.checkGraphModuleNodes(mq, expected_node_occurrence=convert_count_check)
def test_quantized_input_quantized_output(self):
    """Input 0 and output 0 are both declared quantized, so no boundary
    quantize/dequantize nodes should be inserted."""
    custom_config = {
        'input_quantized_idxs': [0], 'output_quantized_idxs': [0]}
    observers_after_prepare = {
        ns.call_module(torch.ao.quantization.MinMaxObserver): 2,
    }
    nodes_after_convert = {
        ns.call_function(torch.quantize_per_tensor): 0,
        ns.call_method('dequantize'): 0,
    }
    self._test_quantized_inputs_outputs(
        custom_config, observers_after_prepare, nodes_after_convert)
def test_fp32_input_quantized_output(self):
    """fp32 input with a quantized output: one quantize node at the input
    boundary and no trailing dequantize."""
    custom_config = {
        'output_quantized_idxs': [0]}
    observers_after_prepare = {
        ns.call_module(torch.ao.quantization.MinMaxObserver): 3,
    }
    nodes_after_convert = {
        ns.call_function(torch.quantize_per_tensor): 1,
        ns.call_method('dequantize'): 0,
    }
    self._test_quantized_inputs_outputs(
        custom_config, observers_after_prepare, nodes_after_convert)
def test_quantized_input_fp32_output(self):
    """Quantized input with an fp32 output: no leading quantize node, one
    dequantize at the output boundary."""
    custom_config = {
        'input_quantized_idxs': [0]}
    observers_after_prepare = {
        ns.call_module(torch.ao.quantization.MinMaxObserver): 2,
    }
    nodes_after_convert = {
        ns.call_function(torch.quantize_per_tensor): 0,
        ns.call_method('dequantize'): 1,
    }
    self._test_quantized_inputs_outputs(
        custom_config, observers_after_prepare, nodes_after_convert)
def test_fp32_input_fp32_output(self):
    """Default behavior (no custom config): fp32 in, fp32 out, so both a
    quantize and a dequantize node appear at the graph boundaries."""
    custom_config = {}
    observers_after_prepare = {
        ns.call_module(torch.ao.quantization.MinMaxObserver): 3,
    }
    nodes_after_convert = {
        ns.call_function(torch.quantize_per_tensor): 1,
        ns.call_method('dequantize'): 1,
    }
    self._test_quantized_inputs_outputs(
        custom_config, observers_after_prepare, nodes_after_convert)
@skipIfNoFBGEMM
def test_convtranspose_per_channel_fails_early(self):
    r"""
    Verifies that attempting to quantize a ConvTranspose module with per-Channel
    weight observers fails in the prepare step, as opposed to the convert step.
    """
    m = torch.nn.Sequential(torch.nn.ConvTranspose2d(1, 1, 1))
    m.eval()
    # fbgemm default qconfig uses per-channel weight observers, which
    # ConvTranspose does not support
    qconfig_dict = {'': torch.ao.quantization.get_default_qconfig('fbgemm')}
    with self.assertRaises(AssertionError) as context:
        mp = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 1, 1, 1),))
    self.assertTrue(
        str(context.exception) ==
        'Per channel weight observer is not supported yet for ConvTranspose{n}d.')
@skipIfNoFBGEMM
def test_qparams_buffers(self):
    """Quantization parameters (scale/zero_point) of functional linears must
    be registered as attributes/buffers, appear in the state_dict, and be
    preserved through torch.jit.script."""
    class Linear(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.w = torch.ones(5, 5)
            self.b = torch.zeros(5)

        def forward(self, x):
            return torch.nn.functional.linear(x, self.w, self.b)

    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.mods1 = torch.nn.Sequential(
                Linear(),
                Linear()
            )
            self.mods2 = Linear()

        def forward(self, x):
            x = self.mods1(x)
            x = self.mods2(x)
            return x

    model = M().eval()
    qconfig_dict = {"": default_qconfig}
    example_inputs = (torch.rand(5, 5),)
    m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
    m(*example_inputs)
    m = convert_fx(m)
    keys = m.state_dict().keys()
    # classify state_dict entries; order of the elif chain matters since
    # 'scale' is a substring of 'input_scale'
    # NOTE(review): quant_scale_count / quant_zero_point are tallied but
    # never asserted below -- presumably intentional, verify
    quant_scale_count = quant_zero_point = scale_count = zero_point_count = 0
    for k in keys:
        if 'input_scale' in k:
            quant_scale_count = quant_scale_count + 1
        elif 'input_zero_point' in k:
            quant_zero_point = quant_zero_point + 1
        elif 'scale' in k:
            scale_count = scale_count + 1
        elif 'zero_point' in k:
            zero_point_count = zero_point_count + 1
    # Expect each quantized linear op to have a scale and zero point
    self.assertTrue(scale_count == 3, "Expect each quantized linear op to have a scale in state_dict")
    self.assertTrue(zero_point_count == 3, "Expect each quantized linear op to have a zero_point in state_dict")
    m(*example_inputs)
    # ensure it is scriptable
    scripted = torch.jit.script(m)
    scripted_keys = scripted.state_dict().keys()
    scripted.mods1_0_packed_weight_0 = m.state_dict()["mods1_0_packed_weight_0"]
    non_packed_weight_keys = [key for key in keys if "_packed_weight" not in key]
    self.assertTrue(
        set(scripted_keys) == set(non_packed_weight_keys),
        "Expected the scripted model to preserve the state_dict for non-packed weight attributes")
    # TODO: probably don't want to hardcode the attribute names, since they are generated
    for attr_name in [
            "mods1_0_input_scale_0", "mods1_0_input_zero_point_0",
            "mods1_0_scale_1", "mods1_0_zero_point_1",
            "mods1_1_scale_1", "mods1_1_zero_point_1",
            "mods2_scale_1", "mods2_zero_point_1"]:
        self.assertTrue(hasattr(m, attr_name), attr_name + " not found.")
@skipIfNoFBGEMM
def test_packed_weight_fused_op(self):
    """Each functional linear (including the one fused with relu) should get
    its own packed-weight attribute on the converted module."""
    class Linear(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.w = torch.ones(5, 5)
            self.b = torch.zeros(5)

        def forward(self, x):
            return F.linear(x, self.w, self.b)

    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.mods1 = torch.nn.Sequential(
                Linear(),
                Linear()
            )
            self.mods2 = Linear()
            # functional relu follows mods2, forming a linear-relu pattern
            self.relu = F.relu

        def forward(self, x):
            x = self.mods1(x)
            x = self.mods2(x)
            x = self.relu(x)
            return x

    model = M().eval()
    example_inputs = (torch.rand(5, 5),)
    qconfig_dict = {"": default_qconfig}
    m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
    m(*example_inputs)
    m = convert_fx(m)
    assert hasattr(m, "mods1_0_packed_weight_0")
    assert hasattr(m, "mods1_1_packed_weight_0")
    assert hasattr(m, "mods2_packed_weight_0")
@skipIfNoFBGEMM
def test_mul_add_fp16_config(self):
    """Smoke test: mul/add ops mixed with functional linears under a global
    float16 dynamic qconfig should prepare, convert and run without error."""
    with override_quantized_engine('fbgemm'):
        class Linear(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.w = torch.ones(5, 5)
                self.b = torch.zeros(5)

            def forward(self, x):
                return torch.nn.functional.linear(x, self.w, self.b)

        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.mods1 = torch.nn.Sequential(
                    Linear(),
                    Linear()
                )
                self.mods2 = Linear()

            def forward(self, x):
                # scalar mul/add precede the linears
                x = x * 5
                x = x + 5
                x = self.mods1(x)
                x = self.mods2(x)
                return x

        model = M().eval()
        qconfig_dict = {"": float16_dynamic_qconfig}
        example_inputs = (torch.rand(5, 5),)
        m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
        m = convert_fx(m)
        # make sure it runs
        m(*example_inputs)
def test_getattr_with_nontensor_result(self):
    """
    Verifies that binary ops get quantized correctly if some
    of the args are nodes but not Tensors, such as an `x.ndim`
    pattern.
    """
    class M1(torch.nn.Module):
        def forward(self, x):
            # ndim is an int-valued node, not a Tensor
            dims = x.ndim
            dims_sub = dims - 1
            dims_sub2 = dims_sub - 1
            x = torch.add(x, dims_sub2)
            return x

    class M2(torch.nn.Module):
        def forward(self, x):
            # list built from non-tensor node values, fed to view
            dims = x.ndim
            dims_sub = dims - 2
            mul = [1] * dims_sub
            dims_list = [-1, x.size(1)] + mul
            x = x.view(dims_list)
            return x

    class M3(torch.nn.Module):
        def forward(self, x):
            # torch.Size result used directly as a view argument
            shape = x.shape
            x = x.view(shape)
            return x

    for cls in (M1, M2, M3):
        m = cls().eval()
        example_inputs = (torch.rand(4, 4, 4, 4),)
        m(*example_inputs)
        qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
        mp(torch.rand(4, 4, 4, 4))
        # conversion itself should not raise
        mc = convert_fx(mp)
class _NonReferenceTestModel(nn.Module):
    """Shared fixture: conv -> pool -> flatten -> user-supplied func -> linear.

    `func` is an arbitrary callable taking (x, y, z); it is the op under
    test in the *_nontensor_args_not_observed tests below.
    """
    def __init__(self, func, lin_in, lin_out):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.lin = nn.Linear(lin_in, lin_out)
        self.func = func

    def forward(self, x, y, z):
        x = self.pool(F.relu(self.conv1(x)))
        x = torch.flatten(x, 1)
        # y and z are typically non-tensor arguments to the op under test
        x = self.func(x, y, z)
        x = self.lin(x)
        return x
# This function looks at the node specified by the NodeInfo in the key of
# node_info_to_non_tensor_args and checks that the args at specified indices
# are not observed (since they are non tensors). If the args at those indices
# are a tuple/list (which do not show up as nodes) the function checks the
# individual elements of the tuple/list recursively.
def _check_not_observed(self, model, node_info_to_non_tensor_args):
    """Assert that the args named in `node_info_to_non_tensor_args`
    (a dict mapping NodeInfo -> list of arg indices) carry no observer
    modules in the prepared `model`."""
    # this is a helper function (for easier recursion) that checks whether
    # arg_node is observed
    def _check_node_not_observed(model, arg_node, node):
        if isinstance(arg_node, (tuple, list)):
            # containers are not graph nodes; recurse into their elements
            for new_node in arg_node:
                _check_node_not_observed(model, new_node, node)
        elif arg_node.op == "call_module":
            # an observer would show up as an activation_post_process module
            self.assertTrue(
                not _is_activation_post_process(getattr(model, arg_node.target)),
                f"Arg: {arg_node} of node: {node} is observed but is not a float tensor",
            )

    for node in model.graph.nodes:
        # nodes not listed in the map get an empty index list (no-op)
        indices = node_info_to_non_tensor_args.get(
            NodeInfo(node.op, node.target), []
        )
        for index in indices:
            if index < len(node.args):
                arg_node = node.args[index]
                _check_node_not_observed(model, arg_node, node)
# This test checks that the model gets prepared correct, doesn't have observers
# on specific ops (see _check_not_observed) and that the prepared model runs
def _test_dtype_propagation(self, model, node_info_to_non_tensor_args, *args):
    """Prepare `model` with the default fbgemm qconfig, verify the args
    listed in `node_info_to_non_tensor_args` are unobserved, then run the
    prepared model on `args` as a smoke test."""
    model.eval()
    qconfig_dict = {"": torch.ao.quantization.get_default_qconfig("fbgemm")}
    prepared_model = prepare_fx(model, qconfig_dict, example_inputs=tuple(args))
    self._check_not_observed(prepared_model, node_info_to_non_tensor_args)
    prepared_model(*args)
def test_masked_fill_nontensor_args_not_observed(self):
    """The mask and fill-value args of Tensor.masked_fill get no observers."""
    def masked_fill_fn(a, mask, value):
        return a.masked_fill(mask, value)

    unobserved_arg_indices = {NodeInfo("call_method", "masked_fill"): [1, 2]}
    wrapped = self._NonReferenceTestModel(masked_fill_fn, 1176, 1)
    inputs = [torch.randn(5, 3, 32, 32), torch.randn(1176) > 0, 0.1]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_permute_nontensor_args_not_observed(self):
    """The int dim args of Tensor.permute get no observers."""
    def permute_fn(a, dim0, dim1):
        return a.permute(dim0, dim1)

    unobserved_arg_indices = {NodeInfo("call_method", "permute"): [1, 2]}
    wrapped = self._NonReferenceTestModel(permute_fn, 1176, 1)
    inputs = [torch.randn(5, 3, 32, 32), 0, 1]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_repeat_nontensor_args_not_observed(self):
    """The int repeat-count args of Tensor.repeat get no observers."""
    def repeat_fn(a, r0, r1):
        return a.repeat(r0, r1)

    unobserved_arg_indices = {NodeInfo("call_method", "repeat"): [1, 2]}
    wrapped = self._NonReferenceTestModel(repeat_fn, 1176, 1)
    inputs = [torch.randn(5, 3, 32, 32), 2, 1]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_reshape_nontensor_args_not_observed(self):
    """The int shape arg of Tensor.reshape gets no observer."""
    def reshape_fn(a, dim, _unused):
        return a.reshape(-1, dim)

    unobserved_arg_indices = {NodeInfo("call_method", "reshape"): [2]}
    wrapped = self._NonReferenceTestModel(reshape_fn, 5, 1)
    inputs = [torch.randn(5, 3, 32, 32), 5, None]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_size_nontensor_args_not_observed(self):
    """The int dim arg of Tensor.size gets no observer."""
    def size_fn(a, dim, _unused):
        return a.reshape((-1, a.size(dim)))

    unobserved_arg_indices = {NodeInfo("call_method", "size"): [1]}
    wrapped = self._NonReferenceTestModel(size_fn, 5, 1)
    inputs = [torch.randn(5, 3, 32, 32), 0, None]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_transpose_nontensor_args_not_observed(self):
    """The int dim args of Tensor.transpose get no observers."""
    def transpose_fn(a, dim0, dim1):
        return a.transpose(dim0, dim1)

    unobserved_arg_indices = {NodeInfo("call_method", "transpose"): [1, 2]}
    wrapped = self._NonReferenceTestModel(transpose_fn, 5, 1)
    inputs = [torch.randn(5, 3, 32, 32), 0, 1]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_torch_transpose_nontensor_args_not_observed(self):
    """The int dim args of torch.transpose get no observers."""
    # TODO: make torch.transpose traceable by fx when using
    # variable nontensor arguments
    # func = lambda x, y, z: torch.transpose(x, y, z) # error
    def func(x, y, z):
        return torch.transpose(x, 0, 1)
    model = self._NonReferenceTestModel(func, 5, 1)
    # torch.transpose traces as a call_function node (free function), not a
    # call_method node; keying on "call_method" would never match any node,
    # making the non-observation check pass vacuously
    node_info_to_non_tensor_args = {
        NodeInfo("call_function", torch.transpose): [1, 2]
    }
    args = [torch.randn(5, 3, 32, 32), 0, 1]
    self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_unsqueeze_nontensor_args_not_observed(self):
    """The int dim arg of Tensor.unsqueeze gets no observer."""
    def unsqueeze_fn(a, dim, _unused):
        return a.unsqueeze(dim)

    unobserved_arg_indices = {NodeInfo("call_method", "unsqueeze"): [1]}
    wrapped = self._NonReferenceTestModel(unsqueeze_fn, 1176, 1)
    inputs = [torch.randn(5, 3, 32, 32), 1, None]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_unsqueeze__nontensor_args_not_observed(self):
    """The int dim arg of the in-place Tensor.unsqueeze_ gets no observer."""
    def unsqueeze_inplace_fn(a, dim, _unused):
        return a.unsqueeze_(dim)

    unobserved_arg_indices = {NodeInfo("call_method", "unsqueeze_"): [1]}
    wrapped = self._NonReferenceTestModel(unsqueeze_inplace_fn, 1176, 1)
    inputs = [torch.randn(5, 3, 32, 32), 1, None]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_torch_unsqueeze_nontensor_args_not_observed(self):
    """The int dim arg of torch.unsqueeze gets no observer."""
    # TODO: make torch.unsqueeze scriptable by fx when using
    # variable nontensor arguments
    # func = lambda x, y, z: torch.unsqueeze(x, y) # error
    def func(x, y, z):
        return torch.unsqueeze(x, 1)
    model = self._NonReferenceTestModel(func, 1176, 1)
    args = [torch.randn(5, 3, 32, 32), 1, None]
    # torch.unsqueeze traces as a call_function node (free function), not a
    # call_method node; keying on "call_method" would never match any node,
    # making the non-observation check pass vacuously
    node_info_to_non_tensor_args = {NodeInfo("call_function", torch.unsqueeze): [1]}
    self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_view_nontensor_args_not_observed(self):
    """The int shape arg of Tensor.view gets no observer."""
    def view_fn(a, dim, _unused):
        return a.view(-1, dim)

    unobserved_arg_indices = {NodeInfo("call_method", "view"): [2]}
    wrapped = self._NonReferenceTestModel(view_fn, 5, 1)
    inputs = [torch.randn(5, 3, 32, 32), 5, None]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_propagate_dtypes_for_known_nodes_list_args(self):
    """A list passed whole as the shape arg to reshape is not observed."""
    def reshape_with_list(a, shape, _unused):
        return a.reshape(shape)

    unobserved_arg_indices = {NodeInfo("call_method", "reshape"): [1]}
    wrapped = self._NonReferenceTestModel(reshape_with_list, 5, 1)
    inputs = [torch.randn(5, 3, 32, 32), [-1, 5], None]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_propagate_dtypes_for_known_nodes_split_list_args(self):
    """A list assembled inside forward from scalar inputs is not observed."""
    def reshape_building_list(a, d0, d1):
        return a.reshape([d0, d1])

    unobserved_arg_indices = {NodeInfo("call_method", "reshape"): [1]}
    wrapped = self._NonReferenceTestModel(reshape_building_list, 5, 1)
    inputs = [torch.randn(5, 3, 32, 32), -1, 5]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_propagate_dtypes_for_known_nodes_tuple_args(self):
    """A tuple passed whole as the shape arg to reshape is not observed."""
    def reshape_with_tuple(a, shape, _unused):
        return a.reshape(shape)

    unobserved_arg_indices = {NodeInfo("call_method", "reshape"): [1]}
    wrapped = self._NonReferenceTestModel(reshape_with_tuple, 5, 1)
    inputs = [torch.randn(5, 3, 32, 32), (-1, 5), None]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_propagate_dtypes_for_known_nodes_split_tuple_args(self):
    """A tuple assembled inside forward from scalar inputs is not observed."""
    def reshape_building_tuple(a, d0, d1):
        return a.reshape((d0, d1))

    unobserved_arg_indices = {NodeInfo("call_method", "reshape"): [1]}
    wrapped = self._NonReferenceTestModel(reshape_building_tuple, 5, 1)
    inputs = [torch.randn(5, 3, 32, 32), -1, 5]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_propagate_dtypes_for_known_nodes_dict_args(self):
    """Dims looked up from a dict input to transpose are not observed."""
    def transpose_from_dict(a, dims, _unused):
        return a.transpose(dims["first"], dims["second"])

    unobserved_arg_indices = {NodeInfo("call_method", "transpose"): [1, 2]}
    wrapped = self._NonReferenceTestModel(transpose_from_dict, 5, 1)
    inputs = [torch.randn(5, 3, 32, 32), {"first": 0, "second": 1}, None]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_propagate_dtypes_for_known_nodes_dict_tuple_args(self):
    """A tuple fetched from a dict input and passed to reshape is not
    observed; the op lives in a submodule rather than a plain function."""
    class reshape_module(nn.Module):
        def forward(self, x, y, z):
            return x.reshape(y["shape"])

    unobserved_arg_indices = {NodeInfo("call_method", "reshape"): [1]}
    wrapped = self._NonReferenceTestModel(reshape_module(), 5, 1)
    inputs = [torch.randn(5, 3, 32, 32), {"shape": (-1, 5)}, None]
    self._test_dtype_propagation(wrapped, unobserved_arg_indices, *inputs)
def test_propagate_dtypes_for_known_nodes_dict_split_tuple_args(self):
    """A tuple built from dict lookups and passed to reshape is not observed."""
    def func(x, y, z):
        return x.reshape((y["first"], y["second"]))
    model = self._NonReferenceTestModel(func, 5, 1)
    args = [torch.randn(5, 3, 32, 32), {"first": -1, "second": 5}, None]
    # the op under test is reshape -- the previous "transpose" key matched no
    # node in the graph, so the non-observation check passed vacuously
    node_info_to_non_tensor_args = {NodeInfo("call_method", "reshape"): [1]}
    self._test_dtype_propagation(model, node_info_to_non_tensor_args, *args)
def test_assert_on_size_after_quant_layer(self):
    """
    Verifies that calculating a size of a quantized tensor works
    correctly in quantization passes.
    """
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv1(x)
            # size() on the (to-be-quantized) conv output feeds an assert
            torch._assert(x.size(1) == 1, 'foobar')
            return x

    m = M().eval()
    example_inputs = (torch.rand(4, 1, 4, 4),)
    m(*example_inputs)
    qconfig_dict = {'': torch.ao.quantization.default_qconfig}
    mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    mp(*example_inputs)
    mc = convert_fx(mp)
    # the converted model must still run the assert path without error
    mc(*example_inputs)
def test_fp32_sum(self):
    """
    Verifies that fp32 sum works correctly if it's before or after
    quantized layers.
    """
    class M1(torch.nn.Module):
        # sum after the quantized conv
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv1(x)
            x = torch.stack([x])
            x = torch.sum(x)
            return x

    class M2(torch.nn.Module):
        # sum between two quantized convs
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = nn.Conv2d(1, 1, 1)
            self.conv2 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv1(x)
            x1 = torch.stack([x])
            x1 = torch.sum(x1, dim=0)
            x2 = self.conv2(x1)
            return x2

    for cls in (M1, M2):
        m = cls().eval()
        example_inputs = (torch.rand(4, 1, 4, 4),)
        m(*example_inputs)
        qconfig_dict = {'': torch.ao.quantization.default_qconfig}
        mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
        mp(*example_inputs)
        mc = convert_fx(mp)
        # converted model must run end to end
        mc(*example_inputs)
def test_fusion_pattern_unquantized(self):
    """
    Ensure that leaving a possible fusion pattern of multiple nodes
    unquantized runs through the APIs without errors.
    """
    class Child(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.relu = nn.ReLU()

        def forward(self, x):
            # add + relu form a potential fusion pattern
            x = torch.add(x, 1.0)
            x = torch.nn.functional.relu(x)
            return x

    class Parent(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.child = Child()
            self.conv = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.child(x)
            x = self.conv(x)
            return x

    m = Parent().eval()
    qconfig_dict = {
        '': torch.ao.quantization.default_qconfig,
        # qconfig None for 'child' leaves its add+relu unquantized
        'module_name': [
            ('child', None),
        ],
    }
    example_inputs = (torch.rand(1, 1, 1, 1),)
    mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    mp(*example_inputs)
    mc = convert_fx(mp)
def test_state_dict(self):
    """ Make sure packed params appear in state_dict
    """
    # test linear packed weight
    class M1(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.w = torch.rand(4, 30)
            self.b = torch.rand(4)

        def forward(self, x):
            return F.linear(x, self.w, self.b)

    m = M1().eval()
    qconfig_dict = {"": default_qconfig}
    m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 30),))
    m = convert_fx(m)
    state_dict = m.state_dict()
    self.assertTrue("_packed_weight_0" in state_dict)

    # test conv packed weight
    class M2(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.w = torch.rand(3, 3, 3, 3)
            self.b = torch.rand(3)
            self.stride = (1, 1)
            self.padding = (0, 0)
            self.dilation = (1, 1)
            self.groups = 1

        def forward(self, x):
            return F.conv2d(x, self.w, self.b, self.stride, self.padding, self.dilation, self.groups)

    m = M2().eval()
    qconfig_dict = {"": default_qconfig}
    m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 3, 3, 3),))
    m = convert_fx(m)
    state_dict = m.state_dict()
    self.assertTrue("_packed_weight_0" in state_dict)

    # test load: quantize a second, randomly-initialized instance and check
    # that load_state_dict restores the reference weights/bias/outputs
    ref_weight, ref_bias = torch.ops.quantized.conv2d_unpack(state_dict["_packed_weight_0"])
    data = torch.rand(1, 3, 5, 5)
    ref_res = m(data)
    m = M2().eval()
    m = prepare_fx(m, qconfig_dict, (data,))
    m = convert_fx(m)
    res = m(data)
    weight, bias = m._packed_weight_0.unpack()
    # check that random model weight/bias does not match ref weight/bias
    self.assertNotEqual(weight, ref_weight)
    self.assertNotEqual(bias, ref_bias)
    self.assertNotEqual(res, ref_res)
    m.load_state_dict(state_dict)

    def checkModel(m, data, ref_weight, ref_bias, ref_res):
        res = m(data)
        weight, bias = m._packed_weight_0.unpack()
        # check that weight/bias matches after load the state_dict
        self.assertEqual(weight, ref_weight)
        self.assertEqual(bias, ref_bias)
        self.assertEqual(res, ref_res)

    checkModel(m, data, ref_weight, ref_bias, ref_res)

    # Test save to disk and load back
    m = M2().eval()
    m = prepare_fx(m, qconfig_dict, example_inputs=(data,))
    m = convert_fx(m)
    m.load_state_dict(state_dict)
    with TemporaryFileName() as fname:
        torch.save(m.state_dict(), fname)
        # weights_only=False as this is loading a ScriptModule
        m.load_state_dict(torch.load(fname, weights_only=False))

    checkModel(m, data, ref_weight, ref_bias, ref_res)
@skipIfNoFBGEMM
def test_preserve_qconfig(self):
    """
    Test to make sure the temporary config option to preserve qconfig attributes
    in the model works
    """
    with override_quantized_engine('fbgemm'):
        class Linear(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.w = torch.ones(5, 5)
                self.b = torch.zeros(5)

            def forward(self, x):
                return torch.nn.functional.linear(x, self.w, self.b)

        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.mods1 = torch.nn.Sequential(
                    Linear(),
                    Linear()
                )
                self.mods2 = torch.nn.Sigmoid()

            def forward(self, x):
                x = self.mods1(x)
                x = self.mods2(x)
                return x

        model = M().eval()
        qconfig_dict = {
            "object_type": [
                (torch.nn.functional.linear, float16_dynamic_qconfig),
            ],
        }
        example_inputs = (torch.rand(5, 5),)
        m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
        m(*example_inputs)
        # _remove_qconfig=False should keep the qconfig attribute around
        m = convert_fx(m, _remove_qconfig=False)
        self.assertTrue(hasattr(m.mods2, 'qconfig'))
def test_not_used(self):
    """ Test quantizing a not used value"""
    class M(torch.nn.Module):
        def forward(self, x):
            x = x + x
            # in-place sigmoid whose result is produced but not consumed
            x.sigmoid_()
            return x

    m = M().eval()
    qconfig_mapping = get_default_qconfig_mapping().set_global(float16_static_qconfig)
    # make sure quantization runs
    m = prepare_fx(m, qconfig_mapping, example_inputs=(torch.randn(1),))
    m = convert_fx(m)
def test_qparams_fqn(self):
    """ Test that the FQN of input_scale/zero_point is set
    to that of first linear use. """
    class Linear(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.w = torch.ones(5, 5)
            self.b = torch.zeros(5)

        def forward(self, x):
            return torch.nn.functional.linear(x, self.w, self.b)

    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.mods1 = torch.nn.Sequential(
                Linear(),
                Linear()
            )

        def forward(self, x):
            x = torch.cat((x,), 1)
            tmp = x.size()
            x = self.mods1(x)
            # non-quantizable mul with a size() element follows the linears
            y = x * tmp[0]
            return y

    model = M().eval()
    # only functional linear/relu get quantized; global qconfig is None
    qconfig_dict = {
        "": None,
        "object_type": [
            (torch.nn.functional.linear, default_qconfig),
            (torch.nn.functional.relu, default_qconfig),
        ],
    }
    example_inputs = (torch.rand(5, 5),)
    m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
    m(*example_inputs)
    m = convert_fx(m)
    m(torch.randn(5, 5))
    # qparam attribute names are derived from the first linear's FQN
    # TODO: probably don't want to hardcode the attribute names, since they are generated
    for attr_name in [
            "mods1_0_input_scale_0", "mods1_0_input_zero_point_0",
            "mods1_0_scale_0", "mods1_0_zero_point_0",
            "mods1_1_scale_0", "mods1_1_zero_point_0"]:
        self.assertTrue(hasattr(m, attr_name), attr_name + " not found.")
def test_no_obs_between_unmatched_node_and_copy_node(self):
    """
    Verifies that an observer is not inserted between an unmatched
    node and a node matched to CopyNodeQuantizeHandler. This is done
    because observers require activations to be Tensors, and there is
    no guarantee that an output of an unmatched node is a Tensor.
    """
    class M(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.relu = nn.ReLU()

        def forward(self, x):
            # unmatched call with a non-Tensor (tuple-like) return
            x = _user_func_with_complex_return_type(x)
            x1 = x[0] + 1
            return x1, x[1]

    m = M().eval()
    qconfig_dict = {'': torch.ao.quantization.default_qconfig}
    example_inputs = (torch.randn(4, 4, 4, 4),)
    mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
    # if an observer is inserted after _user_func_with_complex_return_type,
    # the following call will fail
    mp(*example_inputs)
    mc = convert_fx(mp)
    mc(*example_inputs)
def test_fold_quant_dequant(self):
    """ Test that the sequence of quant-dequant nodes in the
    graph, get folded and we erase the extra dequant nodes.
    """
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.w = torch.ones(5, 5)
            self.b = torch.zeros(5)

        def forward(self, x):
            x = torch.cat((x,), 1)
            tmp = x.size()
            x = torch.nn.functional.linear(x, self.w, self.b)
            # mul with a size() element forces a dequantize after the linear
            y = x * tmp[0]
            return y

    model = M().eval()
    # only functional linear is quantized; global qconfig is None
    qconfig_dict = {
        "": None,
        "object_type": [
            (torch.nn.functional.linear, default_qconfig),
        ],
    }
    example_inputs = (torch.rand(5, 5),)
    m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
    m(*example_inputs)
    m = convert_fx(m)
    m(*example_inputs)
    # after folding, exactly one quantize and one dequantize should remain
    dequant = sum(
        1 for n in m.graph.nodes
        if n.op == "call_method" and n.target == "dequantize"
    )
    quant = sum(
        1 for n in m.graph.nodes
        if n.op == "call_function" and n.target == torch.quantize_per_tensor
    )
    self.assertEqual(dequant, 1)
    self.assertEqual(quant, 1)
def test_quant_output_always_observed(self):
    """
    If the output is hardcoded to be quantized, ensure that
    there is always an observer, even if the last non-output node is not
    quantizeable.
    """
    qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}
    prepare_custom_config_dict = {'output_quantized_idxs': [0]}
    example_inputs = (torch.randn(4, 1, 4, 4),)

    # non-quantizeable node, quantized output
    class M1(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.identity = torch.nn.Identity()

        def forward(self, x):
            x = self.identity(x)
            return x

    m1 = M1()
    self.checkGraphModeFxOp(
        m1, example_inputs, QuantType.QAT,
        prepare_expected_node_occurrence={
            ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 2,
        },
        expected_node_occurrence={
            ns.call_function(torch.quantize_per_tensor): 1,
        },
        prepare_custom_config=prepare_custom_config_dict)

    # quantizeable node, quantized output
    class M2(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv(x)
            return x

    m2 = M2()
    self.checkGraphModeFxOp(
        m2, example_inputs, QuantType.QAT,
        prepare_expected_node_occurrence={
            # one for weights, one for activations
            ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 2,
        },
        expected_node_occurrence={
            ns.call_function(torch.quantize_per_tensor): 1,
        },
        prepare_custom_config=prepare_custom_config_dict)

    # quantizeable node, quantized dictionary output
    class M3(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv(x)
            return {"output": x}

    m3 = M3()
    self.checkGraphModeFxOp(
        m3, example_inputs, QuantType.QAT,
        prepare_expected_node_occurrence={
            # one for weights, one for activations
            ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 2,
        },
        expected_node_occurrence={
            ns.call_function(torch.quantize_per_tensor): 1,
        },
        prepare_custom_config=prepare_custom_config_dict)
def test_deepcopy_preserve_attributes(self):
    """Attributes listed in `preserved_attributes` must survive prepare,
    convert and copy.deepcopy, both on the module and in its meta dict."""
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.attr = 3

        def forward(self, x):
            return x

    m = M().eval()
    m = prepare_fx(
        m,
        {"": default_qconfig},
        example_inputs=(torch.randn(1),),
        prepare_custom_config={"preserved_attributes": ["attr"]})
    # preserved attributes are also stored in meta so that it doesn't get lost
    # during deepcopy
    self.assertTrue(hasattr(m, "attr"))
    self.assertTrue("attr" in m.meta[_USER_PRESERVED_ATTRIBUTES_KEY])
    m2 = copy.deepcopy(m)
    self.assertTrue(hasattr(m2, "attr"))
    self.assertTrue("attr" in m2.meta[_USER_PRESERVED_ATTRIBUTES_KEY])
    m = convert_fx(m, convert_custom_config={"preserved_attributes": ["attr"]})
    self.assertTrue(hasattr(m, "attr"))
    self.assertTrue("attr" in m.meta[_USER_PRESERVED_ATTRIBUTES_KEY])
    m2 = copy.deepcopy(m)
    self.assertTrue(hasattr(m2, "attr"))
    self.assertTrue("attr" in m2.meta[_USER_PRESERVED_ATTRIBUTES_KEY])
def test_output_lists_and_dicts(self):
    """Verify that specifying complicated output types does not crash.
    """
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv(x)
            # nested dict/list output structure
            return {'foo': [x]}, [{'foo': [[x]]}]

    m = M().eval()
    qconfig_dict = {'': torch.ao.quantization.default_qconfig}
    mp = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 1, 1, 1),))
    mc = convert_fx(mp)
def test_shape_followed_by_quantized_op(self):
    """ Make sure that shape does not dequantize
    the Tensor before the next operator
    """
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv1 = torch.nn.Conv2d(2, 2, 2)
            self.conv2 = torch.nn.Conv2d(2, 2, 2)

        def forward(self, x):
            x = self.conv1(x)
            # shape access between the two quantized convs
            s = x.shape
            torch._assert(s == x.shape, "")
            x = self.conv2(x)
            return x

    # make sure quantization runs
    m = M().eval()
    example_inputs = (torch.randn(2, 2, 4, 4),)
    m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
    m = convert_fx(m)
    m(*example_inputs)
    # a single quantize/dequantize pair means the tensor stayed quantized
    # across the shape access
    node_occurrence = {
        ns.call_function(torch.quantize_per_tensor): 1,
        ns.call_method("dequantize"): 1
    }
    self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
def test_trace_quantize_per_tensor(self):
    """A converted model (containing quantize_per_tensor nodes) must be
    re-traceable by torch.fx.Transformer without error."""
    class M(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv(x)
            return x

    m = M().eval()
    m = prepare_fx(m, {"": default_qconfig}, example_inputs=(torch.randn(1, 1, 3, 3),))
    m = convert_fx(m)
    # Make sure this runs without error
    m = torch.fx.Transformer(m).transform()
    def test_copy_node_has_shared_actpp_instance(self):
        """ Test the output of CopyNode to have the same
        observer/fake_quant instance as the input
        """
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.avgpool2d = torch.nn.AvgPool2d(kernel_size=3)
            def forward(self, x):
                # AvgPool2d is a "copy node": per the docstring above, its
                # output must share the observer instance of its input.
                x = self.avgpool2d(x)
                return x
        for quant_type in self.static_quant_types:
            m = M()
            # Checks that we have an observer for both input and output
            # NOTE(review): occurrence_map is constructed but never used
            # below -- the observer count is instead verified via
            # actpp_module_count; consider removing it or wiring it up.
            occurrence_map = {
                QuantType.STATIC: {
                    ns.call_module(torch.ao.quantization.MinMaxObserver): 2
                },
                QuantType.QAT: {
                    ns.call_module(torch.ao.quantization.FakeQuantize): 2
                }
            }
            if quant_type == QuantType.QAT:
                m.train()
                prepare = prepare_qat_fx
                qconfig = default_qat_qconfig
                actpp_module_class = torch.ao.quantization.FakeQuantize
            else:
                m.eval()
                prepare = prepare_fx
                qconfig = default_qconfig
                actpp_module_class = torch.ao.quantization.MinMaxObserver
            example_inputs = (torch.randn(1, 3, 3, 3),)
            m = prepare(m, {"": qconfig}, example_inputs=example_inputs)
            # check that there is a duplicated observer instance
            # With remove_duplicate=False the shared instance is reported
            # twice (once for the input site, once for the output site)...
            actpp_module_count = 0
            for module in m.modules(remove_duplicate=False):
                if isinstance(module, actpp_module_class):
                    actpp_module_count += 1
            self.assertEqual(actpp_module_count, 2)
            # ...while deduplicated iteration sees the single shared instance.
            actpp_module_count = 0
            for module in m.modules():
                if isinstance(module, actpp_module_class):
                    actpp_module_count += 1
            self.assertEqual(actpp_module_count, 1)
            m_copy = copy.deepcopy(m)
            m = convert_fx(m)
            m_reference = convert_to_reference_fx(m_copy)
            # checks for non-reference quantized model
            node_occurrence = {
                ns.call_function(torch.quantize_per_tensor): 1,
                ns.call_method("dequantize"): 1
            }
            node_list = [
                ns.call_function(torch.quantize_per_tensor),
                ns.call_module(torch.nn.AvgPool2d),
                ns.call_method("dequantize"),
            ]
            self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence, expected_node_list=node_list)
            # checks for reference quantized model, for copy nodes we'll have
            # dequant - copy_node - quant patterns which will be fused later
            # in the backend lowering step
            node_occurrence = {
                ns.call_function(torch.quantize_per_tensor): 2,
                ns.call_method("dequantize"): 2
            }
            node_list = [
                ns.call_function(torch.quantize_per_tensor),
                ns.call_method("dequantize"),
                ns.call_module(torch.nn.AvgPool2d),
                ns.call_function(torch.quantize_per_tensor),
                ns.call_method("dequantize"),
            ]
            self.checkGraphModuleNodes(m_reference, expected_node_occurrence=node_occurrence, expected_node_list=node_list)
def test_linear_qint8_activation(self):
"""Test support for qint8 activation in reference pattern
"""
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(1, 2, 2, 2)
self.linear = torch.nn.Linear(8, 5)
def forward(self, x):
x = self.conv(x)
x = torch.flatten(x, 1)
x = self.linear(x)
return x
m = M().eval()
example_inputs = (torch.rand(2, 1, 5, 5),)
m = prepare_fx(
m,
{"": torch.ao.quantization.QConfig(
activation=torch.ao.quantization.HistogramObserver.with_args(
qscheme=torch.per_tensor_symmetric, dtype=torch.qint8
), weight=torch.ao.quantization.default_per_channel_weight_observer)},
example_inputs=example_inputs)
m = convert_to_reference_fx(m)
m(*example_inputs)
def test_preserve_tuple(self):
""" Test tuple input type is preserved
"""
class LSTM(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lstm = nn.LSTM(50, 50, 1)
def forward(self, inputs: torch.Tensor, state: list[torch.Tensor]):
h = state[0]
c = state[1]
return self.lstm(inputs, (h, c))
m = LSTM().eval()
example_inputs = (torch.randn(5, 3, 50), torch.randn(2, 3, 50), torch.randn(2, 3, 50))
m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
# make sure the arg[1] of lstm module is a tuple
for n in m.graph.nodes:
if n.target == "lstm":
self.assertEqual(type(n.args[1]), tuple)
    def _test_static_lstm_helper(self, model, prepare_node_occurrence, convert_node_occurrence):
        """
        Helper method to validate the graph of a model with static LSTM.

        Maps nn.LSTM to the observed custom module (torch.ao.nn.quantizable.LSTM)
        during prepare and to the quantized custom module
        (torch.ao.nn.quantized.LSTM) during convert, checking the expected node
        occurrences after each step and running the model after each step.
        """
        qconfig_mapping = get_default_qconfig_mapping()
        prepare_custom_config = PrepareCustomConfig() \
            .set_float_to_observed_mapping(torch.nn.LSTM, torch.ao.nn.quantizable.LSTM)
        convert_custom_config = ConvertCustomConfig() \
            .set_observed_to_quantized_mapping(torch.ao.nn.quantizable.LSTM, torch.ao.nn.quantized.LSTM)
        example_inputs = (torch.rand(5, 3, 50), torch.rand(1, 3, 50), torch.randn(1, 3, 50))
        model = prepare_fx(model, qconfig_mapping, example_inputs, prepare_custom_config=prepare_custom_config)
        self.checkGraphModuleNodes(model, expected_node_occurrence=prepare_node_occurrence)
        # Run the observed model once, then convert and run again -- both
        # graphs must be executable.
        model(*example_inputs)
        model = convert_fx(model, convert_custom_config=convert_custom_config)
        self.checkGraphModuleNodes(model, expected_node_occurrence=convert_node_occurrence)
        model(*example_inputs)
    def test_static_lstm(self):
        """
        Test statically quantized custom module LSTM followed by ops that consume individual
        tensors of the output tuple.
        """
        class MyModel(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.lstm = nn.LSTM(50, 50, 1)
                self.linear1 = nn.Linear(50, 10)
                self.linear2 = nn.Linear(50, 10)
                self.linear3 = nn.Linear(50, 10)
            def forward(self, inputs: torch.Tensor, h0: torch.Tensor, c0: torch.Tensor):
                # Each element of the LSTM output tuple is consumed
                # individually by a separate linear layer.
                (out, (h0_out, c0_out)) = self.lstm(inputs, (h0, c0))
                out = self.linear1(out)
                h0_out = self.linear2(h0_out)
                c0_out = self.linear3(c0_out)
                return (out, (h0_out, c0_out))
        m = MyModel()
        prepare_node_occurrence = {
            # nn.LSTM is swapped for the observed custom module during prepare
            ns.call_module(torch.ao.nn.quantizable.LSTM): 1,
        }
        convert_node_occurrence = {
            ns.call_module(torch.ao.nn.quantized.LSTM): 1,
            ns.call_function(torch.quantize_per_tensor): 3,
            # lstm[0].dequantize()
            # lstm[1][0].dequantize()
            # lstm[1][1].dequantize()
            ns.call_method("dequantize"): 3,
            # lstm[0], lstm[1], lstm[1][0], lstm[1][1]
            ns.call_function(operator.getitem): 4,
            # No tuples are consumed
            ns.call_function(tuple): 0,
        }
        self._test_static_lstm_helper(m, prepare_node_occurrence, convert_node_occurrence)
    def test_static_lstm_consume_tuple(self):
        """
        Test statically quantized custom module LSTM followed by a module that consumes the
        output tuple, either as a whole or part of it.
        """
        class ModuleAfterLSTM(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.identity = torch.nn.Identity()
            def forward(self, x):
                return self.identity(x)
        class ConsumeWholeTuple(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.lstm = nn.LSTM(50, 50, 1)
                self.module_after_lstm = ModuleAfterLSTM()
            def forward(self, inputs: torch.Tensor, h0: torch.Tensor, c0: torch.Tensor):
                x = self.lstm(inputs, (h0, c0))
                x = self.module_after_lstm(x)  # consume tuple (output, (hidden0, hidden1))
                return x
        class ConsumeHiddenTuple(ConsumeWholeTuple):
            # Same model, but only the inner hidden-state tuple is consumed.
            def forward(self, inputs: torch.Tensor, h0: torch.Tensor, c0: torch.Tensor):
                x = self.lstm(inputs, (h0, c0))
                x = self.module_after_lstm(x[1])  # consume tuple (hidden0, hidden1)
                return x
        # Test consuming the whole tuple (output, (hidden0, hidden1))
        m1 = ConsumeWholeTuple()
        prepare_node_occurrence = {
            ns.call_module(torch.ao.nn.quantizable.LSTM): 1,
        }
        convert_node_occurrence1 = {
            ns.call_module(torch.ao.nn.quantized.LSTM): 1,
            ns.call_function(torch.quantize_per_tensor): 3,
            # lstm[0].dequantize()
            # lstm[1][0].dequantize()
            # lstm[1][1].dequantize()
            ns.call_method("dequantize"): 3,
            # lstm[0], lstm[1], lstm[1][0], lstm[1][1]
            ns.call_function(operator.getitem): 4,
            # tuple(output_dq, tuple(hidden0_dq, hidden1_dq))
            ns.call_function(tuple): 2,
        }
        self._test_static_lstm_helper(m1, prepare_node_occurrence, convert_node_occurrence1)
        # Test consuming just the hidden tuple (hidden0, hidden1)
        m2 = ConsumeHiddenTuple()
        convert_node_occurrence2 = {
            ns.call_module(torch.ao.nn.quantized.LSTM): 1,
            ns.call_function(torch.quantize_per_tensor): 3,
            # lstm[1][0].dequantize()
            # lstm[1][1].dequantize()
            ns.call_method("dequantize"): 2,
            # lstm[1], lstm[1][0], lstm[1][1]
            ns.call_function(operator.getitem): 3,
            # tuple(hidden0_dq, hidden1_dq)
            ns.call_function(tuple): 1,
        }
        self._test_static_lstm_helper(m2, prepare_node_occurrence, convert_node_occurrence2)
    def test_static_lstm_with_custom_fixed_qparams(self):
        """
        Test statically quantized LSTM with custom fixed qparams assigned to each of the
        inner submodules. This flow requires users to extend `torch.ao.nn.quantizable.LSTM`
        and use the child class in the custom module mapping.
        """
        class MyModel(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.my_lstm = torch.nn.LSTM(50, 50, 1)
            def forward(self, inputs: torch.Tensor, h0: torch.Tensor, c0: torch.Tensor):
                x = self.my_lstm(inputs, (h0, c0))
                return x
        # Construct a BackendConfig that supports qint32 for certain ops
        # TODO: build a BackendConfig from scratch instead of modifying an existing one
        qint32_dtype_config = DTypeConfig(input_dtype=torch.qint32, output_dtype=torch.qint32)
        my_backend_config = get_qnnpack_backend_config()
        for config in my_backend_config.configs:
            if config.pattern in [torch.nn.Sigmoid, torch.nn.Tanh, torch.add, torch.mul]:
                config.add_dtype_config(qint32_dtype_config)
        class UserObservedLSTM(torch.ao.nn.quantizable.LSTM):
            """
            Example of user provided LSTM implementation that assigns fixed qparams
            to the inner ops.
            """
            @classmethod
            def from_float(cls, float_lstm):
                assert isinstance(float_lstm, cls._FLOAT_MODULE)
                # The scale/zero_point pairs below are the fixed qparams that
                # are later verified against the converted cell graph.
                # uint16, [-16, 16)
                linear_output_obs_ctr = FixedQParamsObserver.with_args(scale=2 ** -11, zero_point=2 ** 15, dtype=torch.qint32)
                # uint16, [0, 1)
                sigmoid_obs_ctr = FixedQParamsObserver.with_args(scale=2 ** -16, zero_point=0, dtype=torch.qint32)
                # uint16, [-1, 1)
                tanh_obs_ctr = FixedQParamsObserver.with_args(scale=2 ** -15, zero_point=2 ** 15, dtype=torch.qint32)
                # int16, [-16, 16)
                cell_state_obs_ctr = FixedQParamsObserver.with_args(scale=2 ** -11, zero_point=0, dtype=torch.qint32)
                # uint8, [-1, 1)
                hidden_state_obs_ctr = FixedQParamsObserver.with_args(scale=2 ** -7, zero_point=2 ** 7, dtype=torch.quint8)
                example_inputs = (torch.rand(5, 3, 50), (torch.rand(1, 3, 50), torch.randn(1, 3, 50)))
                return torch.ao.quantization.fx.lstm_utils._get_lstm_with_individually_observed_parts(
                    float_lstm=float_lstm,
                    example_inputs=example_inputs,
                    backend_config=my_backend_config,
                    linear_output_obs_ctr=linear_output_obs_ctr,
                    sigmoid_obs_ctr=sigmoid_obs_ctr,
                    tanh_obs_ctr=tanh_obs_ctr,
                    cell_state_obs_ctr=cell_state_obs_ctr,
                    hidden_state_obs_ctr=hidden_state_obs_ctr,
                )
        class UserQuantizedLSTM(torch.ao.nn.quantized.LSTM):
            """
            Example of user provided LSTM implementation that produces a reference
            quantized module from a `UserObservedLSTM`.
            """
            @classmethod
            def from_observed(cls, observed_lstm):
                assert isinstance(observed_lstm, cls._FLOAT_MODULE)
                return torch.ao.quantization.fx.lstm_utils._get_reference_quantized_lstm_module(
                    observed_lstm=observed_lstm,
                    backend_config=my_backend_config,
                )
        # FX graph mode quantization
        m = MyModel()
        qconfig_mapping = get_default_qconfig_mapping("qnnpack")
        example_inputs = (torch.rand(5, 3, 50), torch.rand(1, 3, 50), torch.randn(1, 3, 50))
        prepare_custom_config = PrepareCustomConfig() \
            .set_float_to_observed_mapping(torch.nn.LSTM, UserObservedLSTM)
        convert_custom_config = ConvertCustomConfig() \
            .set_observed_to_quantized_mapping(torch.ao.nn.quantizable.LSTM, UserQuantizedLSTM)
        prepared = prepare_fx(
            m,
            qconfig_mapping,
            example_inputs,
            prepare_custom_config,
            backend_config=my_backend_config,
        )
        prepared(*example_inputs)
        converted = convert_fx(
            prepared,
            convert_custom_config,
            backend_config=my_backend_config,
        )
        converted(*example_inputs)
        # Find the patterns [dq - op - q_to_specific_dtype] in the graph and
        # verify that qparams and dtypes are set correctly in the quantize ops
        node_name_to_expected_quantize_args = {
            "igates": (None, None, torch.quint8),
            "hgates": (None, None, torch.quint8),
            "add": (2 ** -11, 2 ** 15, torch.qint32),  # gates.add
            "input_gate": (2 ** -16, 0, torch.qint32),
            "forget_gate": (2 ** -16, 0, torch.qint32),
            "cell_gate": (2 ** -15, 2 ** 15, torch.qint32),
            "output_gate": (2 ** -16, 0, torch.qint32),
            "mul": (2 ** -11, 0, torch.qint32),  # fgate_cx.mul
            "mul_1": (2 ** -11, 0, torch.qint32),  # igate_cgate.mul
            "add_1": (2 ** -11, 0, torch.qint32),  # fgate_cx_igate_cgate.add
            "mul_2": (2 ** -7, 2 ** 7, torch.quint8),  # ogate_cy.mul
        }
        # Drill into the cell graph of the first (forward-direction) layer.
        cell = converted.my_lstm.layers.get_submodule("0").layer_fw.cell
        matched_names = set()
        for node in cell.graph.nodes:
            if node.name not in node_name_to_expected_quantize_args:
                continue
            matched_names.add(node.name)
            # Match preceding dequantize
            self.assertTrue(all(arg.target == "dequantize" for arg in node.args))
            # Match following quantize with the specific qparams and dtypes
            expected_scale, expected_zp, expected_dtype = node_name_to_expected_quantize_args[node.name]
            for user in node.users:
                self.assertEqual(user.target, torch.quantize_per_tensor)
                if expected_scale is not None:
                    self.assertEqual(getattr(cell, user.args[1].target), expected_scale)
                if expected_zp is not None:
                    self.assertEqual(getattr(cell, user.args[2].target), expected_zp)
                self.assertEqual(user.args[-1], expected_dtype)
        # Ensure all patterns were matched
        self.assertEqual(matched_names, set(node_name_to_expected_quantize_args.keys()))
    def test_reroute_tuple_getitem_patterns(self):
        """
        The following graph should redirect the output to `b`. After the transformation,
        all other nodes, including the inputs `a` and `c`, are no longer needed.

             a   b     c
             |    \\   /
              \\   tuple
               \\   /
                tuple
               /    \\
              /      \\
             |        \\
             |         \\
             |          \\
          getitem0    getitem1
             |        /    \\
             | getitem0  getitem1
             |      \\     /
              \\      tuple
               \\    /
                \\  /
                tuple
                  |
               getitem1
                  |
               getitem0
                  |
                output
        """
        # Construct graph manually because symbolic_trace does not insert tuple and getitem nodes
        graph = torch.fx.Graph()
        a = graph.create_node("placeholder", "a")
        b = graph.create_node("placeholder", "b")
        c = graph.create_node("placeholder", "c")
        bc = graph.call_function(tuple, args=([b, c],))
        abc = graph.call_function(tuple, args=([a, bc],))
        # Break down tuple and reconstruct it again
        a2 = graph.call_function(operator.getitem, args=(abc, 0))
        bc2 = graph.call_function(operator.getitem, args=(abc, 1))
        b2 = graph.call_function(operator.getitem, args=(bc2, 0))
        c2 = graph.call_function(operator.getitem, args=(bc2, 1))
        bc3 = graph.call_function(tuple, args=([b2, c2],))
        abc2 = graph.call_function(tuple, args=([a2, bc3],))
        # Output tuple[1][0]
        bc4 = graph.call_function(operator.getitem, args=(abc2, 1))
        b3 = graph.call_function(operator.getitem, args=(bc4, 0))
        output = graph.output(b3)
        # Do reroute
        _reroute_tuple_getitem_pattern(graph)
        # Assert that output reroutes to `b` directly, and all other nodes can be removed
        output_ancestors = []
        def gather_ancestors(current_node):  # noqa: E306
            # Recursively collect every node reachable through the args of
            # the given node (depth-first).
            for arg in current_node.args:
                output_ancestors.append(arg)
                gather_ancestors(arg)
        gather_ancestors(output)
        # After rerouting, `b` must be the one and only ancestor of the output.
        self.assertEqual(output_ancestors, [b])
        self.assertEqual(output.args[0], b)
def test_relu_lowering(self):
class M(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.relu(x)
m = M().eval()
m = prepare_fx(m, {"": default_qconfig}, example_inputs=(torch.randn(1),))
m_copy = copy.deepcopy(m)
m = convert_fx(m)
m_ref = convert_to_reference_fx(m_copy)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 1
}
node_occurrence_ref = {
ns.call_function(torch.quantize_per_tensor): 2,
ns.call_method("dequantize"): 2
}
self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
self.checkGraphModuleNodes(m_ref, expected_node_occurrence=node_occurrence_ref)
    @skipIfNoFBGEMM
    def test_dynamic_with_fusion(self):
        """
        Tests that dynamic quantization APIs work with Linear + Relu fusion
        """
        with override_quantized_engine('fbgemm'):
            class LinearRelu(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.linear = torch.nn.Linear(5, 5)
                    self.relu = torch.nn.ReLU()
                def forward(self, x):
                    x = self.linear(x)
                    return self.relu(x)
            class Linear(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.w = torch.ones(5, 5)
                    self.b = torch.zeros(5)
                def forward(self, x):
                    return torch.nn.functional.linear(x, self.w, self.b)
            class M(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.mods1 = torch.nn.Sequential(LinearRelu(), LinearRelu())
                    self.mods2 = Linear()
                    self.relu = F.relu
                def forward(self, x):
                    x = self.mods1(x)
                    x = self.mods2(x)
                    x = self.relu(x)
                    return x
            # Maps each dynamic qconfig to the fused quantized op expected for
            # the functional linear + relu pattern under that qconfig.
            dynamic_quantized_ops = {
                float16_dynamic_qconfig: torch.ops.quantized.linear_relu_dynamic_fp16,
                default_dynamic_qconfig: torch.ops.quantized.linear_relu_dynamic
            }
            for qconfig in [float16_dynamic_qconfig, default_dynamic_qconfig]:
                model = M().eval()
                qconfig_dict = {
                    "": qconfig
                }
                example_inputs = (torch.rand(5, 5),)
                m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
                m = convert_fx(m)
                m(*example_inputs)
                # Module Linear+ReLU pairs fuse into nniqd.LinearReLU, and the
                # functional linear + relu lowers to the fused dynamic op.
                node_list = [
                    ns.call_module(nniqd.LinearReLU),
                    ns.call_module(nniqd.LinearReLU),
                    ns.call_function(dynamic_quantized_ops[qconfig]),
                ]
                self.checkGraphModuleNodes(m, expected_node_list=node_list)
    @skipIfNoFBGEMM
    def test_dynamic_with_fusion_multiple_uses(self):
        """
        Tests that dynamic quantization APIs work with Linear + Relu fusion
        """
        with override_quantized_engine('fbgemm'):
            class LinearRelu(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.linear = torch.nn.Linear(5, 5)
                    self.relu = torch.nn.ReLU()
                def forward(self, x):
                    x = self.linear(x)
                    return self.relu(x)
            class M(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.linear_relu = LinearRelu()
                def forward(self, x):
                    # The same fused submodule instance is called twice.
                    x = self.linear_relu(x)
                    x = self.linear_relu(x)
                    return x
            for qconfig in [float16_dynamic_qconfig, default_dynamic_qconfig]:
                model = M().eval()
                qconfig_dict = {
                    "": qconfig
                }
                example_inputs = (torch.randn(5, 5),)
                m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
                m = convert_fx(m)
                m(*example_inputs)
                # Both call sites of the shared module appear as fused
                # dynamic LinearReLU calls in the converted graph.
                node_list = [
                    ns.call_module(nniqd.LinearReLU),
                    ns.call_module(nniqd.LinearReLU),
                ]
                self.checkGraphModuleNodes(m, expected_node_list=node_list)
    @skipIfNoFBGEMM
    def test_dynamic_linear_input_multiple_use(self):
        """
        Tests input for dynamic linear being used by multiple ops
        """
        with override_quantized_engine('fbgemm'):
            class LinearRelu(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.linear = torch.nn.Linear(5, 5)
                    self.relu = torch.nn.ReLU()
                def forward(self, x):
                    x = self.linear(x)
                    return self.relu(x)
            class M(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.mod1 = LinearRelu()
                    self.mod2 = LinearRelu()
                def forward(self, x):
                    # The same input tensor feeds two dynamic linear branches.
                    y1 = self.mod1(x)
                    y2 = self.mod2(x)
                    return y1 + y2
            for qconfig in [float16_dynamic_qconfig, default_dynamic_qconfig]:
                model = M().eval()
                qconfig_dict = {
                    "": qconfig
                }
                example_inputs = (torch.rand(5, 5, 5),)
                m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs)
                m = convert_fx(m)
                m(*example_inputs)
                # Each branch lowers independently to a fused dynamic LinearReLU.
                node_list = [
                    ns.call_module(nniqd.LinearReLU),
                    ns.call_module(nniqd.LinearReLU),
                ]
                self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_ref_linear_module(self):
""" Make sure the numerics for models with ref linear module
matches models with fbgemm/qnnpack module
"""
class M1(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(10, 5)
def forward(self, x):
return self.linear(x)
class M2(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(10, 5)
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(self.linear(x))
for M in [M1, M2]:
m = M().eval()
example_inputs = (torch.randn(5, 10),)
m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs)
m_copy = copy.deepcopy(m)
m = convert_fx(m)
m_ref = convert_to_reference_fx(m_copy)
result = m(*example_inputs)
result_ref = m_ref(*example_inputs)
self.assertTrue(torch.equal(result, result_ref))
def test_ref_conv_module(self):
""" Make sure the numerics for models with ref conv module
matches models with fbgemm/qnnpack module
"""
convs = {
1: nn.Conv1d,
2: nn.Conv2d,
3: nn.Conv3d,
}
class M1(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = convs[dim](3, 3, 3)
def forward(self, x):
return self.conv(x)
class M2(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = convs[dim](3, 3, 3)
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(self.conv(x))
for dim, M in itertools.product([1, 2, 3], [M1, M2]):
m = M(dim).eval()
data = self.img_data_dict[dim][0][0]
m = prepare_fx(m, {"": default_qconfig}, example_inputs=(data,))
m_copy = copy.deepcopy(m)
m = convert_fx(m)
m_ref = convert_to_reference_fx(m_copy)
result = m(data)
result_ref = m_ref(data)
self.assertTrue(torch.equal(result, result_ref))
def test_sub_scalar(self):
class M(torch.nn.Module):
def forward(self, x):
x = x + 1
x = x - 1
x = x + 3
x = x - 4
return x
m = M().eval()
m = prepare_fx(m, {"": default_qconfig}, example_inputs=(torch.rand(3),))
m = convert_fx(m)
occurrence = {
ns.call_function(torch.quantize_per_tensor): 2,
ns.call_method("dequantize"): 2
}
self.checkGraphModuleNodes(m, expected_node_occurrence=occurrence)
def test_observer_fqn(self):
"""
Test to make sure the observer FQN is based on the quantizable op/module that it is observing
and uses the modules FQN to determine the observer name.
"""
class Linear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.w = torch.ones(5, 5)
self.b = torch.zeros(5)
def forward(self, x):
return torch.nn.functional.linear(x, self.w, self.b)
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mods1 = torch.nn.Sequential(
Linear(),
Linear()
)
self.mods2 = Linear()
self.mods3 = torch.nn.Linear(5, 5)
def forward(self, x):
x = self.mods1(x)
x = torch.add(x, 4)
x = self.mods2(x)
y = torch.add(x, 2)
z = torch.mul(x, 5)
a = self.mods3(y)
return a, z
model = M().eval()
prepared = prepare_fx(model, {"": default_qconfig}, example_inputs=(torch.randn(1, 5)))
name_list = []
for name, mod in prepared.named_modules():
if isinstance(mod, torch.ao.quantization.observer.MinMaxObserver):
name_list.append(name)
expected_name_list = ['activation_post_process_0',
'activation_post_process_1',
'activation_post_process_2',
'activation_post_process_3',
'activation_post_process_4',
'activation_post_process_6',
'activation_post_process_7',
'activation_post_process_10']
assert name_list == expected_name_list
    def test_conv_lowering(self):
        """Quantized conv (1d/2d/3d): lowering must fuse the reference
        dq - conv - q pattern into a single quantized conv module whose
        output matches the reference model exactly."""
        convs = {1: nn.Conv1d, 2: nn.Conv2d, 3: nn.Conv3d}
        qconvs = {1: nn.quantized.Conv1d, 2: nn.quantized.Conv2d, 3: nn.quantized.Conv3d}
        class M(torch.nn.Module):
            def __init__(self, dim):
                super().__init__()
                self.conv = convs[dim](3, 3, 3)
            def forward(self, x):
                return self.conv(x)
        for dim in range(1, len(convs) + 1):
            m = M(dim).eval()
            data = self.img_data_dict[dim][0][0]
            m = prepare_fx(m, {"": default_qconfig}, example_inputs=(data,))
            m_ref = copy.deepcopy(m)
            m_ref = convert_to_reference_fx(m_ref)
            m = convert_fx(m)
            out_ref = m_ref(data)
            out = m(data)
            # check that reference pattern for quantized conv module is fused
            expected_node_occurrence = {
                ns.call_function(torch.quantize_per_tensor): 1,
                ns.call_module(qconvs[dim]): 1,
                ns.call_method("dequantize"): 1
            }
            self.checkGraphModuleNodes(m, expected_node_occurrence=expected_node_occurrence)
            # checking result match
            self.assertTrue(torch.equal(out_ref, out))
    def test_convert_qconfig_mapping(self):
        """
        Test passing a qconfig mapping to convert_fx so that some prepared ops
        are skipped at convert time, selected either by module name or by
        object type.
        """
        class Linear(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.w = torch.ones(5, 5)
                self.b = torch.zeros(5)
            def forward(self, x):
                return torch.nn.functional.linear(x, self.w, self.b)
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.mods1 = torch.nn.Sequential(
                    Linear(),
                    Linear()
                )
                self.mods3 = torch.nn.Linear(5, 5)
            def forward(self, x):
                x = self.mods1(x)
                x = torch.add(x, 4)
                z = torch.mul(x, 5)
                x = self.mods3(z)
                return x
        model = M().train()
        for check in ["module_name", "object_type"]:
            qconfig_dict = {"": None,
                            "object_type": [
                                (nn.functional.linear, get_default_qat_qconfig("fbgemm")),
                                (torch.add, get_default_qat_qconfig("fbgemm")),
                                (nn.Linear, get_default_qat_qconfig("fbgemm")),
                            ],
                            }
            example_inputs = (torch.rand(5, 5),)
            prepared = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs)
            prepared(*example_inputs)
            if check == "module_name":
                # "mods1.0" is assigned qconfig None at convert time, so the
                # first functional linear stays fp32 while everything else
                # prepared above is quantized.
                convert_qconfig_dict = {"": None,
                                        "object_type": [
                                            (nn.functional.linear, get_default_qat_qconfig("fbgemm")),
                                            (torch.add, get_default_qat_qconfig("fbgemm")),
                                            (nn.Linear, get_default_qat_qconfig("fbgemm")),
                                        ],
                                        "module_name": [("mods1.0", None)]}
                node_occurrence = {
                    ns.call_function(torch.quantize_per_tensor): 2,
                    ns.call_function(torch.nn.functional.linear): 1,
                    ns.call_function(torch.ops.quantized.linear): 1,
                    ns.call_function(torch.ops.quantized.add): 1,
                    ns.call_method("dequantize"): 2
                }
                order_check = [
                    ns.call_function(torch.nn.functional.linear),
                    ns.call_function(torch.quantize_per_tensor),
                    ns.call_function(torch.ops.quantized.linear),
                    ns.call_function(torch.ops.quantized.add),
                    ns.call_method("dequantize"),
                    ns.call_function(torch.quantize_per_tensor),
                    ns.call_module(nnq.Linear),
                    ns.call_method("dequantize"),
                ]
            elif check == "object_type":
                # nn.Linear is assigned qconfig None at convert time, so mods3
                # stays a float nn.Linear; the functional linears and add are
                # quantized, and mul (never given a qconfig) stays float.
                convert_qconfig_dict = {"": None,
                                        "object_type": [
                                            (nn.functional.linear, get_default_qat_qconfig("fbgemm")),
                                            (torch.add, get_default_qat_qconfig("fbgemm")),
                                            (nn.Linear, None),
                                        ]}
                node_occurrence = {
                    ns.call_function(torch.quantize_per_tensor): 1,
                    ns.call_function(torch.ops.quantized.linear): 2,
                    ns.call_function(torch.ops.quantized.add): 1,
                    ns.call_function(torch.mul): 1,
                    ns.call_method("dequantize"): 1
                }
                order_check = [
                    ns.call_function(torch.quantize_per_tensor),
                    ns.call_function(torch.ops.quantized.linear),
                    ns.call_function(torch.ops.quantized.linear),
                    ns.call_function(torch.ops.quantized.add),
                    ns.call_method("dequantize"),
                    ns.call_function(torch.mul),
                    ns.call_module(nn.Linear),
                ]
            converted = convert_fx(prepared, qconfig_mapping=convert_qconfig_dict)
            converted(torch.rand(5, 5))
            self.checkGraphModuleNodes(
                converted,
                expected_node_occurrence=node_occurrence,
                expected_node_list=order_check)
def _assertFixedQParamsFakeQuantizeEqual(self, fq1, fq2):
self.assertEqual(fq1()._observer_ctr, fq2()._observer_ctr)
    def test_register_patterns(self):
        """Exercise the fusion/quantization pattern registration decorators
        and the associated output observer / fake-quantize maps."""
        def cleanUp():
            # Remove the dummy registrations so they do not leak into the
            # global registries used by other tests.
            del _DEFAULT_FUSION_PATTERNS["dummy_fusion"]
            del _DEFAULT_QUANTIZATION_PATTERNS["dummy_quant"]
            del _DEFAULT_QUANTIZATION_PATTERNS["dummy_quant2"]
            del _DEFAULT_QUANTIZATION_PATTERNS["dummy_quant3"]
            del _DEFAULT_OUTPUT_OBSERVER_MAP["dummy_quant2"]
            del _DEFAULT_OUTPUT_OBSERVER_MAP["dummy_quant3"]
            del _DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP["dummy_quant2"]
            del _DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP["dummy_quant3"]
        self.addCleanup(cleanUp)
        @_register_fusion_pattern("dummy_fusion")
        class DummyFusion:
            pass
        @_register_quant_pattern("dummy_quant")
        class DummyQuant:
            pass
        @_register_quant_pattern("dummy_quant2", default_fixed_qparams_range_0to1_observer)
        class DummyQuant2:
            pass
        @_register_quant_pattern("dummy_quant3", default_fixed_qparams_range_neg1to1_observer)
        class DummyQuant3:
            pass
        # Each decorator must record the class in the corresponding registry;
        # patterns registered with an observer also populate the output
        # observer and fake-quantize maps.
        self.assertEqual(_DEFAULT_FUSION_PATTERNS["dummy_fusion"], DummyFusion)
        self.assertEqual(_DEFAULT_QUANTIZATION_PATTERNS["dummy_quant"], DummyQuant)
        self.assertEqual(_DEFAULT_QUANTIZATION_PATTERNS["dummy_quant2"], DummyQuant2)
        self.assertEqual(_DEFAULT_QUANTIZATION_PATTERNS["dummy_quant3"], DummyQuant3)
        self.assertEqual(_DEFAULT_OUTPUT_OBSERVER_MAP["dummy_quant2"], default_fixed_qparams_range_0to1_observer)
        self.assertEqual(_DEFAULT_OUTPUT_OBSERVER_MAP["dummy_quant3"], default_fixed_qparams_range_neg1to1_observer)
        self._assertFixedQParamsFakeQuantizeEqual(_DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP["dummy_quant2"],
                                                  default_fixed_qparams_range_0to1_fake_quant)
        self._assertFixedQParamsFakeQuantizeEqual(_DEFAULT_OUTPUT_FAKE_QUANTIZE_MAP["dummy_quant3"],
                                                  default_fixed_qparams_range_neg1to1_fake_quant)
        # The is_training flag selects the fake-quantize map vs observer map.
        output_fake_quantize_map = get_default_output_activation_post_process_map(is_training=True)
        output_observer_map = get_default_output_activation_post_process_map(is_training=False)
        self.assertEqual(output_observer_map.get("dummy_quant3"), default_fixed_qparams_range_neg1to1_observer)
        self._assertFixedQParamsFakeQuantizeEqual(output_fake_quantize_map.get("dummy_quant3"),
                                                  default_fixed_qparams_range_neg1to1_fake_quant)
def test_reuse_input_qconfig(self):
class M1(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 3, 3)
def forward(self, x):
x = self.conv(x)
x = x.reshape()
return x
class M2(torch.nn.Module):
def forward(self, x):
x = x.reshape()
return x
options = itertools.product([M1, M2], [True, False])
for M, is_qat in options:
m = M1().eval()
example_inputs = (torch.randn(1, 3, 3, 3),)
m = prepare_fx(m, get_default_qconfig_mapping(), example_inputs=example_inputs)
m = convert_fx(m)
node_list = [
ns.call_function(torch.quantize_per_tensor),
ns.call_module(nnq.Conv2d),
ns.call_method("reshape"),
ns.call_method("dequantize"),
]
self.checkGraphModuleNodes(
m,
expected_node_list=node_list)
m = M2().eval()
m = prepare_fx(m, get_default_qconfig_mapping(), example_inputs=example_inputs)
m = convert_fx(m)
node_occurrence = {
ns.call_function(torch.quantize_per_tensor): 0,
ns.call_method("dequnatize"): 0,
}
node_list = [
ns.call_method("reshape"),
]
self.checkGraphModuleNodes(
m,
expected_node_occurrence=node_occurrence,
expected_node_list=node_list)
    def test_stack_trace_preserved_linear(self):
        """The stack trace attached to the linear call_module node must
        survive prepare, reference conversion, and quantized conversion."""
        class M(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = nn.Linear(1, 1)
            def forward(self, x):
                x = self.linear(x)
                return x
        m = M().eval()
        mp = prepare_fx(m, get_default_qconfig_mapping(), example_inputs=(torch.randn(1, 1),))
        # prepared model: the linear node should carry a stack trace
        found_stack_trace = False
        for n in mp.graph.nodes:
            if n.op == 'call_module' and n.target == 'linear':
                found_stack_trace = n.stack_trace is not None
                break
        self.assertTrue(found_stack_trace)
        # test reference model
        mq = convert_to_reference_fx(copy.deepcopy(mp))
        found_stack_trace = False
        for n in mq.graph.nodes:
            if n.op == 'call_module' and n.target == 'linear':
                found_stack_trace = n.stack_trace is not None
                break
        self.assertTrue(found_stack_trace, f"stack trace not found, node: {n.format_node()}, is_reference: True")
        # test quantized model
        mq = convert_fx(mp)
        found_stack_trace = False
        for n in mq.graph.nodes:
            if n.op == 'call_module' and n.target == 'linear':
                found_stack_trace = n.stack_trace is not None
                break
        self.assertTrue(found_stack_trace, f"stack trace not found, node: {n.format_node()}, is_reference: False")
    def test_qat_skip_untraced(self):
        """Modules declared non-traceable (by class or by name) must be left
        untouched by prepare_qat_fx: their inner Linear is not swapped for
        the QAT variant."""
        class UnTraceableModuleClass(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = nn.Linear(2, 2)
            def forward(self, x):
                return self.linear(x)
        class UnTraceableModuleName(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = nn.Linear(2, 2)
            def forward(self, x):
                return self.linear(x)
        class M(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.untraceable_module_class = UnTraceableModuleClass()
                # NOTE(review): this attribute instantiates
                # UnTraceableModuleClass, not UnTraceableModuleName; it is
                # still skipped because it is matched by *name* below --
                # confirm this is intentional.
                self.untraceable_module_name = UnTraceableModuleClass()
            def forward(self, x):
                x = self.untraceable_module_class(x)
                x = self.untraceable_module_name(x)
                return x
        mod = M()
        qconfig_dict = {"": torch.ao.quantization.get_default_qat_qconfig()}
        prepare_custom_config_dict = {
            "non_traceable_module_class": [UnTraceableModuleClass],
            "non_traceable_module_name": ["untraceable_module_name"],
        }
        example_inputs = (torch.randn(2, 2),)
        # NOTE(review): prepare_qat_fx is called twice with identical
        # arguments and the first result is discarded -- likely redundant.
        mod_prep = torch.ao.quantization.quantize_fx.prepare_qat_fx(
            mod.train(), qconfig_dict, example_inputs=example_inputs,
            prepare_custom_config=prepare_custom_config_dict
        )
        mod_prep = torch.ao.quantization.quantize_fx.prepare_qat_fx(
            mod.train(), qconfig_dict, example_inputs=example_inputs,
            prepare_custom_config=prepare_custom_config_dict
        )
        self.assertTrue(
            isinstance(mod_prep.untraceable_module_class.linear, torch.nn.Linear)
        )
        self.assertTrue(
            isinstance(mod_prep.untraceable_module_name.linear, torch.nn.Linear)
        )
        self.assertTrue(
            type(mod_prep.untraceable_module_class.linear)
            is not torch.ao.nn.qat.modules.linear.Linear,
            "prepare_qat_fx should not convert anything inside untraced module classes",
        )
        self.assertTrue(
            type(mod_prep.untraceable_module_name.linear)
            is not torch.ao.nn.qat.modules.linear.Linear,
            "prepare_qat_fx should not convert anything inside modules named in untraced_module_names",
        )
def test_qconfig_dict_setup(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.Conv1d = torch.nn.Conv1d(1, 1, 1)
self.Conv2d = torch.nn.Conv2d(1, 1, 1)
self.Conv3d = torch.nn.Conv3d(1, 1, 1)
self.ConvTranspose1d = torch.nn.ConvTranspose1d(1, 1, 1)
self.ConvTranspose2d = torch.nn.ConvTranspose2d(1, 1, 1)
self.ConvTranspose3d = torch.nn.ConvTranspose3d(1, 1, 1)
self.Linear = torch.nn.Linear(1, 1, 1)
def forward(self, x):
x = self.Conv1d(x)
x = self.Conv2d(x)
x = self.Conv3d(x)
x = self.ConvTranspose1d(x)
x = self.ConvTranspose2d(x)
x = self.ConvTranspose3d(x)
x = self.Linear(x)
x = torch.nn.functional.conv1d(x, torch.rand(2, 2))
x = torch.nn.functional.conv2d(x, torch.rand(2, 2))
x = torch.nn.functional.conv3d(x, torch.rand(2, 2))
x = torch.nn.functional.linear(x, torch.rand(2, 2))
return x
backends = ["qnnpack", "fbgemm"]
for func in [get_default_qconfig_mapping, get_default_qat_qconfig_mapping]:
for backend in backends:
m = M().eval()
qconfig_dict = func(backend)
m = prepare_fx(m, qconfig_dict, example_inputs=(torch.randn(1, 1, 1, 1)))
for mod in m.modules():
if _is_activation_post_process(mod) and mod.dtype == torch.quint8:
if backend == "fbgemm":
lower_bnd = 0
upper_bnd = 127
else:
lower_bnd = 0
upper_bnd = 255
if issubclass(type(mod), FakeQuantize):
self.assertEqual(mod.activation_post_process.quant_min, lower_bnd)
self.assertEqual(mod.activation_post_process.quant_max, upper_bnd)
else:
self.assertEqual(mod.quant_min, lower_bnd)
self.assertEqual(mod.quant_max, upper_bnd)
def test_prepare_mode(self):
class LinearModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 10)
def forward(self, x):
return self.linear(x)
def _test(prepare_fn, qconfig_dict):
m = LinearModel()
m1 = copy.deepcopy(m)
m1.train()
example_inputs = (torch.randn(1, 5),)
prepare_fn(m1, qconfig_dict, example_inputs=example_inputs)
m2 = copy.deepcopy(m)
m2.eval()
prepare_fn(m2, qconfig_dict, example_inputs=example_inputs)
# Ensure prepare_fx and prepare_qat_fx work in both training and eval modes
_test(prepare_fx, get_default_qconfig_mapping())
_test(prepare_qat_fx, get_default_qat_qconfig_mapping())
def _validate_qconfig_against_backend_config_constraints(
self,
model: torch.nn.Module,
qconfig: QConfig,
backend_config: BackendConfig,
satisfies_constraints: bool,
qconfig_name: Optional[str] = None):
"""
Helper method to validate whether `qconfig` satisfies the constraints specified in `backend_config`.
"""
qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, qconfig)
example_inputs = (torch.rand((1, 30), dtype=torch.float),)
model = prepare_fx(model, qconfig_mapping, example_inputs, backend_config=backend_config)
model(*example_inputs)
model = convert_fx(model, backend_config=backend_config)
if satisfies_constraints:
expected_node_occurrence = {
ns.call_module(torch.ao.nn.quantized.Linear) : 1,
ns.call_module(torch.nn.Linear) : 0,
}
else:
expected_node_occurrence = {
ns.call_module(torch.ao.nn.quantized.Linear) : 0,
ns.call_module(torch.nn.Linear) : 1,
}
try:
self.checkGraphModuleNodes(model, expected_node_occurrence=expected_node_occurrence)
except AssertionError as e:
if qconfig_name is not None:
print(f"ERROR: Validation for QConfig '{qconfig_name}' failed")
raise e
    def test_backend_config_quantization_range(self):
        """
        Check that quantization ranges specified through the BackendConfig are reflected in
        the observers inserted into the model.
        """
        class MyModel(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(30, 4).float()
            def forward(self, x):
                return self.linear(x)
        # backend constraints: activations limited to [0, 31] (quint8),
        # weights to [-64, 63] (qint8)
        dtype_config = DTypeConfig(
            input_dtype=DTypeWithConstraints(
                dtype=torch.quint8,
                quant_min_lower_bound=0,
                quant_max_upper_bound=31,
            ),
            output_dtype=DTypeWithConstraints(
                dtype=torch.quint8,
                quant_min_lower_bound=0,
                quant_max_upper_bound=31,
            ),
            weight_dtype=DTypeWithConstraints(
                dtype=torch.qint8,
                quant_min_lower_bound=-64,
                quant_max_upper_bound=63,
            ),
            bias_dtype=torch.float,
        )
        backend_config = BackendConfig() \
            .set_backend_pattern_config(BackendPatternConfig(torch.nn.Linear)
                .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E128
                .add_dtype_config(dtype_config)
                .set_root_module(torch.nn.Linear)
                .set_reference_quantized_module(nnqr.Linear))
        def validate_qconfig(qconfig: QConfig, satisfies_constraints: bool):
            # delegate to the shared helper; `satisfies_constraints` encodes
            # whether we expect the Linear to be quantized or left as float
            self._validate_qconfig_against_backend_config_constraints(
                MyModel(), qconfig, backend_config, satisfies_constraints)
        # Case 1: QConfig ranges fit within backend ranges, OK
        qconfig1 = QConfig(
            activation=MinMaxObserver.with_args(quant_min=0, quant_max=15, dtype=torch.quint8),
            weight=MinMaxObserver.with_args(quant_min=-32, quant_max=31, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric))
        validate_qconfig(qconfig1, satisfies_constraints=True)
        # Case 2: QConfig activation range falls outside backend range, should fail
        qconfig2 = QConfig(
            activation=MinMaxObserver.with_args(quant_min=0, quant_max=63, dtype=torch.quint8),
            weight=MinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_tensor_symmetric))
        validate_qconfig(qconfig2, satisfies_constraints=False)
        # Case 3: QConfig weight range falls outside backend range, should fail
        qconfig3 = QConfig(
            activation=MinMaxObserver.with_args(dtype=torch.quint8),
            weight=MinMaxObserver.with_args(quant_min=-128, quant_max=127, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric))
        validate_qconfig(qconfig3, satisfies_constraints=False)
        # Case 4: QConfig doesn't specify range, should fail
        qconfig4 = QConfig(activation=ReuseInputObserver, weight=ReuseInputObserver)
        validate_qconfig(qconfig4, satisfies_constraints=False)
    def test_backend_config_scale_min(self):
        """
        Test QConfig eps validation against the BackendConfig's min scale value.
        """
        class MyModel(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(30, 4).float()
            def forward(self, x):
                return self.linear(x)
        # backend requires a minimum scale of 2 ** -12 for all dtypes
        dtype_config = DTypeConfig(
            input_dtype=DTypeWithConstraints(dtype=torch.quint8, scale_min_lower_bound=2 ** -12),
            output_dtype=DTypeWithConstraints(dtype=torch.quint8, scale_min_lower_bound=2 ** -12),
            weight_dtype=DTypeWithConstraints(dtype=torch.qint8, scale_min_lower_bound=2 ** -12),
            bias_dtype=torch.float,
        )
        backend_config = BackendConfig() \
            .set_backend_pattern_config(BackendPatternConfig(torch.nn.Linear)
                .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT)  # noqa: E128
                .add_dtype_config(dtype_config)
                .set_root_module(torch.nn.Linear)
                .set_reference_quantized_module(nnqr.Linear))
        def validate_qconfig(qconfig: QConfig, satisfies_constraints: bool):
            self._validate_qconfig_against_backend_config_constraints(
                MyModel(), qconfig, backend_config, satisfies_constraints)
        # Case 1: QConfig min scale value == backend min scale value, OK
        qconfig1 = QConfig(
            activation=MinMaxObserver.with_args(dtype=torch.quint8, eps=2 ** -12),
            weight=MinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, eps=2 ** -12))
        validate_qconfig(qconfig1, satisfies_constraints=True)
        # Case 2: QConfig min scale value > backend min scale value, OK
        qconfig2 = QConfig(
            activation=MinMaxObserver.with_args(dtype=torch.quint8, eps=2 ** -10),
            weight=MinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, eps=2 ** -10))
        validate_qconfig(qconfig2, satisfies_constraints=True)
        # Case 3: QConfig activation min scale value < backend min scale value, should fail
        qconfig3 = QConfig(
            activation=MinMaxObserver.with_args(dtype=torch.quint8, eps=2 ** -14),
            weight=MinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_tensor_symmetric))
        validate_qconfig(qconfig3, satisfies_constraints=False)
        # Case 4: QConfig weight min scale value < backend min scale value, should fail
        qconfig4 = QConfig(
            activation=MinMaxObserver.with_args(dtype=torch.quint8),
            weight=MinMaxObserver.with_args(dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, eps=2 ** -14))
        validate_qconfig(qconfig4, satisfies_constraints=False)
        # Case 5: QConfig doesn't specify eps, should fail
        qconfig5 = QConfig(
            activation=FixedQParamsObserver.with_args(scale=1.0, zero_point=0),
            weight=FixedQParamsObserver.with_args(scale=1.0, zero_point=0))
        validate_qconfig(qconfig5, satisfies_constraints=False)
    def test_qnnpack_backend_config(self):
        """
        Test whether default QNNPACK QConfigs are compatible with the QNNPACK BackendConfig.
        """
        class MyModel(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(30, 4).float()
            def forward(self, x):
                return self.linear(x)
        # pairs of (qconfig, name); the name labels any validation failure
        all_qconfigs: list[tuple[QConfig, str]] = [
            (get_default_qconfig("qnnpack", version=0), "default_qnnpack_qconfig_v0"),
            (get_default_qat_qconfig("qnnpack", version=0), "default_qat_qnnpack_qconfig_v0"),
            (get_default_qat_qconfig("qnnpack", version=1), "default_qat_qnnpack_qconfig_v1"),
            (default_symmetric_qnnpack_qconfig, "default_symmetric_qnnpack_qconfig"),
            (default_symmetric_qnnpack_qat_qconfig, "default_symmetric_qnnpack_qat_qconfig"),
            # TODO: Test these QConfigs once they are fixed, see https://github.com/pytorch/pytorch/issues/85862
            # (default_per_channel_symmetric_qnnpack_qconfig, "default_per_channel_symmetric_qnnpack_qconfig"),
            # (default_per_channel_symmetric_qnnpack_qat_qconfig, "default_per_channel_symmetric_qnnpack_qat_qconfig"),
        ]
        backend_config = get_qnnpack_backend_config()
        # every default qnnpack qconfig is expected to satisfy the constraints
        for qconfig, qconfig_name in all_qconfigs:
            self._validate_qconfig_against_backend_config_constraints(
                MyModel(), qconfig, backend_config, satisfies_constraints=True, qconfig_name=qconfig_name)
def test_symmetric_qnnpack_qconfig_mapping(self):
"""
Test whether `torch.ao.quantization.qconfig_mapping._get_symmetric_qnnpack_qconfig_mapping`
works with the QNNPACK BackendConfig.
"""
if "qnnpack" not in supported_qengines:
return
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(30, 4).float()
def forward(self, x):
return self.linear(x)
with override_quantized_engine("qnnpack"):
qconfig_mapping = _get_symmetric_qnnpack_qconfig_mapping()
example_inputs = (torch.rand((1, 30), dtype=torch.float),)
backend_config = get_qnnpack_backend_config()
model = MyModel()
model = prepare_fx(model, qconfig_mapping, example_inputs, backend_config=backend_config)
model(*example_inputs)
model = convert_fx(model, backend_config=backend_config)
expected_node_occurrence = {
ns.call_module(torch.ao.nn.quantized.Linear) : 1,
ns.call_module(torch.nn.Linear) : 0,
}
self.checkGraphModuleNodes(model, expected_node_occurrence=expected_node_occurrence)
model(*example_inputs)
def test_symmetric_qnnpack_qat_qconfig_mapping(self):
"""
Test whether `torch.ao.quantization.qconfig_mapping._get_symmetric_qnnpack_qat_qconfig_mapping`
works with the QNNPACK BackendConfig.
"""
if "qnnpack" not in supported_qengines:
return
class MyModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(30, 4).float()
def forward(self, x):
return self.linear(x)
with override_quantized_engine("qnnpack"):
qconfig_mapping = _get_symmetric_qnnpack_qat_qconfig_mapping()
example_inputs = (torch.rand((1, 30), dtype=torch.float),)
backend_config = get_qnnpack_backend_config()
model = MyModel()
model = prepare_fx(model, qconfig_mapping, example_inputs, backend_config=backend_config)
model(*example_inputs)
model = convert_fx(model, backend_config=backend_config)
expected_node_occurrence = {
ns.call_module(torch.ao.nn.quantized.Linear) : 1,
ns.call_module(torch.nn.Linear) : 0,
}
self.checkGraphModuleNodes(model, expected_node_occurrence=expected_node_occurrence)
model(*example_inputs)
def test_get_executorch_backend_config(self):
from torch.ao.quantization.backend_config import get_executorch_backend_config
# make sure this runs
executorch_backend_config = get_executorch_backend_config()
    def test_backend_config_check_for_weight_and_bias(self):
        """ Test to make sure the backend_config check for weight and bias
        runs when the qconfig is None for the ops with weight and bias
        previously the error was not hit because we first check input, and
        the check for weight and bias are skipped.
        """
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.weight = torch.tensor((5, 5))
                self.bias = torch.tensor((5,))
            def forward(self, x):
                return torch.addmm(self.bias, x, self.weight)
        m = M().eval()
        # empty mapping: every op's qconfig is None, which used to break the
        # weight/bias check exercised below
        qconfig_mapping = QConfigMapping()
        observation_type = ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT
        weighted_op_quint8_dtype_config = DTypeConfig(
            input_dtype=torch.quint8,
            output_dtype=torch.quint8,
            weight_dtype=torch.qint8,
            bias_dtype=torch.float,
        )
        dtype_configs = [weighted_op_quint8_dtype_config]
        # addmm(bias, input, weight): map weight/bias onto their argument
        # indices so the backend check can locate them
        backend_pattern_config = BackendPatternConfig(torch.addmm) \
            .set_observation_type(observation_type) \
            .set_dtype_configs(dtype_configs) \
            ._set_input_type_to_index({"weight": 2, "bias": 0})
        backend_config = BackendConfig() \
            .set_backend_pattern_config(backend_pattern_config)
        example_inputs = (torch.rand(1, 5),)
        # make sure this runs
        m = prepare_fx(m, qconfig_mapping, example_inputs, backend_config=backend_config)
def test_get_default_qconfig_valid_backend(self):
""" Checks that AssertionError is raised when non expected backend input is specified
"""
invalid_backends = ["imaginary_backend", 3]
for invalid_backend in invalid_backends:
with self.assertRaisesRegex(AssertionError, "not supported"):
qconfig = get_default_qconfig(invalid_backend)
with self.assertRaisesRegex(AssertionError, "not supported"):
qconfig = get_default_qat_qconfig(invalid_backend)
with self.assertRaisesRegex(AssertionError, "not supported"):
qconfig_mapping = get_default_qconfig_mapping(invalid_backend)
with self.assertRaisesRegex(AssertionError, "not supported"):
qconfig_mapping = get_default_qat_qconfig_mapping(invalid_backend)
    def test__convert_to_reference_decomposed_fx(self):
        """`_convert_to_reference_decomposed_fx` should produce decomposed
        quantize/dequantize ops (torch.ops.quantized_decomposed.*) and match
        the numerics of the regular reference conversion."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 10)
            def forward(self, x):
                return self.linear(x)
        m = M().eval()
        qconfig_mapping = get_default_qconfig_mapping("fbgemm")
        example_inputs = (torch.randn(1, 5),)
        m = prepare_fx(m, qconfig_mapping, example_inputs)
        # keep a copy for the non-decomposed reference conversion
        m_ref = copy.deepcopy(m)
        m_ref = convert_to_reference_fx(m_ref)
        m = _convert_to_reference_decomposed_fx(m)
        # one q/dq pair for the input activation and one for the output
        expected_occurrence = {
            ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default): 2,
            ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default): 2,
        }
        self.checkGraphModuleNodes(
            m,
            expected_node_occurrence=expected_occurrence)
        # make sure it runs
        res_ref = m_ref(*example_inputs)
        res = m(*example_inputs)
        self.assertEqual(res, res_ref)
    @skipIfNoQNNPACK
    def test__convert_to_reference_decomposed_fx_dynamic_quant(self):
        """Dynamic quantization variant: the decomposed reference model should
        contain choose_qparams plus the .tensor overloads of quantize and
        dequantize, and match the numerics of the regular reference conversion."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 10)
            def forward(self, x):
                return self.linear(x)
        # to avoid reduce_range
        with override_quantized_engine("qnnpack"):
            m = M().eval()
            qconfig_mapping = get_default_qconfig_mapping("fbgemm") \
                .set_object_type(torch.nn.Linear, default_dynamic_qconfig)
            example_inputs = (torch.randn(1, 5),)
            m = prepare_fx(m, qconfig_mapping, example_inputs)
            m(*example_inputs)
            m_ref = copy.deepcopy(m)
            m_ref = convert_to_reference_fx(m_ref)
            m = _convert_to_reference_decomposed_fx(m)
            # dynamic quant computes qparams at runtime, hence the .tensor overloads
            expected_occurrence = {
                ns.call_function(torch.ops.quantized_decomposed.choose_qparams.tensor): 1,
                ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.tensor): 1,
                ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.tensor): 1,
            }
            self.checkGraphModuleNodes(
                m,
                expected_node_occurrence=expected_occurrence)
            # make sure it runs
            res_ref = m_ref(*example_inputs)
            res = m(*example_inputs)
            self.assertEqual(res, res_ref)
    def test__convert_to_reference_decomposed_fx_per_channel_quant(self):
        """Per-channel variant: the decomposed reference model should contain
        per-channel quantize/dequantize ops for the weight and per-tensor ops
        for the activations, and match reference numerics."""
        class M(torch.nn.Module):
            def forward(self, x, weight, bias):
                return F.linear(x, weight, bias)
        m = M().eval()
        qconfig_mapping = get_default_qconfig_mapping("fbgemm") \
            .set_object_type(F.linear, default_per_channel_qconfig)
        # (input, weight, bias) -- weight/bias come in as forward arguments
        example_inputs = (torch.randn(1, 5), torch.randn(10, 5), torch.randn(10,))
        m = prepare_fx(m, qconfig_mapping, example_inputs)
        m(*example_inputs)
        m_ref = copy.deepcopy(m)
        m_ref = convert_to_reference_fx(m_ref)
        m = _convert_to_reference_decomposed_fx(m)
        expected_occurrence = {
            # for input and output activations
            ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor.default): 2,
            ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor.default): 2,
            # for weight
            ns.call_function(torch.ops.quantized_decomposed.quantize_per_channel.default): 1,
            ns.call_function(torch.ops.quantized_decomposed.dequantize_per_channel.default): 1,
        }
        self.checkGraphModuleNodes(
            m,
            expected_node_occurrence=expected_occurrence)
        # make sure it runs
        res_ref = m_ref(*example_inputs)
        res = m(*example_inputs)
        self.assertEqual(res, res_ref)
    def test_change_backend_config_for_fixed_qparam_ops(self):
        """ Making sure we can skip validation of qconfigs for fixedqparam ops based
        on BackendConfig
        """
        class M(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.tanh = torch.nn.Tanh()
            def forward(self, x: torch.Tensor):
                x = self.tanh(x)
                return x
        model = M().eval()
        # we set a global default_qconfig, which will be ignored since the backend
        # we defined doesn't support anything
        # this is to make sure we don't validate the qconfig when BackendConfig does not
        # have fixed qparam op related configurations
        qconfig_mapping = QConfigMapping().set_global(default_qconfig)
        # intentionally empty: the backend supports no patterns at all
        backend_config = BackendConfig()
        # make sure this runs
        model = prepare_fx(
            model,
            qconfig_mapping=qconfig_mapping,
            example_inputs=(torch.randn(1, 2, 3, 4),),
            backend_config=backend_config
        )
    def test_channel_shuffle_lowering(self):
        """All three spellings of channel shuffle (module, torch.channel_shuffle,
        torch.nn.functional.channel_shuffle) should lower the same way: a single
        quantize/dequantize pair around the whole computation, while the
        reference model keeps 4 quantize and 4 dequantize nodes."""
        # Three versions of channel shuffle
        class M1(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.op = torch.nn.ChannelShuffle(2)
            def forward(self, x):
                return self.op(x + x) + x
        class M2(torch.nn.Module):
            def forward(self, x):
                return torch.channel_shuffle(x + x, 2) + x
        class M3(torch.nn.Module):
            def forward(self, x):
                return torch.nn.functional.channel_shuffle(x + x, 2) + x
        x = torch.randn(4, 4, 4, 4)
        # torch.channel_shuffle is equivalent to torch.nn.functional.channel_shuffle
        model_node_pairs = [
            (M1().eval(), ns.call_module(torch.nn.ChannelShuffle)),
            (M2().eval(), ns.call_function(torch.channel_shuffle)),
            (M3().eval(), ns.call_function(torch.channel_shuffle))
        ]
        for m, node in model_node_pairs:
            m = prepare_fx(m, {"": default_qconfig}, example_inputs=(x,))
            m_copy = copy.deepcopy(m)
            m = convert_fx(m)
            m_ref = convert_to_reference_fx(m_copy)
            # lowered model: one quantize at the input, one dequantize at the output
            node_occurrence = {
                node: 1,
                ns.call_function(torch.quantize_per_tensor): 1,
                ns.call_method("dequantize"): 1
            }
            # reference model: q/dq nodes are kept around the observed values
            node_occurrence_ref = {
                node: 1,
                ns.call_function(torch.quantize_per_tensor): 4,
                ns.call_method("dequantize"): 4
            }
            self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
            self.checkGraphModuleNodes(m_ref, expected_node_occurrence=node_occurrence_ref)
    def test_match_pattern_with_multiple_args(self):
        """ Test that we can match a pattern that has multiple arguments
        Pattern:
                       shape \
        transpose (observed) -> reshape -> output (observed) ->
        where `reshape` has two arguments
        """
        def _get_pattern_configs():
            backend_pattern_configs = []
            observation_type = ObservationType.OUTPUT_SHARE_OBSERVER_WITH_INPUT
            weighted_op_quint8_dtype_config = DTypeConfig(
                input_dtype=torch.quint8,
                output_dtype=torch.quint8,
                weight_dtype=torch.qint8,
                bias_dtype=torch.float,
            )
            dtype_configs = [weighted_op_quint8_dtype_config]
            def root_node_getter(node_pattern):
                # the complex-format pattern is (reshape, transpose, shape);
                # the transpose node is the root of the match
                reshape, transpose, shape = node_pattern
                return transpose
            backend_pattern_configs.append(
                BackendPatternConfig()
                ._set_pattern_complex_format((torch.reshape, torch.transpose, MatchAllNode))  # noqa: E131
                .set_observation_type(observation_type)
                .set_dtype_configs(dtype_configs)
                ._set_root_node_getter(root_node_getter)
            )
            return backend_pattern_configs
        backend_config = BackendConfig().set_backend_pattern_configs(_get_pattern_configs())
        class M(torch.nn.Module):
            def forward(self, x):
                x = torch.transpose(x, 0, 1)
                x = torch.reshape(x, (-1,))
                return x
        m = M().eval()
        qconfig_mapping = QConfigMapping().set_global(default_qconfig)
        example_inputs = (torch.randn(1, 3, 3, 3),)
        m = prepare_fx(m, qconfig_mapping, example_inputs, backend_config=backend_config)
        node_occurrence = {
            # one for input of the pattern and one for output of the pattern
            ns.call_module(MinMaxObserver): 2
        }
        self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
    def _test_linear_activation_fusion_lowering_helper(
            self, module, example_inputs, qconfig_mapping,
            backend_config, fused_module, root_module, activation_module):
        """Shared helper: prepare/convert `module` with `backend_config`, then
        assert the lowered graph contains exactly one `fused_module` (and no
        standalone `root_module` / `activation_module`), while the
        reference-converted copy keeps explicit quantize/dequantize pairs.
        Finally runs the lowered model on `example_inputs`.
        """
        node_occurrence = {
            ns.call_function(torch.quantize_per_tensor): 1,
            ns.call_method("dequantize"): 1,
            ns.call_module(fused_module): 1,
            ns.call_module(root_module): 0,
            ns.call_module(activation_module): 0,
        }
        node_occurrence_ref = {
            ns.call_function(torch.quantize_per_tensor): 2,
            ns.call_method("dequantize"): 2,
        }
        m = module.eval()
        m = prepare_fx(m, qconfig_mapping,
                       example_inputs=example_inputs,
                       backend_config=backend_config)
        # keep a prepared copy for the reference conversion path
        m_copy = copy.deepcopy(m)
        m = convert_fx(m, backend_config=backend_config)
        m_ref = convert_to_reference_fx(m_copy)
        self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
        self.checkGraphModuleNodes(m_ref, expected_node_occurrence=node_occurrence_ref)
        m(*example_inputs)
    @skipIfNoONEDNN
    def test_linear_leaky_relu_lowering(self):
        """ Test fusion and lowering of Linear - (bn -) LeakyReLU
        by FX. For onednn backend only.
        """
        from torch.ao.quantization.backend_config import get_onednn_backend_config
        qconfig_mapping = get_default_qconfig_mapping('onednn')
        with override_quantized_engine('onednn'):
            # exercise the pattern both with and without an intermediate BatchNorm
            for with_bn in [True, False]:
                m = LinearBnLeakyReluModel(with_bn)
                # expect a single fused LinearLeakyReLU module after lowering
                self._test_linear_activation_fusion_lowering_helper(
                    m,
                    m.get_example_inputs(),
                    qconfig_mapping,
                    get_onednn_backend_config(),
                    nniq.LinearLeakyReLU,
                    nn.Linear,
                    nn.LeakyReLU)
    @skipIfNoONEDNN
    def test_linear_tanh_lowering(self):
        """ Test fusion and lowering of Linear - Tanh
        by FX. For onednn backend only.
        """
        from torch.ao.quantization.backend_config import get_onednn_backend_config
        qconfig_mapping = get_default_qconfig_mapping('onednn')
        # TODO Currently it's required that separate ops in a fused op/module have the same qconfig.
        # Need to be able to support fusion of ops with different qconfigs
        # Since tanh must have 'fixed_qparams_qconfig' while linear should use
        # the global qconfig, we need to set qconfigs for them manually here for
        # fusion and cannot put such configs in onednn's default qconfig_mapping.
        # Known issue:
        # Cannot fuse linear - tanh and quantize standalone tanh at the same time.
        qconfig = get_default_qconfig('onednn')
        qconfig_mapping.set_object_type(torch.nn.Linear, qconfig)
        qconfig_mapping.set_object_type(torch.nn.Tanh, qconfig)
        with override_quantized_engine('onednn'):
            m = LinearTanhModel()
            # expect a single fused LinearTanh module after lowering
            self._test_linear_activation_fusion_lowering_helper(
                m,
                m.get_example_inputs(),
                qconfig_mapping,
                get_onednn_backend_config(),
                nniq.LinearTanh,
                nn.Linear,
                nn.Tanh)
@override_qengines
def test_linear_size_view(self):
class M(torch.nn.Module):
def __init__(self, use_relu=False):
super().__init__()
self.linear = torch.nn.Linear(16, 32)
self.relu = torch.nn.ReLU()
self.use_relu = use_relu
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
return x.view(x.size(0), 1, 4, 8)
for use_relu in [False, True]:
model_fp32 = M(use_relu).eval()
qengine = torch.backends.quantized.engine
qconfig_mapping = get_default_qconfig_mapping(qengine)
x = torch.randn((5, 16))
model_fp32(x)
prepared_model = prepare_fx(model_fp32, qconfig_mapping, x)
prepared_model(x)
quantized_model = convert_fx(prepared_model)
node_occurrence = {
ns.call_module(nnq.Linear): 0 if use_relu else 1,
ns.call_module(nniq.LinearReLU): 1 if use_relu else 0,
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 1
}
self.checkGraphModuleNodes(quantized_model, expected_node_occurrence=node_occurrence)
@override_qengines
def test_linear_shape_view(self):
class M(torch.nn.Module):
def __init__(self, use_relu=False):
super().__init__()
self.linear = torch.nn.Linear(16, 32)
self.relu = torch.nn.ReLU()
self.use_relu = use_relu
def forward(self, x):
x = self.linear(x)
if self.use_relu:
x = self.relu(x)
return x.view(x.shape[0], 1, 4, 8)
for use_relu in [False, True]:
model_fp32 = M(use_relu).eval()
qengine = torch.backends.quantized.engine
qconfig_mapping = get_default_qconfig_mapping(qengine)
x = torch.randn((5, 16))
model_fp32(x)
prepared_model = prepare_fx(model_fp32, qconfig_mapping, x)
prepared_model(x)
quantized_model = convert_fx(prepared_model)
node_occurrence = {
ns.call_module(nnq.Linear): 0 if use_relu else 1,
ns.call_module(nniq.LinearReLU): 1 if use_relu else 0,
ns.call_function(torch.quantize_per_tensor): 1,
ns.call_method("dequantize"): 1
}
self.checkGraphModuleNodes(quantized_model, expected_node_occurrence=node_occurrence)
    def test_mixed_dtypes(self):
        """
        Test that multiple dtypes can be used in the same model for different layers,
        and the dtypes will be converted correctly between the layers.
        """
        class MyModule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear1 = torch.nn.Linear(5, 5)
                self.linear2 = torch.nn.Linear(5, 5)
                self.sigmoid = torch.nn.Sigmoid()
                self.tanh = torch.nn.Tanh()
                self.float_functional = torch.ao.nn.quantized.FloatFunctional()
            def forward(self, x: torch.Tensor):
                x = self.linear1(x)  # qint32
                x = self.linear2(x)  # quint8
                linear2 = x
                x = self.sigmoid(x)  # back to qint32
                x = self.tanh(x)  # back to quint8
                x = self.float_functional.add(linear2, x)  # adding two quint8's together
                return x
        def make_qconfig(scale, zp, dtype):
            # fixed qparams make each layer's dtype/qparams deterministic
            return QConfig(
                activation=FixedQParamsObserver.with_args(scale=scale, zero_point=zp, dtype=dtype),
                weight=torch.ao.quantization.default_weight_observer)
        # Set up a QConfigMapping that specifies different qparams and dtypes for different layers
        qconfig_mapping = QConfigMapping() \
            .set_global(get_default_qconfig("qnnpack")) \
            .set_module_name("linear1", make_qconfig(1234, 11, torch.qint32)) \
            .set_module_name("linear2", make_qconfig(2345, 22, torch.quint8)) \
            .set_object_type(torch.nn.Sigmoid, make_qconfig(3456, 33, torch.qint32)) \
            .set_object_type(torch.nn.Tanh, make_qconfig(4567, 44, torch.quint8))
        # Set up BackendConfig that supports the dtypes configured in the above QConfigMapping
        weighted_op_qint32_dtype_config = DTypeConfig(
            input_dtype=torch.qint32,
            output_dtype=torch.qint32,
            weight_dtype=torch.qint8,
            bias_dtype=torch.float,
        )
        fixed_qparams_op_quint8_dtype_config = DTypeConfig(
            input_dtype=torch.quint8,
            output_dtype=torch.quint8,
        )
        fixed_qparams_op_qint32_dtype_config = DTypeConfig(
            input_dtype=torch.qint32,
            output_dtype=torch.qint32,
        )
        backend_config = get_qnnpack_backend_config()
        # extend the stock qnnpack backend config in place with the extra dtypes
        for config in backend_config.configs:
            if config.pattern == torch.nn.Linear:
                config.add_dtype_config(weighted_op_qint32_dtype_config)
            elif config.pattern in [torch.nn.Sigmoid, torch.nn.Tanh]:
                config.add_dtype_config(fixed_qparams_op_quint8_dtype_config)
                config.add_dtype_config(fixed_qparams_op_qint32_dtype_config)
        # Produce the reference quantized model
        m = MyModule()
        example_inputs = (torch.rand(5, 5),)
        prepared = prepare_fx(m, qconfig_mapping, example_inputs, backend_config=backend_config)
        prepared(*example_inputs)  # calibrate
        converted = convert_to_reference_fx(prepared, backend_config=backend_config)
        converted(*example_inputs)
        # Verify that the reference model is correct
        #
        # Reference model until add should be:
        # fp32_input -> q_to_int32 -> [dq -> linear1_fp32 -> q_to_int32] -> dq ->
        # q_to_uint8 -> [dq -> linear2_fp32 -> q_to_uint8] -> dq (linear2_dq) ->
        # q_to_int32 -> [dq -> sigmoid_fp32 -> q_to_int32] -> dq ->
        # q_to_uint8 -> [dq -> tanh_fp32 -> q_to_uint8] -> dq (tanh_dq)
        #
        # Complete reference model with add should be:
        # [(linear2_dq, tanh_dq) -> add_fp32 -> q_to_uint8] -> dq -> fp32_output
        target_to_expected_dtypes = {
            "linear1": torch.qint32,
            "linear2": torch.quint8,
            "sigmoid": torch.qint32,
            "tanh": torch.quint8,
            torch.add: torch.quint8,
        }
        # Find the patterns [dq - op_fp32 - q_to_specific_dtype] in the graph
        linear2_node = tanh_node = None  # remembered for the final add check
        for node in converted.graph.nodes:
            if node.target not in target_to_expected_dtypes:
                continue
            # Match preceding dequantize
            self.assertTrue(len(node.args) == 1 or len(node.args) == 2)
            self.assertTrue(all(arg.target == "dequantize" for arg in node.args))
            # Match following quantize with the specific dtypes
            self.assertEqual(len(node.users), 1)
            user = next(iter(node.users.keys()))
            self.assertEqual(user.target, torch.quantize_per_tensor)
            self.assertEqual(user.args[-1], target_to_expected_dtypes[node.target])
            # Match [dq - torch.add(linear2_dq, tanh_dq) - q]
            if node.target == "linear2":
                linear2_node = node
            elif node.target == "tanh":
                tanh_node = node
            elif node.target == torch.add:
                linear2_dq, tanh_dq = node.args
                self.assertEqual(tanh_dq.args[0].args[0], tanh_node)
                self.assertEqual(linear2_dq.args[0].args[0], linear2_node)
    def test_lowering_functional_conv_with_kwargs(self):
        """F.conv{1,2,3}d called with keyword arguments (bias/stride/padding/
        dilation/groups) should still lower to the corresponding
        torch.ops.quantized.conv{1,2,3}d op."""
        dim_to_op = {
            1: F.conv1d,
            2: F.conv2d,
            3: F.conv3d,
        }
        dim_to_qop = {
            1: torch.ops.quantized.conv1d,
            2: torch.ops.quantized.conv2d,
            3: torch.ops.quantized.conv3d,
        }
        class Mod(nn.Module):
            def __init__(self, in_channels, out_channels, kernel_size, dimension):
                super().__init__()
                self.dim = dimension
                self.op = dim_to_op[dimension]
                kernel_sizes = [kernel_size] * self.dim
                self.weight = nn.Parameter(torch.randn(out_channels, in_channels, *kernel_sizes))
            def forward(self, input):
                # all optional args passed as kwargs on purpose
                return self.op(input, self.weight, bias=None, stride=[1] * self.dim,
                               padding=[0] * self.dim, dilation=[1] * self.dim, groups=1)
        for dimension in [1, 2, 3]:
            model = Mod(3, 16, 3, dimension)
            model.eval()
            qconfig_mapping = get_default_qconfig_mapping()
            input_shape = (1, 3, *([8] * dimension))
            # NOTE(review): example_inputs is a bare tensor here; prepare_fx
            # documents a tuple of inputs -- confirm intended.
            example_inputs = torch.randn(input_shape)
            prepared_model = prepare_fx(model, qconfig_mapping, example_inputs)
            prepared_model(example_inputs)
            quantized_model = convert_fx(prepared_model)
            # This should pass
            quantized_model(example_inputs)
            # Ensure the quantized model has the expected op
            node_occurrence = {
                ns.call_function(dim_to_qop[dimension]): 1,
            }
            self.checkGraphModuleNodes(quantized_model, expected_node_occurrence=node_occurrence)
    def test_lowering_functional_conv_transpose_with_kwargs(self):
        """F.conv_transpose{1,2,3}d called with keyword arguments should still
        lower to the corresponding torch.ops.quantized.conv_transpose{1,2,3}d op."""
        dim_to_op = {
            1: F.conv_transpose1d,
            2: F.conv_transpose2d,
            3: F.conv_transpose3d,
        }
        dim_to_qop = {
            1: torch.ops.quantized.conv_transpose1d,
            2: torch.ops.quantized.conv_transpose2d,
            3: torch.ops.quantized.conv_transpose3d,
        }
        class Mod(nn.Module):
            def __init__(self, in_channels, out_channels, kernel_size, dimension):
                super().__init__()
                self.dim = dimension
                self.op = dim_to_op[dimension]
                kernel_sizes = [kernel_size] * self.dim
                # transposed conv weight layout is (in_channels, out_channels, *kernel)
                self.weight = nn.Parameter(torch.randn(in_channels, out_channels, *kernel_sizes))
            def forward(self, input):
                # all optional args passed as kwargs on purpose
                return self.op(input, self.weight, bias=None, stride=[1] * self.dim,
                               padding=[0] * self.dim, output_padding=[0] * self.dim,
                               dilation=[1] * self.dim, groups=1)
        for dimension in [1, 2, 3]:
            model = Mod(3, 16, 3, dimension)
            model.eval()
            qconfig_mapping = get_default_qconfig_mapping()
            input_shape = (1, 3, *([8] * dimension))
            # NOTE(review): example_inputs is a bare tensor here; prepare_fx
            # documents a tuple of inputs -- confirm intended.
            example_inputs = torch.randn(input_shape)
            prepared_model = prepare_fx(model, qconfig_mapping, example_inputs)
            prepared_model(example_inputs)
            quantized_model = convert_fx(prepared_model)
            # This should pass
            quantized_model(example_inputs)
            # Ensure the quantized model has the expected op
            node_occurrence = {
                ns.call_function(dim_to_qop[dimension]): 1,
            }
            self.checkGraphModuleNodes(quantized_model, expected_node_occurrence=node_occurrence)
    def test_lowering_functional_linear_with_kwargs(self):
        """F.linear called with a keyword `bias` argument should still lower
        to torch.ops.quantized.linear."""
        class Mod(nn.Module):
            def __init__(self, in_channels, out_channels):
                super().__init__()
                self.weight = nn.Parameter(torch.randn(out_channels, in_channels))
            def forward(self, input):
                # bias passed as a kwarg on purpose
                return F.linear(input, self.weight, bias=None)
        model = Mod(8, 4)
        model.eval()
        qconfig_mapping = get_default_qconfig_mapping()
        # NOTE(review): example_inputs is a bare tensor here; prepare_fx
        # documents a tuple of inputs -- confirm intended.
        example_inputs = torch.randn(1, 8)
        prepared_model = prepare_fx(model, qconfig_mapping, example_inputs)
        prepared_model(example_inputs)
        quantized_model = convert_fx(prepared_model)
        # This should pass
        quantized_model(example_inputs)
        # Ensure the quantized model has the expected op
        node_occurrence = {
            ns.call_function(torch.ops.quantized.linear): 1,
        }
        self.checkGraphModuleNodes(quantized_model, expected_node_occurrence=node_occurrence)
@skipIfNoFBGEMM
def test_keep_original_weights(self):
class SubModule(nn.Module):
"""
A simple submodule containing a linear layer.
"""
def __init__(self, input_dim, output_dim):
super().__init__()
self.w = nn.Parameter(torch.randn(input_dim, output_dim))
self.b = nn.Parameter(torch.randn(input_dim))
def forward(self, x):
return F.linear(x, self.w, self.b)
class MainModule(nn.Module):
"""
The main module containing the submodule.
"""
def __init__(self, input_dim, hidden_dim, output_dim):
super().__init__()
self.submodule_1 = SubModule(hidden_dim, input_dim)
setattr(self, 'submodule|2', SubModule(hidden_dim, hidden_dim))
setattr(self, 'submodule/3', SubModule(hidden_dim, hidden_dim))
setattr(self, 'submodule:4', SubModule(hidden_dim, hidden_dim))
setattr(self, 'submodule: 5', SubModule(hidden_dim, hidden_dim))
self._w = nn.Parameter(torch.randn(output_dim, hidden_dim))
def forward(self, x):
x1 = self.submodule_1(x)
x2 = getattr(self, 'submodule|2')(x1)
x3 = getattr(self, 'submodule/3')(x2)
x4 = getattr(self, 'submodule:4')(x3)
x5 = getattr(self, 'submodule: 5')(x4)
x6 = F.linear(x5, self._w)
return x6
input_dim = 10
hidden_dim = 20
output_dim = 5
model = MainModule(input_dim, hidden_dim, output_dim)
model.eval()
example_inputs = torch.randn(1, input_dim)
_ = model(*example_inputs)
qconfig_mapping = QConfigMapping().set_object_type(nn.functional.linear, float16_dynamic_qconfig)
prepared_model = prepare_fx(model, qconfig_mapping, example_inputs)
prepared_model(example_inputs)
quantized_model = convert_fx(prepared_model, keep_original_weights=True)
self.assertTrue(len(quantized_model.original_weights_lookup) == 6)
self.assertTrue("submodule_1_packed_weight_0" in quantized_model.original_weights_lookup)
torch.testing.assert_close(
quantized_model.original_weights_lookup["submodule_1_packed_weight_0"][0],
model.submodule_1.w
)
torch.testing.assert_close(
quantized_model.original_weights_lookup["submodule_1_packed_weight_0"][1],
model.submodule_1.b
)
self.assertTrue("submodule_2_packed_weight_0" in quantized_model.original_weights_lookup)
torch.testing.assert_close(
quantized_model.original_weights_lookup["submodule_2_packed_weight_0"][0],
getattr(model, "submodule|2").w
)
torch.testing.assert_close(
quantized_model.original_weights_lookup["submodule_2_packed_weight_0"][1],
getattr(model, "submodule|2").b
)
self.assertTrue("submodule_3_packed_weight_0" in quantized_model.original_weights_lookup)
torch.testing.assert_close(
quantized_model.original_weights_lookup["submodule_3_packed_weight_0"][0],
getattr(model, "submodule/3").w
)
torch.testing.assert_close(
quantized_model.original_weights_lookup["submodule_3_packed_weight_0"][1],
getattr(model, "submodule/3").b
)
self.assertTrue("submodule_4_packed_weight_0" in quantized_model.original_weights_lookup)
torch.testing.assert_close(
quantized_model.original_weights_lookup["submodule_4_packed_weight_0"][0],
getattr(model, "submodule:4").w
)
torch.testing.assert_close(
quantized_model.original_weights_lookup["submodule_4_packed_weight_0"][1],
getattr(model, "submodule:4").b
)
self.assertTrue("submodule_5_packed_weight_0" in quantized_model.original_weights_lookup)
torch.testing.assert_close(
quantized_model.original_weights_lookup["submodule_5_packed_weight_0"][0],
getattr(model, "submodule: 5").w
)
torch.testing.assert_close(
quantized_model.original_weights_lookup["submodule_5_packed_weight_0"][1],
getattr(model, "submodule: 5").b
)
self.assertTrue("_packed_weight_0" in quantized_model.original_weights_lookup)
torch.testing.assert_close(
quantized_model.original_weights_lookup["_packed_weight_0"][0],
model._w
)
torch.testing.assert_close(
quantized_model.original_weights_lookup["_packed_weight_0"][1],
None
)
@skipIfNoFBGEMM
| TestQuantizeFx |
python | lepture__authlib | authlib/oauth1/rfc5849/models.py | {
"start": 2904,
"end": 3418
} | class ____(dict, TemporaryCredentialMixin):
def get_client_id(self):
return self.get("client_id")
def get_user_id(self):
return self.get("user_id")
def get_redirect_uri(self):
return self.get("oauth_callback")
def check_verifier(self, verifier):
return self.get("oauth_verifier") == verifier
def get_oauth_token(self):
return self.get("oauth_token")
def get_oauth_token_secret(self):
return self.get("oauth_token_secret")
| TemporaryCredential |
python | explosion__spaCy | spacy/ty.py | {
"start": 918,
"end": 1297
} | class ____(Protocol):
model: Any
listeners: Sequence[Model]
listener_map: Dict[str, Sequence[Model]]
listening_components: List[str]
def add_listener(self, listener: Model, component_name: str) -> None: ...
def remove_listener(self, listener: Model, component_name: str) -> bool: ...
def find_listeners(self, component) -> None: ...
| ListenedToComponent |
python | dagster-io__dagster | scripts/gen_airbyte_classes.py | {
"start": 5410,
"end": 21160
} | class ____(SchemaType):
def __init__(self, inner: Sequence[SchemaType]):
self.inner = inner
def __str__(self):
return f"Union[{', '.join([str(x) for x in self.inner])}]"
def annotation(
self, scope: Optional[str] = None, quote: bool = False, hide_default: bool = False
):
return f"Union[{', '.join([x.annotation(scope, quote, hide_default) for x in self.inner])}]"
def get_check(self, name: str, scope: Optional[str] = None):
scoped_names = [x.annotation(scope) for x in self.inner]
return "check.inst_param({}, '{}', {})".format(
name, name, "({})".format(", ".join(scoped_names))
)
def _union_or_singular(inner: list[SchemaType]) -> SchemaType:
if len(inner) == 1:
return inner[0]
return UnionType(inner)
def get_class_definitions(name: str, schema: dict) -> dict[str, dict[str, SchemaType]]:
"""Parses an Airbyte source or destination schema, turning it into a representation of the
corresponding Python class structure - a dictionary mapping class names with the fields
that the new classes should have.
Each class will be turned into a Python class definition with the given name and fields.
"""
class_definitions: dict[str, dict[str, SchemaType]] = {}
fields = {}
required_fields = set(schema.get("required", []))
for raw_field_name, field in schema["properties"].items():
if raw_field_name == "option_title":
continue
field_name = _remove_invalid_chars(raw_field_name)
if "oneOf" in field:
# Union type, parse all subfields
union_type: list[SchemaType] = []
for sub_field in field["oneOf"]:
title = sub_field.get("properties", {}).get("option_title", {}).get("const")
if not title:
title = sub_field.get("title")
title = _to_class_name(title)
class_definitions = {
**class_definitions,
**get_class_definitions(title, sub_field),
}
union_type.append(RawType(title))
fields[field_name] = _union_or_singular(union_type)
else:
field_type = field.get("type", "string")
if field_type == "object":
# Object type requires subclass
title = _to_class_name(field.get("title") or field.get("description"))
class_definitions = {
**class_definitions,
**get_class_definitions(title, field),
}
fields[field_name] = RawType(title)
elif type(field_type) == list:
# List becomes a union type
fields[field_name] = _union_or_singular(
[RawType(sub_type) for sub_type in field_type if sub_type != "null"]
)
if "null" in field_type:
fields[field_name] = OptType(fields[field_name])
else:
if field_type == "array":
array_type = field.get("items", {}).get("type") or field.get("item") or "string"
check.not_none(array_type)
# Arrays with complex, object members requires a subclass
if array_type == "object":
items_data = field.get("items", {})
title = _to_class_name(
items_data.get("title")
or items_data.get("description")
or f"{field.get('title')}Entry"
)
class_definitions = {
**class_definitions,
**get_class_definitions(title, items_data),
}
fields[field_name] = ListType(RawType(title))
else:
fields[field_name] = ListType(RawType(array_type))
else:
fields[field_name] = RawType(field_type, const_value=field.get("const"))
if field_name not in required_fields:
fields[field_name] = OptType(fields[field_name])
fields[field_name].add_description(field.get("description"))
class_definitions[name] = fields
return class_definitions
CLASS_TEMPLATE = """
class {cls_name}:
@public
def __init__(self, {fields_in}):
{self_fields}
"""
SOURCE_TEMPLATE = '''
class {cls_name}(GeneratedAirbyteSource): {nested_defs}
@public
def __init__(self, name: str, {fields_in}):
"""
Airbyte Source for {human_readable_name}
{docs_url}
Args:
name (str): The name of the destination.
{fields_doc}
"""
{self_fields}
super().__init__("{human_readable_name}", name)
'''
DESTINATION_TEMPLATE = '''
class {cls_name}(GeneratedAirbyteDestination): {nested_defs}
@public
def __init__(self, name: str, {fields_in}):
"""
Airbyte Destination for {human_readable_name}
{docs_url}
Args:
name (str): The name of the destination.
{fields_doc}
"""
{self_fields}
super().__init__("{human_readable_name}", name)
'''
def create_nested_class_definition(
base_cls_name: str,
cls_name: str,
cls_def: dict[str, SchemaType],
):
nested_defs = ""
fields_in = ", ".join(
[
f"{field_name}: {field_type.annotation(scope=base_cls_name, quote=True)}"
for field_name, field_type in sorted(
cls_def.items(), key=lambda x: isinstance(x[1], OptType)
)
if field_type.const_value is None
]
)
self_fields = "\n".join(
[
f' self.{field_name} = "{field_type.const_value}"'
for field_name, field_type in cls_def.items()
if field_type.const_value is not None
]
+ [
f" self.{field_name} = {field_type.get_check(field_name, scope=base_cls_name)}"
for field_name, field_type in cls_def.items()
if field_type.const_value is None
]
)
return CLASS_TEMPLATE.format(
cls_name=cls_name,
fields_in=fields_in,
self_fields=self_fields,
nested_defs=nested_defs,
)
def create_connector_class_definition(
connector_name_human_readable: str,
cls_name: str,
cls_def: dict[str, SchemaType],
nested: Optional[list[str]],
is_source: bool,
docs_url: str,
):
nested_defs = ""
if nested:
nested_defs = "\n".join([textwrap.indent(nested_def, " ") for nested_def in nested])
fields_in = ", ".join(
[
f"{field_name}: {field_type} = None"
if isinstance(field_type, OptType)
else f"{field_name}: {field_type.annotation(scope=cls_name, quote=True)}"
for field_name, field_type in sorted(
cls_def.items(), key=lambda x: isinstance(x[1], OptType)
)
if field_type.const_value is None
]
)
fields_doc = "\n".join(
[
textwrap.indent(
field_type.get_doc_desc(field_name, scope=cls_name) or "", " "
)
for field_name, field_type in cls_def.items()
if field_type.description
]
)
self_fields = "\n".join(
[
f' self.{field_name} = "{field_type.const_value}"'
for field_name, field_type in cls_def.items()
if field_type.const_value is not None
]
+ [
f" self.{field_name} = {field_type.get_check(field_name, scope=cls_name)}"
for field_name, field_type in cls_def.items()
if field_type.const_value is None
]
)
return (SOURCE_TEMPLATE if is_source else DESTINATION_TEMPLATE).format(
cls_name=cls_name,
fields_in=fields_in,
fields_doc=fields_doc,
self_fields=self_fields,
nested_defs=nested_defs,
human_readable_name=connector_name_human_readable,
docs_url=f"\n Documentation can be found at {docs_url}\n"
if docs_url and docs_url != "https://docsurl.com"
else "",
)
def load_from_spec_file(
connector_name_human_readable: str,
connector_name: str,
filepath: str,
is_source: bool,
injected_props: dict[str, Any],
):
"""Loads a connector spec file and generates a python class definition for it."""
with open(filepath, encoding="utf8") as f:
if filepath.endswith(".json"):
schema = json.loads(f.read())
else:
schema = yaml.safe_load(f.read())
schema["connectionSpecification"]["properties"] = deep_merge_dicts(
schema["connectionSpecification"]["properties"], injected_props
)
cls_defs = get_class_definitions(connector_name, schema["connectionSpecification"])
defs = []
for cls_name, cls_def in cls_defs.items():
if cls_name != connector_name:
defs.append(create_nested_class_definition(connector_name, cls_name, cls_def))
return create_connector_class_definition(
connector_name_human_readable,
connector_name,
cls_defs[connector_name],
defs,
is_source,
schema["documentationUrl"],
)
SOURCE_OUT_FILE = os.path.abspath(
file_relative_path(
__file__,
"../python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py",
)
)
DEST_OUT_FILE = os.path.abspath(
file_relative_path(
__file__,
"../python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py",
)
)
SSH_TUNNEL_SPEC = "airbyte-integrations/bases/base-java/src/main/resources/ssh-tunnel-spec.json"
AIRBYTE_REPO_URL = "https://github.com/airbytehq/airbyte.git"
@contextmanager
def airbyte_repo_path(airbyte_repo_root: Optional[str], tag: str):
if airbyte_repo_root:
os.chdir(airbyte_repo_root)
subprocess.call(["git", "checkout", f"origin/{tag}"])
yield airbyte_repo_root
else:
build_dir = os.path.abspath(file_relative_path(__file__, ".build"))
mkdir_p(build_dir)
os.chdir(build_dir)
subprocess.call(["git", "clone", "--depth", "1", "--branch", "master", AIRBYTE_REPO_URL])
os.chdir("./airbyte")
subprocess.call(["git", "fetch", "--all", "--tags"])
subprocess.call(["git", "checkout", f"-btags/{tag}", f"tags/{tag}"])
yield os.path.join(str(build_dir), "airbyte")
EXPECTED_FAILURES = [
# "Dv 360",
"E2e Test",
]
@click.command()
@click.option(
"--airbyte-repo-root",
"-a",
default=None,
help="Path to a cloned copy of Airbyte, defaults to cloning a temp copy",
)
@click.option(
"--airbyte-tag",
"-t",
default="v0.40.17",
help="Airbyte tag to use, defaults to v0.40.17",
)
def gen_airbyte_classes(airbyte_repo_root, airbyte_tag):
with airbyte_repo_path(airbyte_repo_root, airbyte_tag) as airbyte_dir:
connectors_root = os.path.join(airbyte_dir, "airbyte-integrations/connectors")
for title, prefix, out_file, imp, is_source in [
("Source", "source-", SOURCE_OUT_FILE, "GeneratedAirbyteSource", True),
("Destination", "destination-", DEST_OUT_FILE, "GeneratedAirbyteDestination", False),
]:
successes = 0
failures = []
click.secho(f"\n\nGenerating Airbyte {title} Classes...\n\n\n", fg="green")
out = f"""# ruff: noqa: F401, A002
from typing import Any, List, Optional, Union
from dagster_airbyte.managed.types import {imp}
import dagster._check as check
from dagster._annotations import public
"""
for connector_package in os.listdir(connectors_root):
connector_name_parts = [x.capitalize() for x in connector_package.split("-")]
connector_name_human_readable = " ".join(connector_name_parts[1:])
connector_name = "".join(connector_name_parts[1:] + connector_name_parts[:1])
if connector_package.startswith(prefix):
injected_props = {}
# The Postgres source has this additional property injected into its spec file
# https://github.com/airbytehq/airbyte/pull/5742/files#diff-b92c2b888c32ef84ae905c683e3a6a893e81b5fb840427245da34443b18f3c64
if connector_name == "PostgresSource" and is_source:
with open(os.path.join(airbyte_dir, SSH_TUNNEL_SPEC), encoding="utf8") as f:
injected_props["tunnel_method"] = json.loads(f.read())
files: list[tuple[str, str]] = list(
itertools.chain.from_iterable(
[
[(root, file) for file in files]
for root, _, files in os.walk(
os.path.join(connectors_root, connector_package)
)
]
)
)
for root, file in files:
if file == "spec.json" or file == "spec.yml" or file == "spec.yaml":
# First, attempt to load the spec file and generate
# the class definition
new_out = out
try:
new_out += load_from_spec_file(
connector_name_human_readable,
connector_name,
os.path.join(root, file),
is_source,
injected_props=injected_props,
)
except Exception as e:
failures.append((connector_name_human_readable, e))
continue
with open(out_file, "w", encoding="utf8") as f:
f.write(new_out)
# Next, attempt to load the spec file and
# abort if it fails, recording the failure
try:
spec = importlib.util.spec_from_file_location(
"module.name", out_file
)
foo = importlib.util.module_from_spec(spec) # pyright: ignore[reportArgumentType]
sys.modules["module.name"] = foo
spec.loader.exec_module(foo) # pyright: ignore[reportOptionalMemberAccess]
out = new_out
successes += 1
break
except Exception as e:
failures.append((connector_name_human_readable, e))
continue
print("\033[1A\033[K\033[1A\033[K\033[1A\033[K") # noqa: T201
click.secho(f"{successes} successes", fg="green")
click.secho(f"{len(failures)} failures", fg="red")
for failure in failures:
click.secho(f"{failure[0]}: {failure[1]}", fg="red")
if failure[0] not in EXPECTED_FAILURES:
raise failure[1]
subprocess.call(["ruff", "format", out_file])
if __name__ == "__main__":
gen_airbyte_classes()
| UnionType |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py | {
"start": 4930,
"end": 8517
} | class ____(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest.
In this test, the operator is constructed with hints that invoke the use of
a Cholesky decomposition for solves/determinant.
"""
def setUp(self):
# Increase from 1e-6 to 1e-5. This reduction in tolerance happens,
# presumably, because we are taking a different code path in the operator
# and the matrix. The operator uses a Cholesky, the matrix uses standard
# solve.
self._atol[dtypes.float32] = 1e-5
self._rtol[dtypes.float32] = 1e-5
self._atol[dtypes.float64] = 1e-10
self._rtol[dtypes.float64] = 1e-10
@staticmethod
def dtypes_to_test():
return [dtypes.float32, dtypes.float64]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
# Matrix is always symmetric and positive definite in this class.
del ensure_self_adjoint_and_pd
shape = list(build_info.shape)
matrix = linear_operator_test_util.random_positive_definite_matrix(
shape, dtype, force_well_conditioned=True)
lin_op_matrix = matrix
if use_placeholder:
lin_op_matrix = array_ops.placeholder_with_default(matrix, shape=None)
operator = linalg.LinearOperatorFullMatrix(
lin_op_matrix,
is_square=True,
is_self_adjoint=True,
is_positive_definite=True)
return operator, matrix
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
matrix = [[1., 0.], [0., 7.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=True, is_self_adjoint=True)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_self_adjoint)
# Should be auto-set
self.assertTrue(operator.is_non_singular)
self.assertTrue(operator._can_use_cholesky)
self.assertTrue(operator.is_square)
@test_util.disable_xla("Assert statements in kernels not supported in XLA")
def test_assert_non_singular(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.cached_session():
# Cholesky decomposition may fail, so the error is not specific to
# non-singular.
with self.assertRaisesOpError(""):
operator.assert_non_singular().run()
def test_assert_self_adjoint(self):
matrix = [[0., 1.], [0., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.cached_session():
with self.assertRaisesOpError("not equal to its adjoint"):
operator.assert_self_adjoint().run()
@test_util.disable_xla("Assert statements in kernels not supported in XLA")
def test_assert_positive_definite(self):
matrix = [[1., 1.], [1., 1.]]
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
with self.cached_session():
# Cholesky decomposition may fail, so the error is not specific to
# non-singular.
with self.assertRaisesOpError(""):
operator.assert_positive_definite().run()
def test_tape_safe(self):
matrix = variables_module.Variable([[2.]])
operator = linalg.LinearOperatorFullMatrix(
matrix, is_self_adjoint=True, is_positive_definite=True)
self.check_tape_safe(operator)
@test_util.run_all_in_graph_and_eager_modes
| SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest |
python | bottlepy__bottle | bottle.py | {
"start": 7809,
"end": 7914
} | class ____(BottleException):
""" This is a base class for all routing related exceptions """
| RouteError |
python | Textualize__textual | tests/tree/test_tree_clearing.py | {
"start": 206,
"end": 245
} | class ____(VerseBody):
pass
| VerseStar |
python | astral-sh__uv | scripts/benchmark/src/benchmark/__init__.py | {
"start": 47,
"end": 286
} | class ____(typing.NamedTuple):
name: str
"""The name of the command to benchmark."""
prepare: str | None
"""The command to run before each benchmark run."""
command: list[str]
"""The command to benchmark."""
| Command |
python | getsentry__sentry | src/sentry/overwatch_webhooks/types.py | {
"start": 426,
"end": 1136
} | class ____:
name: str
slug: str
id: int
region: str
github_integration_id: int
organization_integration_id: int
@classmethod
def from_organization_mapping_and_integration(
cls, organization_mapping: OrganizationMapping, org_integration: OrganizationIntegration
) -> OrganizationSummary:
return cls(
name=organization_mapping.name,
slug=organization_mapping.slug,
id=organization_mapping.organization_id,
region=organization_mapping.region_name,
github_integration_id=org_integration.integration_id,
organization_integration_id=org_integration.id,
)
@dataclass
| OrganizationSummary |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_validation.py | {
"start": 10079,
"end": 14744
} | class ____:
async def make_deployment_schema(
self,
flow_id: UUID,
schema_cls: Union[Type[DeploymentCreate], Type[DeploymentUpdate]],
):
if schema_cls == DeploymentCreate:
params = {
"flow_id": flow_id,
"name": "test-deployment-2",
}
else:
params = {}
deployment = schema_cls(**params)
return deployment
@pytest.mark.parametrize("schema_cls", [DeploymentCreate, DeploymentUpdate])
async def test_deployment_template_validation_succeeds_with_missing_default_block_document(
self,
session,
work_pool,
flow,
deployment_with_work_queue,
missing_block_doc_ref_template,
schema_cls,
):
"""
When we validate a deployment's job variables, we only validate the job variables
and not default values in the base template.
"""
work_pool.base_job_template = missing_block_doc_ref_template
deployment = await self.make_deployment_schema(flow.id, schema_cls)
await validate_job_variables_for_deployment(
session,
work_pool,
deployment,
)
@pytest.mark.parametrize("schema_cls", [DeploymentCreate, DeploymentUpdate])
async def test_deployment_template_validation_succeeds_with_block_document_reference_incorrect_type(
self,
session,
work_pool,
flow,
deployment_with_work_queue,
incorrect_type_block_ref_template,
schema_cls,
):
"""
When we validate a deployment's job variables, we only validate the job variables
and not default values in the base template.
"""
work_pool.base_job_template = incorrect_type_block_ref_template
deployment = await self.make_deployment_schema(flow.id, schema_cls)
await validate_job_variables_for_deployment(
session,
work_pool,
deployment,
)
@pytest.mark.parametrize("schema_cls", [DeploymentCreate, DeploymentUpdate])
async def test_deployment_template_validation_allows_missing_required_fields(
self,
session,
work_pool,
flow,
deployment_with_work_queue,
template_required_field_no_default,
schema_cls,
):
work_pool.base_job_template = template_required_field_no_default
deployment = await self.make_deployment_schema(flow.id, schema_cls)
await validate_job_variables_for_deployment(
session,
work_pool,
deployment,
)
@pytest.mark.parametrize("schema_cls", [DeploymentCreate, DeploymentUpdate])
async def test_deployment_template_validation_invalid_type(
self,
session,
work_pool,
flow,
deployment_with_work_queue,
template_optional_field,
schema_cls,
):
work_pool.base_job_template = template_optional_field
deployment = await self.make_deployment_schema(flow.id, schema_cls)
deployment.job_variables = {"age": "hi"}
with pytest.raises(HTTPException, match="'hi' is not valid"):
await validate_job_variables_for_deployment(
session,
work_pool,
deployment,
)
@pytest.mark.parametrize("schema_cls", [DeploymentCreate, DeploymentUpdate])
async def test_deployment_template_validation_valid(
self,
session,
work_pool,
flow,
deployment_with_work_queue,
template_optional_field,
schema_cls,
):
work_pool.base_job_template = template_optional_field
deployment = await self.make_deployment_schema(flow.id, schema_cls)
deployment.job_variables = {"age": 41}
await validate_job_variables_for_deployment(
session,
work_pool,
deployment,
)
@pytest.mark.parametrize("schema_cls", [DeploymentCreate, DeploymentUpdate])
async def test_deployment_template_validation_ignores_variable_not_in_schema(
self,
session,
work_pool,
flow,
deployment_with_work_queue,
template_optional_field,
schema_cls,
):
work_pool.base_job_template = template_optional_field
deployment = await self.make_deployment_schema(flow.id, schema_cls)
deployment.job_variables = {"favorite_type_of_bike": "touring"}
await validate_job_variables_for_deployment(
session,
work_pool,
deployment,
)
| TestDeploymentValidation |
python | pytorch__pytorch | torch/_inductor/fx_passes/group_batch_fusion.py | {
"start": 11532,
"end": 14912
} | class ____(GroupFusion):
def _addmm_node_can_be_fused(self, node: torch.fx.Node):
input_shape = node.args[1].meta["val"].shape # type: ignore[union-attr]
weight_shape = node.args[2].meta["val"].shape # type: ignore[union-attr]
return (
node.kwargs.get("beta", DEFAULT_BETA) == DEFAULT_BETA
and node.kwargs.get("alpha", DEFAULT_ALPHA) == DEFAULT_ALPHA
and len(input_shape) == 2
and len(weight_shape) == 2
and all(x % 2 == 0 for x in input_shape + weight_shape)
and all(
shape <= self.graph_search_options["max_fuse_tensor_size_group_linear"]
for shape in input_shape + weight_shape
)
)
def _mm_node_can_be_fused(self, node: torch.fx.Node):
input_shape = node.args[0].meta["val"].shape # type: ignore[union-attr]
weight_shape = node.args[1].meta["val"].shape # type: ignore[union-attr]
return (
len(input_shape) == 2
and len(weight_shape) == 2
and all(x % 2 == 0 for x in input_shape + weight_shape)
and all(
shape <= self.graph_search_options["max_fuse_tensor_size_group_linear"]
for shape in input_shape + weight_shape
)
)
def match(self, node: torch.fx.Node) -> tuple[str, bool] | None:
if CallFunctionVarArgs(aten.mm.default).match(
node
) and self._mm_node_can_be_fused(node):
group_key = ("group_linear", True)
elif CallFunctionVarArgs(aten.addmm.default).match(
node
) and self._addmm_node_can_be_fused(node):
bias = node.args[0]
group_key = ("group_linear", bias is None)
else:
group_key = None
return group_key
def fuse(self, graph: torch.fx.GraphModule, subset: list[torch.fx.Node]):
group_inputs = []
group_weights = []
group_biases = []
group_nodes = []
for node in subset:
if CallFunctionVarArgs(aten.addmm.default).match(node):
bias, input, weight = node.args
else:
assert CallFunctionVarArgs(aten.mm.default).match(node)
input, weight = node.args
bias = None
group_nodes.append(node)
group_inputs.append(input)
group_weights.append(weight)
group_biases.append(bias)
if all(bias is None for bias in group_biases):
group_biases = None # type: ignore[assignment]
with graph.inserting_before(subset[0]): # type: ignore[operator]
fused_mm = graph.call_function( # type: ignore[operator]
torch.ops.fbgemm.gmm.default,
args=(group_inputs, group_weights, group_biases),
kwargs={"smart_fused": True},
)
for i, original_mm in enumerate(group_nodes):
with graph.inserting_after(fused_mm): # type: ignore[operator]
new_mm = graph.call_function(operator.getitem, args=(fused_mm, i)) # type: ignore[operator]
original_mm.replace_all_uses_with(new_mm)
new_mm.meta.update(original_mm.meta)
graph.erase_node(original_mm) # type: ignore[operator]
counters["inductor"]["group_linear"] += 1
| GroupLinearFusion |
python | redis__redis-py | redis/event.py | {
"start": 12099,
"end": 14140
} | class ____(EventListenerInterface):
def __init__(self):
self._connection = None
self._connection_pool = None
self._client_type = None
self._connection_lock = None
self._event = None
def listen(self, event: AfterPubSubConnectionInstantiationEvent):
if isinstance(
event.pubsub_connection.credential_provider, StreamingCredentialProvider
) and event.pubsub_connection.get_protocol() in [3, "3"]:
self._event = event
self._connection = event.pubsub_connection
self._connection_pool = event.connection_pool
self._client_type = event.client_type
self._connection_lock = event.connection_lock
if self._client_type == ClientType.SYNC:
self._connection.credential_provider.on_next(self._re_auth)
self._connection.credential_provider.on_error(self._raise_on_error)
else:
self._connection.credential_provider.on_next(self._re_auth_async)
self._connection.credential_provider.on_error(
self._raise_on_error_async
)
def _re_auth(self, token: TokenInterface):
with self._connection_lock:
self._connection.send_command(
"AUTH", token.try_get("oid"), token.get_value()
)
self._connection.read_response()
self._connection_pool.re_auth_callback(token)
async def _re_auth_async(self, token: TokenInterface):
async with self._connection_lock:
await self._connection.send_command(
"AUTH", token.try_get("oid"), token.get_value()
)
await self._connection.read_response()
await self._connection_pool.re_auth_callback(token)
def _raise_on_error(self, error: Exception):
raise EventException(error, self._event)
async def _raise_on_error_async(self, error: Exception):
raise EventException(error, self._event)
| RegisterReAuthForPubSub |
python | doocs__leetcode | solution/1200-1299/1237.Find Positive Integer Solution for a Given Equation/Solution.py | {
"start": 110,
"end": 349
} | class ____:
# Returns f(x, y) for any given positive integers x and y.
# Note that f(x, y) is increasing with respect to both x and y.
# i.e. f(x, y) < f(x + 1, y), f(x, y) < f(x, y + 1)
def f(self, x, y):
"""
| CustomFunction |
python | dagster-io__dagster | python_modules/libraries/dagster-tableau/dagster_tableau/components/tableau_component.py | {
"start": 4722,
"end": 19619
} | class ____(StateBackedComponent, Resolvable):
"""Pulls in the contents of a Tableau workspace into Dagster assets.
Example:
.. code-block:: yaml
# defs.yaml
type: dagster_tableau.TableauComponent
attributes:
workspace:
type: cloud
connected_app_client_id: "{{ env.TABLEAU_CLIENT_ID }}"
connected_app_secret_id: "{{ env.TABLEAU_SECRET_ID }}"
connected_app_secret_value: "{{ env.TABLEAU_SECRET_VALUE }}"
username: "{{ env.TABLEAU_USERNAME }}"
site_name: my_site
pod_name: 10ax
"""
workspace: Annotated[
BaseTableauWorkspace,
Resolver(
_resolve_tableau_workspace,
model_field_name="workspace",
model_field_type=Union[
TableauCloudWorkspaceArgs.model(), TableauServerWorkspaceArgs.model()
],
description="Configuration for connecting to the Tableau workspace. Use 'type: cloud' for Tableau Cloud or 'type: server' for Tableau Server.",
examples=[
{
"type": "cloud",
"connected_app_client_id": "{{ env.TABLEAU_CLIENT_ID }}",
"connected_app_secret_id": "{{ env.TABLEAU_SECRET_ID }}",
"connected_app_secret_value": "{{ env.TABLEAU_SECRET_VALUE }}",
"username": "{{ env.TABLEAU_USERNAME }}",
"site_name": "my_site",
"pod_name": "10ax",
},
{
"type": "server",
"connected_app_client_id": "{{ env.TABLEAU_CLIENT_ID }}",
"connected_app_secret_id": "{{ env.TABLEAU_SECRET_ID }}",
"connected_app_secret_value": "{{ env.TABLEAU_SECRET_VALUE }}",
"username": "{{ env.TABLEAU_USERNAME }}",
"site_name": "my_site",
"server_name": "tableau.example.com",
},
],
),
]
# Takes a list of workbook names or ids to enable refresh for, or True to enable for all embedded datasources
enable_embedded_datasource_refresh: Union[bool, list[str]] = False
# Takes a list of published datasource names or id's to enable refresh for, or True to enable for all published datasources
enable_published_datasource_refresh: Union[bool, list[str]] = False
translation: Optional[ResolvedMultilayerTranslationFn] = None
defs_state: ResolvedDefsStateConfig = field(
default_factory=DefsStateConfigArgs.legacy_code_server_snapshots
)
@property
def defs_state_config(self) -> DefsStateConfig:
default_key = f"{self.__class__.__name__}[{self.workspace.site_name}]"
return DefsStateConfig.from_args(self.defs_state, default_key=default_key)
    @cached_property
    def translator(self) -> DagsterTableauTranslator:
        """Translator used for spec generation, cached per component instance.

        Built via ``create_tableau_component_translator`` and constructed with
        this component — NOTE(review): presumably so the translator routes
        through this component's ``get_asset_spec`` override; confirm against
        that helper's implementation.
        """
        translator_cls = create_tableau_component_translator(TableauComponent)
        return translator_cls(self)
    @cached_property
    def _base_translator(self) -> DagsterTableauTranslator:
        """Plain default translator, used by the default ``get_asset_spec``."""
        return DagsterTableauTranslator()
    @public
    def get_asset_spec(self, data: TableauTranslatorData) -> dg.AssetSpec:
        """Generates an AssetSpec for a given Tableau content item.

        This method can be overridden in a subclass to customize how Tableau content
        (workbooks, dashboards, sheets, data sources) are converted to Dagster asset specs.
        By default, it delegates to the configured DagsterTableauTranslator.

        Args:
            data: The TableauTranslatorData containing information about the Tableau content
                item and workspace

        Returns:
            An AssetSpec that represents the Tableau content as a Dagster asset

        Example:
            Override this method to add custom metadata based on content properties:

            .. code-block:: python

                from dagster_tableau import TableauComponent
                from dagster import AssetSpec

                class CustomTableauComponent(TableauComponent):
                    def get_asset_spec(self, data):
                        base_spec = super().get_asset_spec(data)
                        return base_spec.replace_attributes(
                            metadata={
                                **base_spec.metadata,
                                "tableau_type": data.content_data.content_type,
                                "project": data.content_data.properties.get("project", {}).get("name")
                            }
                        )
        """
        # Delegates to the plain base translator rather than self.translator —
        # NOTE(review): presumably to avoid recursion, since self.translator is
        # built around this component; confirm against
        # create_tableau_component_translator.
        return self._base_translator.get_asset_spec(data)
def _load_asset_specs(self, state: TableauWorkspaceData) -> list[dg.AssetSpec]:
# Process all content types
asset_specs = []
for sheet_data in state.sheets_by_id.values():
translator_data = TableauTranslatorData(content_data=sheet_data, workspace_data=state)
asset_specs.append(self.translator.get_asset_spec(translator_data))
for dashboard_data in state.dashboards_by_id.values():
translator_data = TableauTranslatorData(
content_data=dashboard_data, workspace_data=state
)
asset_specs.append(self.translator.get_asset_spec(translator_data))
for data_source_data in state.data_sources_by_id.values():
translator_data = TableauTranslatorData(
content_data=data_source_data, workspace_data=state
)
asset_specs.append(self.translator.get_asset_spec(translator_data))
return asset_specs
    async def write_state_to_path(self, state_path: Path) -> None:
        """Fetches Tableau workspace data and writes it to the state path."""
        # Fetch the workspace data.
        # NOTE(review): this looks like a synchronous (blocking) call inside an
        # async method — confirm the caller tolerates blocking the event loop.
        workspace_data = self.workspace.fetch_tableau_workspace_data()
        # Serialize and write to path
        state_path.write_text(dg.serialize_value(workspace_data))
    def execute_embedded_data_sources(
        self, context: dg.AssetExecutionContext, workspace: BaseTableauWorkspace, workbook_id: str
    ):
        """Executes a refresh for embedded data sources within a Tableau workbook.

        This method can be overridden in a subclass to customize the refresh execution behavior,
        such as adding custom logging or handling refresh results differently.

        Args:
            context: The asset execution context provided by Dagster
            workspace: The BaseTableauWorkspace used to trigger and monitor refreshes
            workbook_id: The ID of the Tableau workbook containing the embedded data sources

        Yields:
            AssetMaterialization events for each embedded data source in the workbook

        Example:
            Override this method to add custom logging during refresh execution:

            .. code-block:: python

                from dagster_tableau import TableauComponent
                import dagster as dg

                class CustomTableauComponent(TableauComponent):
                    def execute_embedded_data_sources(self, context, workspace, workbook_id):
                        context.log.info(f"Starting refresh for workbook {workbook_id}")
                        result = yield from super().execute_embedded_data_sources(context, workspace, workbook_id)
                        context.log.info("Workbook refresh completed successfully")
                        return result
        """
        with workspace.get_client() as client:
            # Trigger the workbook refresh and wait for it to finish.
            client.refresh_and_poll_workbook(workbook_id)
        # A single workbook refresh covers all of its embedded data sources, so
        # emit one materialization per selected asset key.
        for asset_key in context.selected_asset_keys:
            yield dg.AssetMaterialization(
                asset_key=asset_key,
            )
    def execute_published_data_sources(
        self,
        context: dg.AssetExecutionContext,
        workspace: BaseTableauWorkspace,
        workspace_data: TableauWorkspaceData,
    ):
        """Executes a refresh for published data sources in the Tableau workspace.

        This method can be overridden in a subclass to customize the refresh execution behavior,
        such as adding custom logging or handling refresh results differently.

        Args:
            context: The asset execution context provided by Dagster. Only the assets that are in this context should be refreshed.
            workspace: The BaseTableauWorkspace used to trigger and monitor refreshes
            workspace_data: The TableauWorkspaceData containing information about all workspace content

        Yields:
            AssetMaterialization events for each published data source that was refreshed

        Example:
            Override this method to add custom logging during refresh execution:

            .. code-block:: python

                from dagster_tableau import TableauComponent
                import dagster as dg

                class CustomTableauComponent(TableauComponent):
                    def execute_published_data_sources(self, context, workspace, workspace_data):
                        context.log.info(f"Starting refresh for {len(context.selected_asset_keys)} published data sources")
                        result = yield from super().execute_published_data_sources(context, workspace, workspace_data)
                        context.log.info("Published data source refreshes completed successfully")
                        return result
        """
        # Map datasource id -> spec for every published data source in the
        # workspace. NOTE(review): this uses self.get_asset_spec while
        # _load_asset_specs uses self.translator.get_asset_spec — confirm the
        # two paths produce identical keys for published data sources.
        specs_by_data_source_id = {
            k: self.get_asset_spec(
                TableauTranslatorData(content_data=v, workspace_data=workspace_data)
            )
            for k, v in workspace_data.data_sources_by_id.items()
        }
        specs_by_data_source_id = {  # Filter to only selected asset keys
            k: v for k, v in specs_by_data_source_id.items() if v.key in context.selected_asset_keys
        }
        with workspace.get_client() as client:
            # refresh_and_poll_data_sources yields each datasource id as its
            # refresh completes; materialize the matching asset as we go.
            for datasource_id in client.refresh_and_poll_data_sources(
                list(specs_by_data_source_id.keys())
            ):
                yield dg.AssetMaterialization(asset_key=specs_by_data_source_id[datasource_id].key)
    def build_refreshable_embedded_data_sources_asset_definition(
        self, workbook_id: str, specs: list[dg.AssetSpec]
    ) -> dg.AssetsDefinition:
        """Build a multi-asset that refreshes one workbook's embedded data sources.

        Args:
            workbook_id: The Tableau workbook whose embedded data sources are refreshed.
            specs: Asset specs for the embedded data sources of that workbook.

        Returns:
            An AssetsDefinition whose execution triggers a single workbook refresh.
        """
        # can_subset=False: the refresh is per-workbook, so partial selection of
        # its embedded data sources would not change what actually runs.
        @dg.multi_asset(specs=specs, can_subset=False, name=clean_name_lower(workbook_id))
        def asset_fn(context: dg.AssetExecutionContext):
            yield from self.execute_embedded_data_sources(context, self.workspace, workbook_id)

        return asset_fn
    def build_refreshable_published_data_sources_asset_definition(
        self, specs: list[dg.AssetSpec], workspace_data: TableauWorkspaceData
    ) -> dg.AssetsDefinition:
        """Build one subsettable multi-asset covering all refreshable published data sources.

        Args:
            specs: Asset specs for every refreshable published data source.
            workspace_data: Workspace content snapshot passed through to execution.

        Returns:
            An AssetsDefinition that refreshes only the selected data sources.
        """
        # can_subset=True: published data sources refresh independently, so a
        # partial selection is meaningful (see execute_published_data_sources).
        # NOTE(review): clean_name_lower is applied twice here (inner call on
        # site_name, outer call on the whole name) — likely redundant; confirm
        # it is idempotent before simplifying.
        @dg.multi_asset(
            specs=specs,
            can_subset=True,
            name=clean_name_lower(
                f"tableau_published_data_sources_{clean_name_lower(self.workspace.site_name)}"
            ),
        )
        def asset_fn(context: dg.AssetExecutionContext):
            yield from self.execute_published_data_sources(context, self.workspace, workspace_data)

        return asset_fn
def is_refreshable_published_data_source(
self, spec: dg.AssetSpec, workspace_data: TableauWorkspaceData
) -> bool:
if not self.enable_published_datasource_refresh:
return False
if ("published datasource" not in spec.kinds) or ("extract" not in spec.kinds):
return False
metadataset = TableauDataSourceMetadataSet.extract(spec.metadata)
data_source_id = check.not_none(metadataset.id)
data_source_name = workspace_data.data_sources_by_id[data_source_id].properties["name"]
if (
isinstance(self.enable_published_datasource_refresh, list)
and (data_source_id not in self.enable_published_datasource_refresh)
and (data_source_name not in self.enable_published_datasource_refresh)
):
return False
return True
def is_refreshable_embedded_data_source(
self, spec: dg.AssetSpec, workspace_data: TableauWorkspaceData
) -> bool:
if not self.enable_embedded_datasource_refresh:
return False
if ("embedded datasource" not in spec.kinds) or ("extract" not in spec.kinds):
return False
metadataset = TableauDataSourceMetadataSet.extract(spec.metadata)
workbook_id = check.not_none(metadataset.workbook_id)
workbook_name = workspace_data.workbooks_by_id[workbook_id].properties["name"]
if (
isinstance(self.enable_embedded_datasource_refresh, list)
and (workbook_id not in self.enable_embedded_datasource_refresh)
and (workbook_name not in self.enable_embedded_datasource_refresh)
):
return False
return True
def build_defs_from_state(
self, context: ComponentLoadContext, state_path: Optional[Path]
) -> dg.Definitions:
"""Builds Dagster definitions from the cached Tableau workspace state."""
if state_path is None:
return dg.Definitions()
# Deserialize workspace data
workspace_data = dg.deserialize_value(state_path.read_text(), TableauWorkspaceData)
specs = self._load_asset_specs(workspace_data)
non_refreshable_specs = [
spec
for spec in specs
if (not self.is_refreshable_embedded_data_source(spec, workspace_data))
and (not self.is_refreshable_published_data_source(spec, workspace_data))
]
refreshable_embedded_data_source_specs = [
spec for spec in specs if self.is_refreshable_embedded_data_source(spec, workspace_data)
]
refreshable_published_data_source_specs = [
spec
for spec in specs
if self.is_refreshable_published_data_source(spec, workspace_data)
]
refreshable_specs_by_workbook_id = defaultdict(list)
for spec in refreshable_embedded_data_source_specs:
workbook_id = TableauDataSourceMetadataSet.extract(spec.metadata).workbook_id
refreshable_specs_by_workbook_id[workbook_id].append(spec)
assets_defs = []
for workbook_id, specs in refreshable_specs_by_workbook_id.items():
assets_defs.append(
self.build_refreshable_embedded_data_sources_asset_definition(workbook_id, specs)
)
assets_defs.append(
self.build_refreshable_published_data_sources_asset_definition(
refreshable_published_data_source_specs, workspace_data
)
)
return dg.Definitions(assets=non_refreshable_specs + assets_defs)
| TableauComponent |
python | numpy__numpy | numpy/_core/tests/test_dtype.py | {
"start": 64374,
"end": 65721
} | class ____:
def test_simple(self):
class dt:
dtype = np.dtype("f8")
assert np.dtype(dt) == np.float64
assert np.dtype(dt()) == np.float64
def test_recursive(self):
# This used to recurse. It now doesn't, we enforce the
# dtype attribute to be a dtype (and will not recurse).
class dt:
pass
dt.dtype = dt
with pytest.raises(ValueError):
np.dtype(dt)
dt_instance = dt()
dt_instance.dtype = dt
with pytest.raises(ValueError):
np.dtype(dt_instance)
def test_void_subtype(self):
class dt(np.void):
# This code path is fully untested before, so it is unclear
# what this should be useful for. Note that if np.void is used
# numpy will think we are deallocating a base type [1.17, 2019-02].
dtype = np.dtype("f,f")
np.dtype(dt)
np.dtype(dt(1))
def test_void_subtype_recursive(self):
# Used to recurse, but dtype is now enforced to be a dtype instance
# so that we do not recurse.
class vdt(np.void):
pass
vdt.dtype = vdt
with pytest.raises(ValueError):
np.dtype(vdt)
with pytest.raises(ValueError):
np.dtype(vdt(1))
| TestFromDTypeAttribute |
python | PyCQA__pylint | tests/functional/s/super/super_with_arguments.py | {
"start": 22,
"end": 126
} | class ____(Foo):
def __init__(self):
super(Bar, self).__init__() # [super-with-arguments]
| Bar |
python | giampaolo__psutil | tests/test_contracts.py | {
"start": 12214,
"end": 12532
} | class ____(PsutilTestCase):
@pytest.mark.skipif(not POSIX, reason="not POSIX")
def test_negative_signal(self):
p = psutil.Process(self.spawn_subproc().pid)
p.terminate()
code = p.wait()
assert code == -signal.SIGTERM
assert isinstance(code, enum.IntEnum)
| TestProcessWaitType |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vector_index.py | {
"start": 4317,
"end": 4525
} | class ____(_VectorIndexConfigUpdate):
vectorCacheMaxObjects: Optional[int]
@staticmethod
def vector_index_type() -> VectorIndexType:
return VectorIndexType.FLAT
| _VectorIndexConfigFlatUpdate |
python | zarr-developers__zarr-python | src/zarr/core/dtype/npy/int.py | {
"start": 37008,
"end": 42070
} | class ____(BaseInt[np.dtypes.Int64DType, np.int64], HasEndianness):
"""
A Zarr data type for arrays containing 64-bit signed integers.
Wraps the [`np.dtypes.Int64DType`][numpy.dtypes.Int64DType] data type. Scalars for this data type are instances of
[`np.int64`][numpy.int64].
Attributes
----------
dtype_cls : np.dtypes.Int64DType
The class of the underlying NumPy dtype.
References
----------
This class implements the 64-bit signed integer data type defined in Zarr V2 and V3.
See the [Zarr V2](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v2/v2.0.rst#data-type-encoding) and [Zarr V3](https://github.com/zarr-developers/zarr-specs/blob/main/docs/v3/data-types/index.rst) specification documents for details.
"""
dtype_cls = np.dtypes.Int64DType
_zarr_v3_name: ClassVar[Literal["int64"]] = "int64"
_zarr_v2_names: ClassVar[tuple[Literal[">i8"], Literal["<i8"]]] = (">i8", "<i8")
@classmethod
def from_native_dtype(cls, dtype: TBaseDType) -> Self:
"""
Create an Int64 from a np.dtype('int64') instance.
Parameters
----------
dtype : TBaseDType
The NumPy data type.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input data type is not a valid representation of this class 64-bit signed
integer.
"""
if cls._check_native_dtype(dtype):
return cls(endianness=get_endianness_from_numpy_dtype(dtype))
raise DataTypeValidationError(
f"Invalid data type: {dtype}. Expected an instance of {cls.dtype_cls}"
)
def to_native_dtype(self) -> np.dtypes.Int64DType:
"""
Create a NumPy signed 64-bit integer dtype instance from this Int64 ZDType.
Returns
-------
np.dtypes.Int64DType
The NumPy signed 64-bit integer dtype.
"""
byte_order = endianness_to_numpy_str(self.endianness)
return self.dtype_cls().newbyteorder(byte_order)
@classmethod
def _from_json_v2(cls, data: DTypeJSON) -> Self:
"""
Create an instance of this data type from Zarr V2-flavored JSON.
Parameters
----------
data : DTypeJSON
The JSON data.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input JSON is not a valid representation of this class 64-bit signed
integer.
"""
if cls._check_json_v2(data):
# Going via NumPy ensures that we get the endianness correct without
# annoying string parsing.
name = data["name"]
return cls.from_native_dtype(np.dtype(name))
msg = f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected one of the strings {cls._zarr_v2_names}."
raise DataTypeValidationError(msg)
@classmethod
def _from_json_v3(cls, data: DTypeJSON) -> Self:
"""
Create an instance of this data type from Zarr V3-flavored JSON.
Parameters
----------
data : DTypeJSON
The JSON data.
Returns
-------
Self
An instance of this data type.
Raises
------
DataTypeValidationError
If the input JSON is not a valid representation of this class 64-bit signed
integer.
"""
if cls._check_json_v3(data):
return cls()
msg = f"Invalid JSON representation of {cls.__name__}. Got {data!r}, expected the string {cls._zarr_v3_name!r}"
raise DataTypeValidationError(msg)
@overload
def to_json(self, zarr_format: Literal[2]) -> DTypeConfig_V2[Literal[">i8", "<i8"], None]: ...
@overload
def to_json(self, zarr_format: Literal[3]) -> Literal["int64"]: ...
def to_json(
self, zarr_format: ZarrFormat
) -> DTypeConfig_V2[Literal[">i8", "<i8"], None] | Literal["int64"]:
"""
Convert the data type to a JSON-serializable form.
Parameters
----------
zarr_format : ZarrFormat
The Zarr format version.
Returns
-------
DTypeConfig_V2[Literal[">i8", "<i8"], None] | Literal["int64"]
The JSON-serializable representation of the data type.
"""
if zarr_format == 2:
name = self.to_native_dtype().str
return {"name": name, "object_codec_id": None}
elif zarr_format == 3:
return self._zarr_v3_name
raise ValueError(f"zarr_format must be 2 or 3, got {zarr_format}")
@property
def item_size(self) -> int:
"""
The size of a single scalar in bytes.
Returns
-------
int
The size of a single scalar in bytes.
"""
return 8
@dataclass(frozen=True, kw_only=True)
| Int64 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.