language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/number-of-divisible-triplet-sums.py | {
"start": 1089,
"end": 1505
} | class ____(object):
def divisibleTripletCount(self, nums, d):
"""
:type nums: List[int]
:type d: int
:rtype: int
"""
result = 0
for i in xrange(len(nums)):
cnt = collections.Counter()
for j in xrange(i+1, len(nums)):
result += cnt[nums[j]%d]
cnt[-(nums[i]+nums[j])%d] += 1
return result
| Solution3 |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-awsdocdb/llama_index/vector_stores/awsdocdb/base.py | {
"start": 1269,
"end": 1657
} | class ____(Enum):
Euclidean = "euclidean"
DotProduct = "dotProduct"
Cosine = "cosine"
def _to_mongodb_filter(standard_filters: MetadataFilters) -> Dict:
"""Convert from standard dataclass to filter dict."""
filters = {}
for filter in standard_filters.legacy_filters():
filters[filter.key] = filter.value
return filters
| AWSDocDbVectorStoreSimilarityType |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/auto_ml.py | {
"start": 21606,
"end": 24631
} | class ____(AutoMLTrainingJobBaseOperator):
"""Create Auto ML Video Training job."""
template_fields = (
"parent_model",
"dataset_id",
"region",
"impersonation_chain",
)
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
dataset_id: str,
prediction_type: str = "classification",
model_type: str = "CLOUD",
training_filter_split: str | None = None,
test_filter_split: str | None = None,
region: str,
impersonation_chain: str | Sequence[str] | None = None,
parent_model: str | None = None,
**kwargs,
) -> None:
super().__init__(
region=region, impersonation_chain=impersonation_chain, parent_model=parent_model, **kwargs
)
self.dataset_id = dataset_id
self.prediction_type = prediction_type
self.model_type = model_type
self.training_filter_split = training_filter_split
self.test_filter_split = test_filter_split
def execute(self, context: Context):
self.hook = AutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.parent_model = self.parent_model.split("@")[0] if self.parent_model else None
model, training_id = self.hook.create_auto_ml_video_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
dataset=datasets.VideoDataset(dataset_name=self.dataset_id),
prediction_type=self.prediction_type,
model_type=self.model_type,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
training_fraction_split=self.training_fraction_split,
test_fraction_split=self.test_fraction_split,
training_filter_split=self.training_filter_split,
test_filter_split=self.test_filter_split,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
sync=self.sync,
parent_model=self.parent_model,
is_default_version=self.is_default_version,
model_version_aliases=self.model_version_aliases,
model_version_description=self.model_version_description,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
context["ti"].xcom_push(key="model_id", value=model_id)
VertexAIModelLink.persist(context=context, model_id=model_id)
else:
result = model # type: ignore
context["ti"].xcom_push(key="training_id", value=training_id)
VertexAITrainingLink.persist(context=context, training_id=training_id)
return result
| CreateAutoMLVideoTrainingJobOperator |
python | numba__numba | numba/core/typing/npdatetime.py | {
"start": 8033,
"end": 8119
} | class ____(DatetimeCmpOp):
key = operator.ne
@infer_global(operator.lt)
| DatetimeCmpNe |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_core/test_api_server.py | {
"start": 28855,
"end": 31933
} | class ____:
"""Tests api-server network policy."""
def test_off_by_default(self):
docs = render_chart(
show_only=["templates/api-server/api-server-networkpolicy.yaml"],
)
assert len(docs) == 0
def test_defaults(self):
docs = render_chart(
values={
"networkPolicies": {"enabled": True},
"apiServer": {
"networkPolicy": {
"ingress": {
"from": [{"namespaceSelector": {"matchLabels": {"release": "myrelease"}}}]
}
},
},
},
show_only=["templates/api-server/api-server-networkpolicy.yaml"],
)
assert len(docs) == 1
assert docs[0]["kind"] == "NetworkPolicy"
assert jmespath.search("spec.ingress[0].from", docs[0]) == [
{"namespaceSelector": {"matchLabels": {"release": "myrelease"}}}
]
assert jmespath.search("spec.ingress[0].ports", docs[0]) == [{"port": 8080}]
@pytest.mark.parametrize(
("ports", "expected_ports"),
[
([{"port": "sidecar"}], [{"port": "sidecar"}]),
(
[
{"port": "{{ .Values.ports.apiServer }}"},
{"port": 80},
],
[
{"port": 8080},
{"port": 80},
],
),
],
)
def test_ports_overrides(self, ports, expected_ports):
docs = render_chart(
values={
"networkPolicies": {"enabled": True},
"apiServer": {
"networkPolicy": {
"ingress": {
"from": [{"namespaceSelector": {"matchLabels": {"release": "myrelease"}}}],
"ports": ports,
}
},
},
},
show_only=["templates/api-server/api-server-networkpolicy.yaml"],
)
assert expected_ports == jmespath.search("spec.ingress[0].ports", docs[0])
def test_should_add_component_specific_labels(self):
docs = render_chart(
values={
"networkPolicies": {"enabled": True},
"apiServer": {
"labels": {"test_label": "test_label_value"},
},
},
show_only=["templates/api-server/api-server-networkpolicy.yaml"],
)
assert "test_label" in jmespath.search("metadata.labels", docs[0])
assert jmespath.search("metadata.labels", docs[0])["test_label"] == "test_label_value"
def test_can_be_disabled(self):
"""
API server networkpolicy can be disabled by configuration.
"""
docs = render_chart(
values={"apiServer": {"enabled": False}},
show_only=["templates/api-server/api-server-networkpolicy.yaml"],
)
assert len(docs) == 0
| TestAPIServerNetworkPolicy |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 276457,
"end": 278011
} | class ____(torch._dynamo.test_case.TestCaseWithNestedGraphBreaks):
@requires_cuda_and_triton
@parametrize("backend", ("aot_eager", "inductor"))
@ops(
list(filter(lambda op: op.name not in xfail_hops_compile, hop_db)),
allowed_dtypes=(torch.float,),
)
def test_hops_compile(self, device, dtype, op, backend):
# Ensure HOPs can be compiled
if backend == "aot_eager" and op.name == "invoke_quant":
raise unittest.SkipTest(
"TODO: partitioner fails. migrate canonicalization to aot eager backend"
)
sample_inputs_itr = op.sample_inputs(
device, dtype, requires_grad=op.supports_autograd
)
for inp in sample_inputs_itr:
input = inp.input if isinstance(inp.input, tuple) else (inp.input,)
eager_args = (*input, *inp.args)
eager_kwargs = inp.kwargs
compiled_args = deepcopy(eager_args)
compiled_kwargs = deepcopy(eager_kwargs)
def fn(args, kwargs):
return op.op(*args, **(kwargs))
compiled_fn = torch.compile(fn, backend=backend, fullgraph=True)
eager_out = fn(eager_args, eager_kwargs)
compiled_out = compiled_fn(compiled_args, compiled_kwargs)
self.assertEqual(eager_out, compiled_out)
instantiate_device_type_tests(TestHigherOrderOpsOpInfo, globals(), only_for=("cuda",))
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| TestHigherOrderOpsOpInfo |
python | doocs__leetcode | solution/0100-0199/0188.Best Time to Buy and Sell Stock IV/Solution3.py | {
"start": 0,
"end": 384
} | class ____:
def maxProfit(self, k: int, prices: List[int]) -> int:
f = [[0] * 2 for _ in range(k + 1)]
for j in range(1, k + 1):
f[j][1] = -prices[0]
for x in prices[1:]:
for j in range(k, 0, -1):
f[j][0] = max(f[j][1] + x, f[j][0])
f[j][1] = max(f[j - 1][0] - x, f[j][1])
return f[k][0]
| Solution |
python | numba__numba | numba/tests/test_sort.py | {
"start": 35097,
"end": 35892
} | class ____(TestCase):
def setUp(self):
np.random.seed(321)
def check_argsort_stable(self, sorter, low, high, count):
# make data with high possibility of duplicated key
data = np.random.randint(low, high, count)
expect = np.argsort(data, kind='mergesort')
got = sorter(data)
np.testing.assert_equal(expect, got)
def test_argsort_stable(self):
arglist = [
(-2, 2, 5),
(-5, 5, 10),
(0, 10, 101),
(0, 100, 1003),
]
imp = make_jit_mergesort(is_argsort=True)
toplevel = imp.run_mergesort
sorter = njit(lambda arr: toplevel(arr))
for args in arglist:
self.check_argsort_stable(sorter, *args)
nop_compiler = lambda x:x
| TestMergeSort |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 50007,
"end": 50178
} | class ____(skipIf):
def __init__(self, dep, reason):
super().__init__(dep, reason, device_type="cuda")
# Skips a test on XPU if the condition is true.
| skipCUDAIf |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 138993,
"end": 143039
} | class ____(rv_continuous):
r"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is:
.. math::
f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 }
= \frac{1}{2} \text{sech}(x/2)^2
for :math:`x \ge 0`.
%(after_notes)s
References
----------
.. [1] Asgharzadeh et al (2011). "Comparisons of Methods of Estimation for the
Half-Logistic Distribution". Selcuk J. Appl. Math. 93-108.
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2
# = 1/2 * sech(x/2)**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return np.tanh(x/2.0)
def _ppf(self, q):
return 2*np.arctanh(q)
def _sf(self, x):
return 2 * sc.expit(-x)
def _isf(self, q):
return xpx.apply_where(q < 0.5, q,
lambda q: -sc.logit(0.5 * q),
lambda q: 2*np.arctanh(1 - q))
def _munp(self, n):
if n == 0:
return 1 # otherwise returns NaN
if n == 1:
return 2*np.log(2)
if n == 2:
return np.pi*np.pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*np.pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
def _entropy(self):
return 2-np.log(2)
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
def find_scale(data, loc):
# scale is solution to a fix point problem ([1] 2.6)
# use approximate MLE as starting point ([1] 3.1)
n_observations = data.shape[0]
sorted_data = np.sort(data, axis=0)
p = np.arange(1, n_observations + 1)/(n_observations + 1)
q = 1 - p
pp1 = 1 + p
alpha = p - 0.5 * q * pp1 * np.log(pp1 / q)
beta = 0.5 * q * pp1
sorted_data = sorted_data - loc
B = 2 * np.sum(alpha[1:] * sorted_data[1:])
C = 2 * np.sum(beta[1:] * sorted_data[1:]**2)
# starting guess
scale = ((B + np.sqrt(B**2 + 8 * n_observations * C))
/(4 * n_observations))
# relative tolerance of fix point iterator
rtol = 1e-8
relative_residual = 1
shifted_mean = sorted_data.mean() # y_mean - y_min
# find fix point by repeated application of eq. (2.6)
# simplify as
# exp(-x) / (1 + exp(-x)) = 1 / (1 + exp(x))
# = expit(-x))
while relative_residual > rtol:
sum_term = sorted_data * sc.expit(-sorted_data/scale)
scale_new = shifted_mean - 2/n_observations * sum_term.sum()
relative_residual = abs((scale - scale_new)/scale)
scale = scale_new
return scale
# location is independent from the scale
data_min = np.min(data)
if floc is not None:
if data_min < floc:
# There are values that are less than the specified loc.
raise FitDataError("halflogistic", lower=floc, upper=np.inf)
loc = floc
else:
# if not provided, location MLE is the minimal data point
loc = data_min
# scale depends on location
scale = fscale if fscale is not None else find_scale(data, loc)
return loc, scale
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
| halflogistic_gen |
python | sympy__sympy | sympy/assumptions/predicates/matrices.py | {
"start": 8296,
"end": 8816
} | class ____(Predicate):
"""
Integer elements matrix predicate.
Explanation
===========
``Q.integer_elements(x)`` is true iff all the elements of ``x``
are integers.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.integer(X[1, 2]), Q.integer_elements(X))
True
"""
name = "integer_elements"
handler = Dispatcher("IntegerElementsHandler", doc="Handler for key 'integer_elements'.")
| IntegerElementsPredicate |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/metadata/table.py | {
"start": 8121,
"end": 11097
} | class ____(IHaveNew):
deps_by_column: PublicAttr[Mapping[str, Sequence[TableColumnDep]]]
"""Represents the lineage of column outputs to column inputs for a tabular asset.
Args:
deps_by_column (Mapping[str, Sequence[TableColumnDep]]): A mapping from column names to
the columns that the column depends on.
Examples:
Defining column lineage at materialization time, where the resulting asset has two columns,
``new_column_foo`` and ``new_column_qux``. The first column, ``new_column_foo``, depends on
``column_bar`` in ``source_bar`` and ``column_baz`` in ``source_baz``. The second column,
``new_column_qux``, depends on ``column_quuz`` in ``source_bar``.
.. code-block:: python
from dagster import (
AssetKey,
MaterializeResult,
TableColumnDep,
TableColumnLineage,
asset,
)
@asset(deps=[AssetKey("source_bar"), AssetKey("source_baz")])
def my_asset():
yield MaterializeResult(
metadata={
"dagster/column_lineage": TableColumnLineage(
deps_by_column={
"new_column_foo": [
TableColumnDep(
asset_key=AssetKey("source_bar"),
column_name="column_bar",
),
TableColumnDep(
asset_key=AssetKey("source_baz"),
column_name="column_baz",
),
],
"new_column_qux": [
TableColumnDep(
asset_key=AssetKey("source_bar"),
column_name="column_quuz",
),
],
}
)
}
)
"""
def __new__(cls, deps_by_column: Mapping[str, Sequence[TableColumnDep]]):
deps_by_column = check.mapping_param(
deps_by_column, "deps_by_column", key_type=str, value_type=list
)
sorted_deps_by_column = {}
for column, deps in deps_by_column.items():
sorted_deps_by_column[column] = sorted(
deps, key=lambda dep: (dep.asset_key, dep.column_name)
)
check.invariant(
len(deps) == len(set((dep.asset_key, dep.column_name) for dep in deps)),
f"The deps for column `{column}` must be unique by asset key and column name.",
)
return super().__new__(cls, deps_by_column=sorted_deps_by_column)
| TableColumnLineage |
python | django__django | tests/gis_tests/geoapp/test_sitemaps.py | {
"start": 411,
"end": 2724
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
def assertChildNodes(self, elem, expected):
"Taken from syndication/tests.py."
actual = {n.nodeName for n in elem.childNodes}
expected = set(expected)
self.assertEqual(actual, expected)
def test_geositemap_kml(self):
"Tests KML/KMZ geographic sitemaps."
for kml_type in ("kml", "kmz"):
doc = minidom.parseString(
self.client.get("/sitemaps/%s.xml" % kml_type).content
)
# Ensuring the right sitemaps namespace is present.
urlset = doc.firstChild
self.assertEqual(
urlset.getAttribute("xmlns"),
"http://www.sitemaps.org/schemas/sitemap/0.9",
)
urls = urlset.getElementsByTagName("url")
self.assertEqual(2, len(urls)) # Should only be 2 sitemaps.
for url in urls:
self.assertChildNodes(url, ["loc"])
# Getting the relative URL since we don't have a real site.
kml_url = (
url.getElementsByTagName("loc")[0]
.childNodes[0]
.data.split("http://example.com")[1]
)
if kml_type == "kml":
kml_doc = minidom.parseString(self.client.get(kml_url).content)
elif kml_type == "kmz":
# Have to decompress KMZ before parsing.
buf = BytesIO(self.client.get(kml_url).content)
with zipfile.ZipFile(buf) as zf:
self.assertEqual(1, len(zf.filelist))
self.assertEqual("doc.kml", zf.filelist[0].filename)
kml_doc = minidom.parseString(zf.read("doc.kml"))
# Ensuring the correct number of placemarks are in the KML doc.
if "city" in kml_url:
model = City
elif "country" in kml_url:
model = Country
self.assertEqual(
model.objects.count(),
len(kml_doc.getElementsByTagName("Placemark")),
)
| GeoSitemapTest |
python | tensorflow__tensorflow | third_party/xla/xla/tools/buffer_debug_log/checksum_mismatch_report_test.py | {
"start": 911,
"end": 5539
} | class ____(absltest.TestCase):
def test_from_protos_loads_metadata(self):
test_log = ""
test_metadata = """
thunk_metadata {
thunk_info {
thunk_id: 100
profile_annotation: "thunk1"
}
thunk_kind: "kGemm"
}
thunk_metadata {
thunk_info {
thunk_id: 101
profile_annotation: "thunk2"
}
thunk_kind: "kConv"
}
"""
log_proto = text_format.Parse(
test_log, buffer_debug_log_pb2.BufferDebugLogProto()
)
metadata_proto = text_format.Parse(
test_metadata,
thunk_pb2.ThunkMetadataListProto(),
)
report = checksum_mismatch_report.ChecksumMismatchReport.from_protos(
{0: log_proto}, metadata_proto
)
self.assertEqual(
report.thunk_metadata,
{
100: checksum_mismatch_report.ThunkMetadata(
thunk_id=100,
thunk_kind="kGemm",
profile_annotation="thunk1",
),
101: checksum_mismatch_report.ThunkMetadata(
thunk_id=101,
thunk_kind="kConv",
profile_annotation="thunk2",
),
},
)
def test_from_protos_finds_mismatches_in_single_proto(self):
test_log = """
entries {
thunk_id: 100
execution_id: 10
buffer_idx: 0
is_input_buffer: true
checksum: 11111111
}
entries {
thunk_id: 100
execution_id: 10
buffer_idx: 1
is_input_buffer: false
checksum: 22222222
}
entries {
thunk_id: 100
execution_id: 11
buffer_idx: 0
is_input_buffer: true
checksum: 11111111
}
entries {
thunk_id: 100
execution_id: 11
buffer_idx: 1
is_input_buffer: false
checksum: 33333333
}
"""
test_metadata = ""
log_proto = text_format.Parse(
test_log, buffer_debug_log_pb2.BufferDebugLogProto()
)
metadata_proto = text_format.Parse(
test_metadata,
thunk_pb2.ThunkMetadataListProto(),
)
report = checksum_mismatch_report.ChecksumMismatchReport.from_protos(
{0: log_proto}, metadata_proto
)
self.assertEqual(
report.mismatches,
{
# thunk ID
100: {
# input checksums
checksum_mismatch_report.BufferChecksums({0: 11111111}): {
# output buffer index => checksums
1: {22222222, 33333333},
},
},
},
)
def test_from_protos_finds_mismatches_in_multiple_protos(self):
test_log_template = """
entries {{
thunk_id: 100
execution_id: 10
buffer_idx: 0
is_input_buffer: true
checksum: 11111111
}}
entries {{
thunk_id: 100
execution_id: 10
buffer_idx: 1
is_input_buffer: false
checksum: {output_checksum}
}}
"""
test_logs = [
test_log_template.format(output_checksum=checksum)
for checksum in [22222222, 33333333]
]
test_metadata = ""
log_protos = {
module_id: text_format.Parse(
test_log, buffer_debug_log_pb2.BufferDebugLogProto()
)
for module_id, test_log in enumerate(test_logs)
}
metadata_proto = text_format.Parse(
test_metadata,
thunk_pb2.ThunkMetadataListProto(),
)
report = checksum_mismatch_report.ChecksumMismatchReport.from_protos(
log_protos, metadata_proto
)
self.assertEqual(
report.mismatches,
{
# thunk ID
100: {
# input checksums
checksum_mismatch_report.BufferChecksums({0: 11111111}): {
# output buffer index => checksums
1: {22222222, 33333333},
},
},
},
)
def test_from_protos_does_not_include_consistent_executions(self):
test_log = """
entries {
thunk_id: 100
execution_id: 10
buffer_idx: 0
is_input_buffer: true
checksum: 11111111
}
entries {
thunk_id: 100
execution_id: 10
buffer_idx: 1
is_input_buffer: false
checksum: 22222222
}
entries {
thunk_id: 100
execution_id: 11
buffer_idx: 0
is_input_buffer: true
checksum: 11111111
}
entries {
thunk_id: 100
execution_id: 11
buffer_idx: 1
is_input_buffer: false
checksum: 22222222
}
"""
test_metadata = ""
log_proto = text_format.Parse(
test_log, buffer_debug_log_pb2.BufferDebugLogProto()
)
metadata_proto = text_format.Parse(
test_metadata,
thunk_pb2.ThunkMetadataListProto(),
)
report = checksum_mismatch_report.ChecksumMismatchReport.from_protos(
{0: log_proto}, metadata_proto
)
self.assertEmpty(report.mismatches)
if __name__ == "__main__":
absltest.main()
| ChecksumMismatchReportTest |
python | django__django | django/contrib/postgres/indexes.py | {
"start": 7496,
"end": 8102
} | class ____(PostgresIndex):
suffix = "spgist"
def __init__(self, *expressions, fillfactor=None, **kwargs):
self.fillfactor = fillfactor
super().__init__(*expressions, **kwargs)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.fillfactor is not None:
kwargs["fillfactor"] = self.fillfactor
return path, args, kwargs
def get_with_params(self):
with_params = []
if self.fillfactor is not None:
with_params.append("fillfactor = %d" % self.fillfactor)
return with_params
| SpGistIndex |
python | tensorflow__tensorflow | tensorflow/python/util/pywrap_xla_ops_test.py | {
"start": 837,
"end": 1622
} | class ____(googletest.TestCase):
def testGetGpuCompilableKernelNames(self):
"""Tests retrieving compilable op names for GPU."""
op_names = pywrap_xla_ops.get_gpu_kernel_names()
self.assertGreater(op_names.__len__(), 0)
self.assertEqual(op_names.count('Max'), 1)
self.assertEqual(op_names.count('Min'), 1)
self.assertEqual(op_names.count('MatMul'), 1)
def testGetCpuCompilableKernelNames(self):
"""Tests retrieving compilable op names for CPU."""
op_names = pywrap_xla_ops.get_cpu_kernel_names()
self.assertGreater(op_names.__len__(), 0)
self.assertEqual(op_names.count('Max'), 1)
self.assertEqual(op_names.count('Min'), 1)
self.assertEqual(op_names.count('MatMul'), 1)
if __name__ == '__main__':
googletest.main()
| XlaOpsetUtilsTest |
python | huggingface__transformers | tests/models/smolvlm/test_modeling_smolvlm.py | {
"start": 1516,
"end": 5231
} | class ____:
def __init__(
self,
parent,
is_training=True,
batch_size=2,
scale_factor=2,
num_images=2,
vision_config={
"image_size": 16,
"patch_size": 4,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 32,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
},
text_config={
"vocab_size": 100,
"hidden_size": 64,
"intermediate_size": 56,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 2,
"hidden_act": "silu",
"max_position_embeddings": 256,
"initializer_range": 0.02,
"rms_norm_eps": 1e-6,
"pad_token_id": 2,
"bos_token_id": 0,
"eos_token_id": 1,
"image_token_id": 57,
"tie_word_embeddings": False,
"rope_theta": 10000.0,
"sliding_window": 32,
"attention_dropout": 0.0,
},
use_cache=False,
tie_word_embeddings=False,
image_token_id=57,
):
self.parent = parent
self.is_training = is_training
self.batch_size = batch_size
self.num_images = num_images
self.scale_factor = scale_factor
self.seq_length = (
int(((vision_config["image_size"] // vision_config["patch_size"]) ** 2) / (self.scale_factor**2))
* self.num_images
)
self.use_cache = use_cache
self.image_token_id = image_token_id
self.tie_word_embeddings = tie_word_embeddings
# Hack - add properties here so use common tests
self.vocab_size = text_config["vocab_size"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.num_attention_heads = text_config["num_attention_heads"]
self.hidden_size = text_config["hidden_size"]
self.vision_config = vision_config
self.text_config = text_config
def get_config(self):
return SmolVLMConfig(
use_cache=self.use_cache,
image_token_id=self.image_token_id,
tie_word_embeddings=self.tie_word_embeddings,
vision_config=self.vision_config,
text_config=self.text_config,
vocab_size=self.vocab_size,
scale_factor=self.scale_factor,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
self.num_images,
3, # SmolVLMImageProcessor always generates RGB pixel values
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1
# For simplicity just set the last n tokens to the image token
n_image_tokens_per_batch = self.seq_length
input_ids[:, -n_image_tokens_per_batch:] = self.image_token_id
attention_mask = input_ids.ne(1).to(torch_device)
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
| SmolVLMVisionText2TextModelTester |
python | pytorch__pytorch | torch/distributed/_tools/sac_estimator.py | {
"start": 1973,
"end": 2807
} | class ____:
"""
Stores metadata for a single operator for SAC.
Attributes:
func (Any): The operator function.
time_taken (float): The time taken by the operator.
memory_used (float): The memory used by the operator.
curr_idx (int): The current operator index.
output_ids (Tuple[int, ...]): The storage IDs of the operator's outputs.
inplace_info (Tuple[int, ...]): Tuple of self and parent operator for in-place operator.
is_view_like (bool): Whether the operator is view-like.
is_rand_op (bool): Whether the operator is a random operator.
"""
func: Any
time_taken: float
memory_used: float
curr_idx: int
output_ids: tuple[int, ...]
inplace_info: tuple[int, ...]
is_view_like: bool
is_rand_op: bool
@dataclass
| _SACMetadata |
python | python__mypy | mypy/suggestions.py | {
"start": 4756,
"end": 6434
} | class ____(TraverserVisitor):
"""Visitor for finding all the types of arguments that each arg is passed to.
This is extremely simple minded but might be effective anyways.
"""
def __init__(self, func: FuncDef, typemap: dict[Expression, Type]) -> None:
self.typemap = typemap
self.arg_types: dict[SymbolNode, list[Type]] = {arg.variable: [] for arg in func.arguments}
def visit_call_expr(self, o: CallExpr) -> None:
if not any(isinstance(e, RefExpr) and e.node in self.arg_types for e in o.args):
return
typ = get_proper_type(self.typemap.get(o.callee))
if not isinstance(typ, CallableType):
return
formal_to_actual = map_actuals_to_formals(
o.arg_kinds,
o.arg_names,
typ.arg_kinds,
typ.arg_names,
lambda n: AnyType(TypeOfAny.special_form),
)
for i, args in enumerate(formal_to_actual):
for arg_idx in args:
arg = o.args[arg_idx]
if isinstance(arg, RefExpr) and arg.node in self.arg_types:
self.arg_types[arg.node].append(typ.arg_types[i])
def get_arg_uses(typemap: dict[Expression, Type], func: FuncDef) -> list[list[Type]]:
"""Find all the types of arguments that each arg is passed to.
For example, given
def foo(x: int) -> None: ...
def bar(x: str) -> None: ...
def test(x, y):
foo(x)
bar(y)
this will return [[int], [str]].
"""
finder = ArgUseFinder(func, typemap)
func.body.accept(finder)
return [finder.arg_types[arg.variable] for arg in func.arguments]
| ArgUseFinder |
python | getsentry__sentry | tests/apidocs/endpoints/scim/test_member_index.py | {
"start": 184,
"end": 2411
} | class ____(APIDocsTestCase, SCIMTestCase):
def setUp(self) -> None:
super().setUp()
self.member = self.create_member(user=self.create_user(), organization=self.organization)
self.url = reverse(
"sentry-api-0-organization-scim-member-index",
kwargs={"organization_id_or_slug": self.organization.slug},
)
def test_get(self) -> None:
response = self.client.get(self.url)
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_post(self) -> None:
post_data = {
"schemas": ["urn:ietf:params:scim:schemas:core:2.0:User"],
"userName": "test.user@okta.local",
"name": {"givenName": "Test", "familyName": "User"},
"emails": [{"primary": True, "value": "test.user@okta.local", "type": "work"}],
"displayName": "Test User",
"locale": "en-US",
"externalId": "00ujl29u0le5T6Aj10h7",
"groups": [],
"password": "1mz050nq",
"active": True,
}
response = self.client.post(self.url, post_data)
request = RequestFactory().post(self.url, post_data)
self.validate_schema(request, response)
def test_post_member_exists_but_not_accepted(self) -> None:
self.create_member(
user=self.create_user(email="test.user@okta.local"),
organization=self.organization,
role="member",
invite_status=1,
)
post_data = {
"schemas": ["urn:ietf:params:scim:schemas:core:2.0:User"],
"userName": "test.user@okta.local",
"name": {"givenName": "Test", "familyName": "User"},
"emails": [{"primary": True, "value": "test.user@okta.local", "type": "work"}],
"displayName": "Test User",
"locale": "en-US",
"externalId": "00ujl29u0le5T6Aj10h7",
"groups": [],
"password": "1mz050nq",
"active": True,
}
response = self.client.post(self.url, post_data)
request = RequestFactory().post(self.url, post_data)
self.validate_schema(request, response)
| SCIMMemberIndexDocs |
python | django-import-export__django-import-export | tests/core/tests/admin_integration/test_import_functionality.py | {
"start": 7979,
"end": 11597
} | class ____(AdminTestMixin, TestCase):
@override_settings(TEMPLATE_STRING_IF_INVALID="INVALID_VARIABLE")
def test_import(self):
# GET the import form
response = self._get_url_response(
self.book_import_url, str_in_response='form action=""'
)
self.assertTemplateUsed(response, self.admin_import_template_url)
response = self._do_import_post(self.book_import_url, "books.csv")
self.assertIn("result", response.context)
self.assertFalse(response.context["result"].has_errors())
self.assertIn("confirm_form", response.context)
confirm_form = response.context["confirm_form"]
data = confirm_form.initial
self._prepend_form_prefix(data)
self.assertEqual(data["original_file_name"], "books.csv")
response = self._post_url_response(
self.book_process_import_url, data, follow=True
)
self.assertContains(
response,
_(
"Import finished: {} new, {} updated, {} deleted and {} skipped {}."
).format(1, 0, 0, 0, Book._meta.verbose_name_plural),
)
def test_import_mac(self):
# GET the import form
response = self._get_url_response(
self.book_import_url, str_in_response='form action=""'
)
self.assertTemplateUsed(response, self.admin_import_template_url)
response = self._do_import_post(self.book_import_url, "books-mac.csv")
self.assertIn("result", response.context)
self.assertFalse(response.context["result"].has_errors())
self.assertIn("confirm_form", response.context)
confirm_form = response.context["confirm_form"]
data = confirm_form.initial
self._prepend_form_prefix(data)
self.assertEqual(data["original_file_name"], "books-mac.csv")
response = self._post_url_response(
self.book_process_import_url, data, follow=True
)
self.assertContains(
response,
_(
"Import finished: {} new, {} updated, {} deleted and {} skipped {}."
).format(1, 0, 0, 0, Book._meta.verbose_name_plural),
)
@override_settings(TEMPLATE_STRING_IF_INVALID="INVALID_VARIABLE")
def test_import_second_resource(self):
Book.objects.create(id=1)
# GET the import form
response = self._get_url_response(
self.book_import_url, str_in_response="Export/Import only book names"
)
self.assertTemplateUsed(response, self.admin_import_template_url)
self.assertContains(response, 'form action=""')
response = self._do_import_post(self.book_import_url, "books.csv", resource=1)
self.assertIn("result", response.context)
self.assertFalse(response.context["result"].has_errors())
self.assertIn("confirm_form", response.context)
confirm_form = response.context["confirm_form"]
data = confirm_form.initial
self._prepend_form_prefix(data)
self.assertEqual(data["original_file_name"], "books.csv")
response = self._post_url_response(
self.book_process_import_url, data, follow=True
)
self.assertContains(
response,
_(
"Import finished: {} new, {} updated, {} deleted and {} skipped {}."
).format(0, 1, 0, 0, Book._meta.verbose_name_plural),
)
# Check, that we really use second resource - author_email didn't get imported
self.assertEqual(Book.objects.get(id=1).author_email, "")
| ImportFileHandlingTests |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 30736,
"end": 35891
} | class ____:
def __init__(self, model, input_specs: List[spack.spec.Spec]):
self.model = model
self.input_specs = input_specs
self.full_model = None
def multiple_values_error(self, attribute, pkg):
return f'Cannot select a single "{attribute}" for package "{pkg}"'
def no_value_error(self, attribute, pkg):
return f'Cannot select a single "{attribute}" for package "{pkg}"'
def _get_cause_tree(
self,
cause: Tuple[str, str],
conditions: Dict[str, str],
condition_causes: List[Tuple[Tuple[str, str], Tuple[str, str]]],
seen: Set,
indent: str = " ",
) -> List[str]:
"""
Implementation of recursion for self.get_cause_tree. Much of this operates on tuples
(condition_id, set_id) in which the latter idea means that the condition represented by
the former held in the condition set represented by the latter.
"""
seen.add(cause)
parents = [c for e, c in condition_causes if e == cause and c not in seen]
local = f"required because {conditions[cause[0]]} "
return [indent + local] + [
c
for parent in parents
for c in self._get_cause_tree(
parent, conditions, condition_causes, seen, indent=indent + " "
)
]
def get_cause_tree(self, cause: Tuple[str, str]) -> List[str]:
"""
Get the cause tree associated with the given cause.
Arguments:
cause: The root cause of the tree (final condition)
Returns:
A list of strings describing the causes, formatted to display tree structure.
"""
conditions: Dict[str, str] = dict(extract_args(self.full_model, "condition_reason"))
condition_causes: List[Tuple[Tuple[str, str], Tuple[str, str]]] = list(
((Effect, EID), (Cause, CID))
for Effect, EID, Cause, CID in extract_args(self.full_model, "condition_cause")
)
return self._get_cause_tree(cause, conditions, condition_causes, set())
def handle_error(self, msg, *args):
"""Handle an error state derived by the solver."""
if msg == "multiple_values_error":
return self.multiple_values_error(*args)
if msg == "no_value_error":
return self.no_value_error(*args)
try:
idx = args.index("startcauses")
except ValueError:
msg_args = args
causes = []
else:
msg_args = args[:idx]
cause_args = args[idx + 1 :]
cause_args_conditions = cause_args[::2]
cause_args_ids = cause_args[1::2]
causes = list(zip(cause_args_conditions, cause_args_ids))
msg = msg.format(*msg_args)
# For variant formatting, we sometimes have to construct specs
# to format values properly. Find/replace all occurances of
# Spec(...) with the string representation of the spec mentioned
specs_to_construct = re.findall(r"Spec\(([^)]*)\)", msg)
for spec_str in specs_to_construct:
msg = msg.replace(f"Spec({spec_str})", str(spack.spec.Spec(spec_str)))
for cause in set(causes):
for c in self.get_cause_tree(cause):
msg += f"\n{c}"
return msg
def message(self, errors) -> str:
input_specs = ", ".join(elide_list([f"`{s}`" for s in self.input_specs], 5))
header = f"failed to concretize {input_specs} for the following reasons:"
messages = (
f" {idx+1:2}. {self.handle_error(msg, *args)}"
for idx, (_, msg, args) in enumerate(errors)
)
return "\n".join((header, *messages))
def raise_if_errors(self):
initial_error_args = extract_args(self.model, "error")
if not initial_error_args:
return
error_causation = clingo().Control()
parent_dir = pathlib.Path(__file__).parent
errors_lp = parent_dir / "error_messages.lp"
def on_model(model):
self.full_model = model.symbols(shown=True, terms=True)
with error_causation.backend() as backend:
for atom in self.model:
atom_id = backend.add_atom(atom)
backend.add_rule([atom_id], [], choice=False)
error_causation.load(str(errors_lp))
error_causation.ground([("base", []), ("error_messages", [])])
_ = error_causation.solve(on_model=on_model)
# No choices so there will be only one model
error_args = extract_args(self.full_model, "error")
errors = sorted(
[(int(priority), msg, args) for priority, msg, *args in error_args], reverse=True
)
try:
msg = self.message(errors)
except Exception as e:
msg = (
f"unexpected error during concretization [{str(e)}]. "
f"Please report a bug at https://github.com/spack/spack/issues"
)
raise spack.error.SpackError(msg) from e
raise UnsatisfiableSpecError(msg)
| ErrorHandler |
python | ray-project__ray | python/ray/_private/thirdparty/pathspec/pattern.py | {
"start": 123,
"end": 1225
} | class ____(object):
"""
The :class:`Pattern` class is the abstract definition of a pattern.
"""
# Make the class dict-less.
__slots__ = ('include',)
def __init__(self, include):
"""
Initializes the :class:`Pattern` instance.
*include* (:class:`bool` or :data:`None`) is whether the matched
files should be included (:data:`True`), excluded (:data:`False`),
or is a null-operation (:data:`None`).
"""
self.include = include
"""
*include* (:class:`bool` or :data:`None`) is whether the matched
files should be included (:data:`True`), excluded (:data:`False`),
or is a null-operation (:data:`None`).
"""
def match(self, files):
"""
Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
each file relative to the root directory (e.g., ``"relative/path/to/file"``).
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:class:`str`).
"""
raise NotImplementedError("{}.{} must override match().".format(self.__class__.__module__, self.__class__.__name__))
| Pattern |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-vertexaivectorsearch/llama_index/vector_stores/vertexaivectorsearch/base.py | {
"start": 868,
"end": 9443
} | class ____(BasePydanticVectorStore):
"""
Vertex AI Vector Search vector store.
In this vector store, embeddings are stored in Vertex AI Vector Store and
docs are stored within Cloud Storage bucket.
During query time, the index uses Vertex AI Vector Search to query for the
top k most similar nodes.
Args:
project_id (str) : The Google Cloud Project ID.
region (str) : The default location making the API calls.
It must be the same location as where Vector Search
index created and must be regional.
index_id (str) : The fully qualified resource name of the created
index in Vertex AI Vector Search.
endpoint_id (str): The fully qualified resource name of the created
index endpoint in Vertex AI Vector Search.
gcs_bucket_name (Optional[str]):
The location where the vectors will be stored for
the index to be created in batch mode.
credentials_path (Optional[str]):
The path of the Google credentials on the local file
system.
Examples:
`pip install llama-index-vector-stores-vertexaivectorsearch`
```python
from
vector_store = VertexAIVectorStore(
project_id=PROJECT_ID,
region=REGION,
index_id="<index_resource_name>"
endpoint_id="<index_endpoint_resource_name>"
)
```
"""
stores_text: bool = True
remove_text_from_metadata: bool = True
flat_metadata: bool = False
text_key: str
project_id: str
region: str
index_id: str
endpoint_id: str
gcs_bucket_name: Optional[str] = None
credentials_path: Optional[str] = None
_index: MatchingEngineIndex = PrivateAttr()
_endpoint: MatchingEngineIndexEndpoint = PrivateAttr()
_index_metadata: dict = PrivateAttr()
_stream_update: bool = PrivateAttr()
_staging_bucket: storage.Bucket = PrivateAttr()
# _document_storage: GCSDocumentStorage = PrivateAttr()
def __init__(
self,
project_id: Optional[str] = None,
region: Optional[str] = None,
index_id: Optional[str] = None,
endpoint_id: Optional[str] = None,
gcs_bucket_name: Optional[str] = None,
credentials_path: Optional[str] = None,
text_key: str = DEFAULT_TEXT_KEY,
remove_text_from_metadata: bool = True,
**kwargs: Any,
) -> None:
super().__init__(
project_id=project_id,
region=region,
index_id=index_id,
endpoint_id=endpoint_id,
gcs_bucket_name=gcs_bucket_name,
credentials_path=credentials_path,
text_key=text_key,
remove_text_from_metadata=remove_text_from_metadata,
)
"""Initialize params."""
_sdk_manager = VectorSearchSDKManager(
project_id=project_id, region=region, credentials_path=credentials_path
)
# get index and endpoint resource names including metadata
self._index = _sdk_manager.get_index(index_id=index_id)
self._endpoint = _sdk_manager.get_endpoint(endpoint_id=endpoint_id)
self._index_metadata = self._index.to_dict()
# get index update method from index metadata
self._stream_update = False
if self._index_metadata["indexUpdateMethod"] == "STREAM_UPDATE":
self._stream_update = True
# get bucket object when available
if self.gcs_bucket_name:
self._staging_bucket = _sdk_manager.get_gcs_bucket(
bucket_name=gcs_bucket_name
)
else:
self._staging_bucket = None
@classmethod
def from_params(
cls,
project_id: Optional[str] = None,
region: Optional[str] = None,
index_id: Optional[str] = None,
endpoint_id: Optional[str] = None,
gcs_bucket_name: Optional[str] = None,
credentials_path: Optional[str] = None,
text_key: str = DEFAULT_TEXT_KEY,
**kwargs: Any,
) -> "VertexAIVectorStore":
"""Create VertexAIVectorStore from config."""
return cls(
project_id=project_id,
region=region,
index_name=index_id,
endpoint_id=endpoint_id,
gcs_bucket_name=gcs_bucket_name,
credentials_path=credentials_path,
text_key=text_key,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "VertexAIVectorStore"
@property
def client(self) -> Any:
"""Get client."""
return self._index
@property
def index(self) -> Any:
"""Get client."""
return self._index
@property
def endpoint(self) -> Any:
"""Get client."""
return self._endpoint
@property
def staging_bucket(self) -> Any:
"""Get client."""
return self._staging_bucket
def add(
self,
nodes: List[BaseNode],
is_complete_overwrite: bool = False,
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
"""
ids = []
embeddings = []
metadatas = []
for node in nodes:
node_id = node.node_id
metadata = node_to_metadata_dict(
node, remove_text=False, flat_metadata=False
)
embedding = node.get_embedding()
ids.append(node_id)
embeddings.append(embedding)
metadatas.append(metadata)
data_points = utils.to_data_points(ids, embeddings, metadatas)
# self._document_storage.add_documents(list(zip(ids, nodes)))
if self._stream_update:
utils.stream_update_index(index=self._index, data_points=data_points)
else:
if self._staging_bucket is None:
raise ValueError(
"To update a Vector Search index a staging bucket must be defined."
)
utils.batch_update_index(
index=self._index,
data_points=data_points,
staging_bucket=self._staging_bucket,
is_complete_overwrite=is_complete_overwrite,
)
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
# get datapoint ids by filter
filter = {"ref_doc_id": ref_doc_id}
ids = utils.get_datapoints_by_filter(
index=self.index, endpoint=self.endpoint, metadata=filter
)
# remove datapoints
self._index.remove_datapoints(datapoint_ids=ids)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
query_embedding = None
if query.mode == VectorStoreQueryMode.DEFAULT:
query_embedding = [cast(List[float], query.query_embedding)]
if query.filters is not None:
if "filter" in kwargs and kwargs["filter"] is not None:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for Vertex AI Vector Search specific items that are "
"not supported via the generic query interface such as numeric filters."
)
filter, num_filter = utils.to_vectorsearch_filter(query.filters)
else:
filter = None
num_filter = None
matches = utils.find_neighbors(
index=self._index,
endpoint=self._endpoint,
embeddings=query_embedding,
top_k=query.similarity_top_k,
filter=filter,
numeric_filter=num_filter,
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
for match in matches:
node = utils.to_node(match, self.text_key)
top_k_ids.append(match.id)
top_k_scores.append(match.distance)
top_k_nodes.append(node)
return VectorStoreQueryResult(
nodes=top_k_nodes, similarities=top_k_scores, ids=top_k_ids
)
| VertexAIVectorStore |
python | pytorch__pytorch | torch/_dynamo/_trace_wrapped_higher_order_op.py | {
"start": 4102,
"end": 5070
} | class ____(torch.autograd.Function):
generate_vmap_rule = True
@staticmethod
# pyrefly: ignore [bad-override]
def forward(x: Tensor, indices: list[Tensor]) -> Tensor:
return torch.ops.aten.index(x, indices)
@staticmethod
def setup_context(ctx: Any, inputs: tuple[Any, ...], output: Any) -> None:
x, indices = inputs
ctx.save_for_backward(*indices)
ctx.input_shape = x.shape
@staticmethod
def backward(ctx, gradOut): # type: ignore[no-untyped-def]
indices = ctx.saved_tensors
return (
torch.ops.flex_lib.zeros_and_scatter(
ctx.input_shape,
indices,
gradOut,
),
None,
)
@classmethod
@torch._export.wrappers.allow_in_pre_dispatch_graph
def apply(cls, *args, **kwargs): # type: ignore[no-untyped-def]
return super().apply(*args, **kwargs)
mod_index = ModIndex.apply
| ModIndex |
python | coleifer__peewee | playhouse/postgres_ext.py | {
"start": 4393,
"end": 6790
} | class ____(IndexedFieldMixin, Field):
passthrough = True
def __init__(self, field_class=IntegerField, field_kwargs=None,
dimensions=1, convert_values=False, *args, **kwargs):
self.__field = field_class(**(field_kwargs or {}))
self.dimensions = dimensions
self.convert_values = convert_values
self.field_type = self.__field.field_type
super(ArrayField, self).__init__(*args, **kwargs)
def bind(self, model, name, set_attribute=True):
ret = super(ArrayField, self).bind(model, name, set_attribute)
self.__field.bind(model, '__array_%s' % name, False)
return ret
def ddl_datatype(self, ctx):
data_type = self.__field.ddl_datatype(ctx)
return NodeList((data_type, SQL('[]' * self.dimensions)), glue='')
def db_value(self, value):
if value is None or isinstance(value, Node):
return value
elif self.convert_values:
return self._process(self.__field.db_value, value, self.dimensions)
else:
return value if isinstance(value, list) else list(value)
def python_value(self, value):
if self.convert_values and value is not None:
conv = self.__field.python_value
if isinstance(value, list):
return self._process(conv, value, self.dimensions)
else:
return conv(value)
else:
return value
def _process(self, conv, value, dimensions):
dimensions -= 1
if dimensions == 0:
return [conv(v) for v in value]
else:
return [self._process(conv, v, dimensions) for v in value]
def __getitem__(self, value):
return ObjectSlice.create(self, value)
def _e(op):
def inner(self, rhs):
return Expression(self, op, ArrayValue(self, rhs))
return inner
__eq__ = _e(OP.EQ)
__ne__ = _e(OP.NE)
__gt__ = _e(OP.GT)
__ge__ = _e(OP.GTE)
__lt__ = _e(OP.LT)
__le__ = _e(OP.LTE)
__hash__ = Field.__hash__
def contains(self, *items):
return Expression(self, ACONTAINS, ArrayValue(self, items))
def contains_any(self, *items):
return Expression(self, ACONTAINS_ANY, ArrayValue(self, items))
def contained_by(self, *items):
return Expression(self, ACONTAINED_BY, ArrayValue(self, items))
| ArrayField |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py | {
"start": 117558,
"end": 118221
} | class ____(Qwen2_5OmniProcessorKwargs):
_defaults = {
"text_kwargs": {
"padding": False,
"padding_side": "left",
},
"videos_kwargs": {
"seconds_per_chunk": 2.0,
"position_id_per_seconds": 13.0,
"use_audio_in_video": False,
"size": {
"shortest_edge": 128 * 32 * 32,
"longest_edge": 768 * 32 * 32,
},
},
"audio_kwargs": {
"sampling_rate": 16000,
"padding": True,
"truncation": False,
"return_attention_mask": True,
},
}
| Qwen3OmniMoeProcessorKwargs |
python | mlflow__mlflow | tests/evaluate/test_evaluation.py | {
"start": 2676,
"end": 28613
} | class ____(NamedTuple):
params: dict[str, Any]
metrics: dict[str, Any]
tags: dict[str, Any]
artifacts: list[str]
def get_run_data(run_id):
client = MlflowClient()
data = client.get_run(run_id).data
artifacts = [f.path for f in client.list_artifacts(run_id)]
return RunData(params=data.params, metrics=data.metrics, tags=data.tags, artifacts=artifacts)
def get_run_datasets(run_id):
client = MlflowClient()
return client.get_run(run_id).inputs.dataset_inputs
def get_raw_tag(run_id, tag_name):
client = MlflowClient()
data = client.get_run(run_id).data
return data.tags[tag_name]
def get_local_artifact_path(run_id, artifact_path):
return get_artifact_uri(run_id, artifact_path).replace("file://", "")
@pytest.fixture(scope="module")
def iris_dataset():
X, y = get_iris()
eval_X = X[0::3]
eval_y = y[0::3]
constructor_args = {"data": eval_X, "targets": eval_y, "name": "dataset"}
ds = EvaluationDataset(**constructor_args)
ds._constructor_args = constructor_args
return ds
@pytest.fixture(scope="module")
def diabetes_dataset():
X, y = get_diabetes_dataset()
eval_X = X[0::3]
eval_y = y[0::3]
constructor_args = {"data": eval_X, "targets": eval_y}
ds = EvaluationDataset(**constructor_args)
ds._constructor_args = constructor_args
return ds
@pytest.fixture(scope="module")
def diabetes_spark_dataset():
spark_df = get_diabetes_spark_dataset().sample(fraction=0.3, seed=1)
constructor_args = {"data": spark_df, "targets": "label"}
ds = EvaluationDataset(**constructor_args)
ds._constructor_args = constructor_args
return ds
@pytest.fixture(scope="module")
def breast_cancer_dataset():
X, y = get_breast_cancer_dataset()
eval_X = X[0::3]
eval_y = y[0::3]
constructor_args = {"data": eval_X, "targets": eval_y}
ds = EvaluationDataset(**constructor_args)
ds._constructor_args = constructor_args
return ds
def get_pipeline_model_dataset():
"""
The dataset tweaks the IRIS dataset by changing its first 2 features into categorical features,
and replace some feature values with NA values.
The dataset is prepared for a pipeline model, see `pipeline_model_uri`.
"""
X, y = get_iris()
def convert_num_to_label(x):
return f"v_{round(x)}"
f1 = np.array(list(map(convert_num_to_label, X[:, 0])))
f2 = np.array(list(map(convert_num_to_label, X[:, 1])))
f3 = X[:, 2]
f4 = X[:, 3]
f1[0::8] = None
f2[1::8] = None
f3[2::8] = np.nan
f4[3::8] = np.nan
data = pd.DataFrame(
{
"f1": f1,
"f2": f2,
"f3": f3,
"f4": f4,
"y": y,
}
)
return data, "y"
@pytest.fixture
def pipeline_model_uri():
return get_pipeline_model_uri()
def get_pipeline_model_uri():
"""
Create a pipeline model that transforms and trains on the dataset returned by
`get_pipeline_model_dataset`. The pipeline model imputes the missing values in
input dataset, encodes categorical features, and then trains a logistic regression
model.
"""
data, target_col = get_pipeline_model_dataset()
X = data.drop(target_col, axis=1)
y = data[target_col].to_numpy()
encoder = sklearn.preprocessing.OrdinalEncoder()
str_imputer = sklearn.impute.SimpleImputer(missing_values=None, strategy="most_frequent")
num_imputer = sklearn.impute.SimpleImputer(missing_values=np.nan, strategy="mean")
preproc_pipeline = sklearn.pipeline.Pipeline(
[
("imputer", str_imputer),
("encoder", encoder),
]
)
pipeline = sklearn.pipeline.Pipeline(
[
(
"transformer",
sklearn.compose.make_column_transformer(
(preproc_pipeline, ["f1", "f2"]),
(num_imputer, ["f3", "f4"]),
),
),
("clf", sklearn.linear_model.LogisticRegression()),
]
)
pipeline.fit(X, y)
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(pipeline, name="pipeline_model")
return model_info.model_uri
@pytest.fixture
def linear_regressor_model_uri():
return get_linear_regressor_model_uri()
def get_linear_regressor_model_uri():
X, y = get_diabetes_dataset()
reg = sklearn.linear_model.LinearRegression()
reg.fit(X, y)
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(reg, name="reg_model")
return model_info.model_uri
@pytest.fixture
def spark_linear_regressor_model_uri():
return get_spark_linear_regressor_model_uri()
def get_spark_linear_regressor_model_uri():
spark_df = get_diabetes_spark_dataset()
reg = SparkLinearRegression()
spark_reg_model = reg.fit(spark_df)
with mlflow.start_run():
model_info = mlflow.spark.log_model(spark_reg_model, artifact_path="spark_reg_model")
return model_info.model_uri
@pytest.fixture
def multiclass_logistic_regressor_model_uri():
return multiclass_logistic_regressor_model_uri_by_max_iter(2)
def multiclass_logistic_regressor_model_uri_by_max_iter(max_iter):
X, y = get_iris()
clf = sklearn.linear_model.LogisticRegression(max_iter=max_iter)
clf.fit(X, y)
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(clf, name=f"clf_model_{max_iter}_iters")
return model_info.model_uri
@pytest.fixture
def binary_logistic_regressor_model_uri():
return get_binary_logistic_regressor_model_uri()
def get_binary_logistic_regressor_model_uri():
X, y = get_breast_cancer_dataset()
clf = sklearn.linear_model.LogisticRegression()
clf.fit(X, y)
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(clf, name="bin_clf_model")
return model_info.model_uri
@pytest.fixture
def svm_model_uri():
return get_svm_model_url()
def get_svm_model_url():
X, y = get_breast_cancer_dataset()
clf = sklearn.svm.LinearSVC()
clf.fit(X, y)
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(clf, name="svm_model")
return model_info.model_uri
@pytest.fixture
def iris_pandas_df_dataset():
X, y = get_iris()
eval_X = X[0::3]
eval_y = y[0::3]
data = pd.DataFrame(
{
"f1": eval_X[:, 0],
"f2": eval_X[:, 1],
"f3": eval_X[:, 2],
"f4": eval_X[:, 3],
"y": eval_y,
}
)
constructor_args = {"data": data, "targets": "y"}
ds = EvaluationDataset(**constructor_args)
ds._constructor_args = constructor_args
return ds
@pytest.fixture
def iris_pandas_df_num_cols_dataset():
X, y = get_iris()
eval_X = X[0::3]
eval_y = y[0::3]
data = pd.DataFrame(eval_X)
data["y"] = eval_y
constructor_args = {"data": data, "targets": "y"}
ds = EvaluationDataset(**constructor_args)
ds._constructor_args = constructor_args
return ds
def test_mlflow_evaluate_logs_traces():
eval_data = pd.DataFrame(
{
"inputs": [
"What is MLflow?",
"What is Spark?",
],
"ground_truth": ["What is MLflow?", "Not what is Spark?"],
}
)
@mlflow.trace
def model(inputs):
return inputs
with mlflow.start_run() as run:
evaluate(
model, eval_data, targets="ground_truth", extra_metrics=[mlflow.metrics.exact_match()]
)
assert len(get_traces()) == 1
assert run.info.run_id == get_traces()[0].info.request_metadata[TraceMetadataKey.SOURCE_RUN]
def test_pyfunc_evaluate_logs_traces():
class Model(mlflow.pyfunc.PythonModel):
@mlflow.trace()
def predict(self, context, model_input):
return self.add(model_input, model_input)
@mlflow.trace()
def add(self, x, y):
return x + y
eval_data = pd.DataFrame(
{
"inputs": [1, 2, 4],
"ground_truth": [2, 4, 8],
}
)
with mlflow.start_run() as run:
model_info = mlflow.pyfunc.log_model(name="model", python_model=Model())
evaluate(
model_info.model_uri,
eval_data,
targets="ground_truth",
extra_metrics=[mlflow.metrics.exact_match()],
)
traces = get_traces()
assert len(traces) == 1
assert len(traces[0].data.spans) == 2
assert run.info.run_id == traces[0].info.request_metadata[TraceMetadataKey.SOURCE_RUN]
assert traces[0].info.request_metadata[TraceMetadataKey.MODEL_ID] == model_info.model_id
def test_classifier_evaluate(multiclass_logistic_regressor_model_uri, iris_dataset):
y_true = iris_dataset.labels_data
classifier_model = mlflow.pyfunc.load_model(multiclass_logistic_regressor_model_uri)
y_pred = classifier_model.predict(iris_dataset.features_data)
expected_accuracy_score = accuracy_score(y_true, y_pred)
expected_metrics = {
"accuracy_score": expected_accuracy_score,
}
expected_saved_metrics = {
"accuracy_score": expected_accuracy_score,
}
expected_csv_artifact = confusion_matrix(y_true, y_pred)
cm_figure = sklearn.metrics.ConfusionMatrixDisplay.from_predictions(y_true, y_pred).figure_
img_buf = io.BytesIO()
cm_figure.savefig(img_buf)
img_buf.seek(0)
expected_image_artifact = Image.open(img_buf)
with mlflow.start_run() as run:
eval_result = evaluate(
multiclass_logistic_regressor_model_uri,
iris_dataset._constructor_args["data"],
model_type="classifier",
targets=iris_dataset._constructor_args["targets"],
evaluators="dummy_evaluator",
)
csv_artifact_name = "confusion_matrix"
saved_csv_artifact_path = get_local_artifact_path(run.info.run_id, csv_artifact_name + ".csv")
png_artifact_name = "confusion_matrix_image"
saved_png_artifact_path = get_local_artifact_path(run.info.run_id, png_artifact_name) + ".png"
_, saved_metrics, _, saved_artifacts = get_run_data(run.info.run_id)
assert saved_metrics == expected_saved_metrics
assert set(saved_artifacts) == {csv_artifact_name + ".csv", png_artifact_name + ".png"}
assert eval_result.metrics == expected_metrics
confusion_matrix_artifact = eval_result.artifacts[csv_artifact_name]
np.testing.assert_array_equal(confusion_matrix_artifact.content, expected_csv_artifact)
assert confusion_matrix_artifact.uri == get_artifact_uri(
run.info.run_id, csv_artifact_name + ".csv"
)
np.testing.assert_array_equal(
confusion_matrix_artifact._load(saved_csv_artifact_path), expected_csv_artifact
)
confusion_matrix_image_artifact = eval_result.artifacts[png_artifact_name]
assert (
ImageChops.difference(
confusion_matrix_image_artifact.content, expected_image_artifact
).getbbox()
is None
)
assert confusion_matrix_image_artifact.uri == get_artifact_uri(
run.info.run_id, png_artifact_name + ".png"
)
assert (
ImageChops.difference(
confusion_matrix_image_artifact._load(saved_png_artifact_path),
expected_image_artifact,
).getbbox()
is None
)
with TempDir() as temp_dir:
temp_dir_path = temp_dir.path()
eval_result.save(temp_dir_path)
with open(temp_dir.path("metrics.json")) as fp:
assert json.load(fp) == eval_result.metrics
with open(temp_dir.path("artifacts_metadata.json")) as fp:
json_dict = json.load(fp)
assert "confusion_matrix" in json_dict
assert json_dict["confusion_matrix"] == {
"uri": confusion_matrix_artifact.uri,
"class_name": "mlflow_test_plugin.dummy_evaluator.Array2DEvaluationArtifact",
}
assert "confusion_matrix_image" in json_dict
assert json_dict["confusion_matrix_image"] == {
"uri": confusion_matrix_image_artifact.uri,
"class_name": "mlflow.models.evaluation.artifacts.ImageEvaluationArtifact",
}
assert set(os.listdir(temp_dir.path("artifacts"))) == {
"confusion_matrix.csv",
"confusion_matrix_image.png",
}
loaded_eval_result = EvaluationResult.load(temp_dir_path)
assert loaded_eval_result.metrics == eval_result.metrics
loaded_confusion_matrix_artifact = loaded_eval_result.artifacts[csv_artifact_name]
assert confusion_matrix_artifact.uri == loaded_confusion_matrix_artifact.uri
np.testing.assert_array_equal(
confusion_matrix_artifact.content,
loaded_confusion_matrix_artifact.content,
)
loaded_confusion_matrix_image_artifact = loaded_eval_result.artifacts[png_artifact_name]
assert confusion_matrix_image_artifact.uri == loaded_confusion_matrix_image_artifact.uri
assert (
ImageChops.difference(
confusion_matrix_image_artifact.content,
loaded_confusion_matrix_image_artifact.content,
).getbbox()
is None
)
new_confusion_matrix_artifact = Array2DEvaluationArtifact(uri=confusion_matrix_artifact.uri)
new_confusion_matrix_artifact._load()
np.testing.assert_array_equal(
confusion_matrix_artifact.content,
new_confusion_matrix_artifact.content,
)
new_confusion_matrix_image_artifact = ImageEvaluationArtifact(
uri=confusion_matrix_image_artifact.uri
)
new_confusion_matrix_image_artifact._load()
np.testing.assert_array_equal(
confusion_matrix_image_artifact.content,
new_confusion_matrix_image_artifact.content,
)
def test_regressor_evaluate(linear_regressor_model_uri, diabetes_dataset):
y_true = diabetes_dataset.labels_data
regressor_model = mlflow.pyfunc.load_model(linear_regressor_model_uri)
y_pred = regressor_model.predict(diabetes_dataset.features_data)
expected_mae = mean_absolute_error(y_true, y_pred)
expected_mse = mean_squared_error(y_true, y_pred)
expected_metrics = {
"mean_absolute_error": expected_mae,
"mean_squared_error": expected_mse,
}
expected_saved_metrics = {
"mean_absolute_error": expected_mae,
"mean_squared_error": expected_mse,
}
with mlflow.start_run() as run:
eval_result = evaluate(
linear_regressor_model_uri,
diabetes_dataset._constructor_args["data"],
model_type="regressor",
targets=diabetes_dataset._constructor_args["targets"],
evaluators="dummy_evaluator",
)
_, saved_metrics, _, _ = get_run_data(run.info.run_id)
assert saved_metrics == expected_saved_metrics
assert eval_result.metrics == expected_metrics
def _load_diabetes_dataset_in_required_format(format):
data = sklearn.datasets.load_diabetes()
if format == "numpy":
return data.data, data.target
elif format == "pandas":
df = pd.DataFrame(data.data, columns=data.feature_names)
df["label"] = data.target
return df, "label"
elif format == "spark":
spark = SparkSession.builder.master("local[*]").getOrCreate()
panda_df = pd.DataFrame(data.data, columns=data.feature_names)
panda_df["label"] = data.target
spark_df = spark.createDataFrame(panda_df)
return spark_df, "label"
elif format == "list":
return data.data.tolist(), data.target.tolist()
else:
raise TypeError(
f"`format` must be one of 'numpy', 'pandas', 'spark' or 'list', but received {format}."
)
@pytest.mark.parametrize("data_format", ["list", "numpy", "pandas", "spark"])
def test_regressor_evaluation(linear_regressor_model_uri, data_format):
data, target = _load_diabetes_dataset_in_required_format(data_format)
with mlflow.start_run() as run:
eval_result = evaluate(
linear_regressor_model_uri,
data=data,
targets=target,
model_type="regressor",
evaluators=["default"],
)
_, saved_metrics, _, _ = get_run_data(run.info.run_id)
for k, v in eval_result.metrics.items():
assert v == saved_metrics[k]
datasets = get_run_datasets(run.info.run_id)
assert len(datasets) == 1
assert len(datasets[0].tags) == 0
assert datasets[0].dataset.source_type == "code"
def test_pandas_df_regressor_evaluation_mlflow_dataset_with_metric_prefix(
linear_regressor_model_uri,
):
data = sklearn.datasets.load_diabetes()
df = pd.DataFrame(data.data, columns=data.feature_names)
df["y"] = data.target
mlflow_df = from_pandas(df=df, source="my_src", targets="y")
with mlflow.start_run() as run:
eval_result = evaluate(
linear_regressor_model_uri,
data=mlflow_df,
model_type="regressor",
evaluators=["default"],
evaluator_config={
"default": {
"metric_prefix": "eval",
}
},
)
_, saved_metrics, _, _ = get_run_data(run.info.run_id)
for k, v in eval_result.metrics.items():
assert v == saved_metrics[k]
datasets = get_run_datasets(run.info.run_id)
assert len(datasets) == 1
assert datasets[0].tags[0].value == "eval"
def test_pandas_df_regressor_evaluation_mlflow_dataset(linear_regressor_model_uri):
data = sklearn.datasets.load_diabetes()
df = pd.DataFrame(data.data, columns=data.feature_names)
df["y"] = data.target
mlflow_df = from_pandas(df=df, source="my_src", targets="y")
with mlflow.start_run() as run:
eval_result = evaluate(
linear_regressor_model_uri,
data=mlflow_df,
model_type="regressor",
evaluators=["default"],
)
_, saved_metrics, _, _ = get_run_data(run.info.run_id)
for k, v in eval_result.metrics.items():
assert v == saved_metrics[k]
datasets = get_run_datasets(run.info.run_id)
assert len(datasets) == 1
assert len(datasets[0].tags) == 0
def test_pandas_df_regressor_evaluation_mlflow_dataset_with_targets_from_dataset(
linear_regressor_model_uri,
):
data = sklearn.datasets.load_diabetes()
df = pd.DataFrame(data.data, columns=data.feature_names)
df["y"] = data.target
mlflow_df = from_pandas(df=df, source="my_src", targets="y")
with mlflow.start_run() as run:
eval_result = evaluate(
linear_regressor_model_uri,
data=mlflow_df,
model_type="regressor",
evaluators=["default"],
)
_, saved_metrics, _, _ = get_run_data(run.info.run_id)
for k, v in eval_result.metrics.items():
assert v == saved_metrics[k]
datasets = get_run_datasets(run.info.run_id)
assert len(datasets) == 1
assert len(datasets[0].tags) == 0
def test_dataset_name():
X, y = get_iris()
d1 = EvaluationDataset(data=X, targets=y, name="a1")
assert d1.name == "a1"
d2 = EvaluationDataset(data=X, targets=y)
assert d2.name == d2.hash
def test_dataset_metadata():
X, y = get_iris()
d1 = EvaluationDataset(data=X, targets=y, name="a1", path="/path/to/a1")
assert d1._metadata == {
"hash": "6bdf4e119bf1a37e7907dfd9f0e68733",
"name": "a1",
"path": "/path/to/a1",
}
def test_gen_md5_for_arraylike_obj():
def get_md5(data):
md5_gen = hashlib.md5(usedforsecurity=False)
_gen_md5_for_arraylike_obj(md5_gen, data)
return md5_gen.hexdigest()
list0 = list(range(20))
list1 = [100] + list0[1:]
list2 = list0[:-1] + [100]
list3 = list0[:10] + [100] + list0[10:]
assert len({get_md5(list0), get_md5(list1), get_md5(list2), get_md5(list3)}) == 4
list4 = list0[:10] + [99] + list0[10:]
assert get_md5(list3) == get_md5(list4)
def test_gen_md5_for_arraylike_obj_with_pandas_df_using_float_idx_does_not_raise_keyerror():
float_indices = np.random.uniform(low=0.5, high=13.3, size=(10,))
df = pd.DataFrame(np.random.randn(10, 4), index=float_indices, columns=["A", "B", "C", "D"])
md5_gen = hashlib.md5(usedforsecurity=False)
assert _gen_md5_for_arraylike_obj(md5_gen, df) is None
def test_dataset_hash(
iris_dataset, iris_pandas_df_dataset, iris_pandas_df_num_cols_dataset, diabetes_spark_dataset
):
assert iris_dataset.hash == "99329a790dc483e7382c0d1d27aac3f3"
assert iris_pandas_df_dataset.hash == "799d4f50e2e353127f94a0e5300add06"
assert iris_pandas_df_num_cols_dataset.hash == "3c5fc56830a0646001253e25e17bdce4"
assert diabetes_spark_dataset.hash == "ebfb050519e7e5b463bd38b0c8d04243"
def test_trace_dataset_hash():
# Validates that a dataset containing Traces can be hashed.
df = pd.DataFrame(
{
"request": ["Hello"],
"trace": [Trace(info=create_test_trace_info("tr"), data=TraceData([]))],
}
)
dataset = EvaluationDataset(data=df)
assert dataset.hash == "757c14bf38aa42d36b93ccd70b1ea719"
# Hash of a dataset with a different column should be different
df2 = pd.DataFrame(
{
"request": ["Hi"],
"trace": [Trace(info=create_test_trace_info("tr"), data=TraceData([]))],
}
)
dataset2 = EvaluationDataset(data=df2)
assert dataset2.hash != dataset.hash
def test_dataset_with_pandas_dataframe():
data = pd.DataFrame({"f1": [1, 2], "f2": [3, 4], "f3": [5, 6], "label": [0, 1]})
eval_dataset = EvaluationDataset(data=data, targets="label")
assert list(eval_dataset.features_data.columns) == ["f1", "f2", "f3"]
np.testing.assert_array_equal(eval_dataset.features_data.f1.to_numpy(), [1, 2])
np.testing.assert_array_equal(eval_dataset.features_data.f2.to_numpy(), [3, 4])
np.testing.assert_array_equal(eval_dataset.features_data.f3.to_numpy(), [5, 6])
np.testing.assert_array_equal(eval_dataset.labels_data, [0, 1])
eval_dataset2 = EvaluationDataset(data=data, targets="label", feature_names=["f3", "f2"])
assert list(eval_dataset2.features_data.columns) == ["f3", "f2"]
np.testing.assert_array_equal(eval_dataset2.features_data.f2.to_numpy(), [3, 4])
np.testing.assert_array_equal(eval_dataset2.features_data.f3.to_numpy(), [5, 6])
def test_dataset_with_array_data():
features = [[1, 2], [3, 4]]
labels = [0, 1]
for input_data in [features, np.array(features)]:
eval_dataset1 = EvaluationDataset(data=input_data, targets=labels)
np.testing.assert_array_equal(eval_dataset1.features_data, features)
np.testing.assert_array_equal(eval_dataset1.labels_data, labels)
assert list(eval_dataset1.feature_names) == ["feature_1", "feature_2"]
assert EvaluationDataset(
data=input_data, targets=labels, feature_names=["a", "b"]
).feature_names == ["a", "b"]
with pytest.raises(MlflowException, match="all elements must have the same length"):
EvaluationDataset(data=[[1, 2], [3, 4, 5]], targets=labels)
def test_dataset_autogen_feature_names():
labels = [0]
eval_dataset2 = EvaluationDataset(data=[list(range(9))], targets=labels)
assert eval_dataset2.feature_names == [f"feature_{i + 1}" for i in range(9)]
eval_dataset2 = EvaluationDataset(data=[list(range(10))], targets=labels)
assert eval_dataset2.feature_names == [f"feature_{i + 1:02d}" for i in range(10)]
eval_dataset2 = EvaluationDataset(data=[list(range(99))], targets=labels)
assert eval_dataset2.feature_names == [f"feature_{i + 1:02d}" for i in range(99)]
eval_dataset2 = EvaluationDataset(data=[list(range(100))], targets=labels)
assert eval_dataset2.feature_names == [f"feature_{i + 1:03d}" for i in range(100)]
with pytest.raises(
MlflowException, match="features example rows must be the same length with labels array"
):
EvaluationDataset(data=[[1, 2], [3, 4]], targets=[1, 2, 3])
def test_dataset_from_spark_df(spark_session):
spark_df = spark_session.createDataFrame([(1.0, 2.0, 3.0)] * 10, ["f1", "f2", "y"])
with mock.patch.object(EvaluationDataset, "SPARK_DATAFRAME_LIMIT", 5):
dataset = EvaluationDataset(spark_df, targets="y")
assert list(dataset.features_data.columns) == ["f1", "f2"]
assert list(dataset.features_data["f1"]) == [1.0] * 5
assert list(dataset.features_data["f2"]) == [2.0] * 5
assert list(dataset.labels_data) == [3.0] * 5
def test_log_dataset_tag(iris_dataset, iris_pandas_df_dataset):
model_uuid = uuid.uuid4().hex
with mlflow.start_run() as run:
client = MlflowClient()
iris_dataset._log_dataset_tag(client, run.info.run_id, model_uuid=model_uuid)
_, _, tags, _ = get_run_data(run.info.run_id)
logged_meta1 = {**iris_dataset._metadata, "model": model_uuid}
logged_meta2 = {**iris_pandas_df_dataset._metadata, "model": model_uuid}
assert json.loads(tags["mlflow.datasets"]) == [logged_meta1]
raw_tag = get_raw_tag(run.info.run_id, "mlflow.datasets")
assert " " not in raw_tag # assert the tag string remove all whitespace chars.
# Test appending dataset tag
iris_pandas_df_dataset._log_dataset_tag(client, run.info.run_id, model_uuid=model_uuid)
_, _, tags, _ = get_run_data(run.info.run_id)
assert json.loads(tags["mlflow.datasets"]) == [
logged_meta1,
logged_meta2,
]
# Test log repetitive dataset
iris_dataset._log_dataset_tag(client, run.info.run_id, model_uuid=model_uuid)
_, _, tags, _ = get_run_data(run.info.run_id)
assert json.loads(tags["mlflow.datasets"]) == [
logged_meta1,
logged_meta2,
]
| RunData |
python | kubernetes-client__python | kubernetes/base/dynamic/exceptions.py | {
"start": 2903,
"end": 2977
} | class ____(DynamicApiError):
""" 400: StatusBadRequest """
| BadRequestError |
python | networkx__networkx | networkx/utils/configs.py | {
"start": 9492,
"end": 15440
} | class ____(Config):
"""Configuration for NetworkX that controls behaviors such as how to use backends.
Attribute and bracket notation are supported for getting and setting configurations::
>>> nx.config.backend_priority == nx.config["backend_priority"]
True
Parameters
----------
backend_priority : list of backend names or dict or BackendPriorities
Enable automatic conversion of graphs to backend graphs for functions
implemented by the backend. Priority is given to backends listed earlier.
This is a nested configuration with keys ``algos``, ``generators``,
``classes``, and, optionally, function names. Setting this value to a
list of backend names will set ``nx.config.backend_priority.algos``.
For more information, see ``help(nx.config.backend_priority)``.
Default is empty list.
backends : Config mapping of backend names to backend Config
The keys of the Config mapping are names of all installed NetworkX backends,
and the values are their configurations as Config mappings.
cache_converted_graphs : bool
If True, then save converted graphs to the cache of the input graph. Graph
conversion may occur when automatically using a backend from `backend_priority`
or when using the `backend=` keyword argument to a function call. Caching can
improve performance by avoiding repeated conversions, but it uses more memory.
Care should be taken to not manually mutate a graph that has cached graphs; for
example, ``G[u][v][k] = val`` changes the graph, but does not clear the cache.
Using methods such as ``G.add_edge(u, v, weight=val)`` will clear the cache to
keep it consistent. ``G.__networkx_cache__.clear()`` manually clears the cache.
Default is True.
fallback_to_nx : bool
If True, then "fall back" and run with the default "networkx" implementation
for dispatchable functions not implemented by backends of input graphs. When a
backend graph is passed to a dispatchable function, the default behavior is to
use the implementation from that backend if possible and raise if not. Enabling
``fallback_to_nx`` makes the networkx implementation the fallback to use instead
of raising, and will convert the backend graph to a networkx-compatible graph.
Default is False.
warnings_to_ignore : set of strings
Control which warnings from NetworkX are not emitted. Valid elements:
- `"cache"`: when a cached value is used from ``G.__networkx_cache__``.
Notes
-----
Environment variables may be used to control some default configurations:
- ``NETWORKX_BACKEND_PRIORITY``: set ``backend_priority.algos`` from comma-separated names.
- ``NETWORKX_CACHE_CONVERTED_GRAPHS``: set ``cache_converted_graphs`` to True if nonempty.
- ``NETWORKX_FALLBACK_TO_NX``: set ``fallback_to_nx`` to True if nonempty.
- ``NETWORKX_WARNINGS_TO_IGNORE``: set `warnings_to_ignore` from comma-separated names.
and can be used for finer control of ``backend_priority`` such as:
- ``NETWORKX_BACKEND_PRIORITY_ALGOS``: same as ``NETWORKX_BACKEND_PRIORITY``
to set ``backend_priority.algos``.
This is a global configuration. Use with caution when using from multiple threads.
"""
backend_priority: BackendPriorities
backends: Config
cache_converted_graphs: bool
fallback_to_nx: bool
warnings_to_ignore: set[str]
def _on_setattr(self, key, value):
from .backends import backend_info
if key == "backend_priority":
if isinstance(value, list):
# `config.backend_priority = [backend]` sets `backend_priority.algos`
value = BackendPriorities(
**dict(
self.backend_priority,
algos=self.backend_priority._on_setattr("algos", value),
)
)
elif isinstance(value, dict):
kwargs = value
value = BackendPriorities(algos=[], generators=[], classes=[])
for key, val in kwargs.items():
setattr(value, key, val)
elif not isinstance(value, BackendPriorities):
raise TypeError(
f"{key!r} config must be a dict of lists of backend names; got {value!r}"
)
elif key == "backends":
if not (
isinstance(value, Config)
and all(isinstance(key, str) for key in value)
and all(isinstance(val, Config) for val in value.values())
):
raise TypeError(
f"{key!r} config must be a Config of backend configs; got {value!r}"
)
if missing := {x for x in value if x not in backend_info}:
missing = ", ".join(map(repr, sorted(missing)))
raise ValueError(f"Unknown backend when setting {key!r}: {missing}")
elif key in {"cache_converted_graphs", "fallback_to_nx"}:
if not isinstance(value, bool):
raise TypeError(f"{key!r} config must be True or False; got {value!r}")
elif key == "warnings_to_ignore":
if not (isinstance(value, set) and all(isinstance(x, str) for x in value)):
raise TypeError(
f"{key!r} config must be a set of warning names; got {value!r}"
)
known_warnings = {"cache"}
if missing := {x for x in value if x not in known_warnings}:
missing = ", ".join(map(repr, sorted(missing)))
raise ValueError(
f"Unknown warning when setting {key!r}: {missing}. Valid entries: "
+ ", ".join(sorted(known_warnings))
)
return value
| NetworkXConfig |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_glue_data_quality.py | {
"start": 7172,
"end": 11451
} | class ____:
SENSOR = GlueDataQualityRuleRecommendationRunSensor
def setup_method(self):
self.default_args = dict(
task_id="test_data_quality_rule_recommendation_sensor",
recommendation_run_id="12345",
poke_interval=5,
max_retries=0,
)
self.sensor = self.SENSOR(**self.default_args, aws_conn_id=None)
def test_base_aws_op_attributes(self):
op = self.SENSOR(**self.default_args)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
op = self.SENSOR(
**self.default_args,
aws_conn_id="aws-test-custom-conn",
region_name="eu-west-1",
verify=False,
botocore_config={"read_timeout": 42},
)
assert op.hook.aws_conn_id == "aws-test-custom-conn"
assert op.hook._region_name == "eu-west-1"
assert op.hook._verify is False
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
@mock.patch.object(GlueDataQualityHook, "conn")
def test_poke_success_state(self, mock_conn):
mock_conn.get_data_quality_rule_recommendation_run.return_value = (
SAMPLE_RESPONSE_GET_DATA_RULE_RECOMMENDATION_RUN_SUCCEEDED
)
assert self.sensor.poke({}) is True
@mock.patch.object(GlueDataQualityHook, "conn")
def test_poke_intermediate_state(self, mock_conn):
mock_conn.get_data_quality_rule_recommendation_run.return_value = (
SAMPLE_RESPONSE_DATA_RULE_RECOMMENDATION_RUN_RUNNING
)
assert self.sensor.poke({}) is False
@pytest.mark.parametrize("state", SENSOR.FAILURE_STATES)
@mock.patch.object(GlueDataQualityHook, "conn")
def test_poke_failure_states(self, mock_conn, state):
mock_conn.get_data_quality_rule_recommendation_run.return_value = {
"RunId": "12345",
"Status": state,
"ErrorString": "unknown error",
}
sensor = self.SENSOR(**self.default_args, aws_conn_id=None)
message = (
f"Error: AWS Glue data quality recommendation run RunId: 12345 Run Status: {state}: unknown error"
)
with pytest.raises(AirflowException, match=message):
sensor.poke({})
mock_conn.get_data_quality_rule_recommendation_run.assert_called_once_with(RunId="12345")
def test_sensor_defer(self):
"""Test the execute method raise TaskDeferred if running sensor in deferrable mode"""
sensor = GlueDataQualityRuleRecommendationRunSensor(
task_id="test_task",
poke_interval=0,
recommendation_run_id="12345",
aws_conn_id="aws_default",
deferrable=True,
)
with pytest.raises(TaskDeferred):
sensor.execute(context=None)
@mock.patch.object(GlueDataQualityHook, "conn")
def test_execute_complete_succeeds_if_status_in_succeeded_states(self, mock_conn, caplog):
mock_conn.get_data_quality_rule_recommendation_run.return_value = (
SAMPLE_RESPONSE_GET_DATA_RULE_RECOMMENDATION_RUN_SUCCEEDED
)
op = GlueDataQualityRuleRecommendationRunSensor(
task_id="test_data_quality_rule_recommendation_run_sensor",
recommendation_run_id="12345",
poke_interval=0,
aws_conn_id="aws_default",
deferrable=True,
)
event = {"status": "success", "recommendation_run_id": "12345"}
op.execute_complete(context={}, event=event)
assert "AWS Glue data quality recommendation run completed." in caplog.messages
def test_execute_complete_fails_if_status_in_failure_states(self):
op = GlueDataQualityRuleRecommendationRunSensor(
task_id="test_data_quality_rule_recommendation_run_sensor",
recommendation_run_id="12345",
poke_interval=0,
aws_conn_id="aws_default",
deferrable=True,
)
event = {"status": "failure"}
with pytest.raises(AirflowException):
op.execute_complete(context={}, event=event)
| TestGlueDataQualityRuleRecommendationRunSensor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1246901,
"end": 1247310
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData):
"""Audit log entry for a org.add_billing_manager"""
__schema__ = github_schema
__field_names__ = ("invitation_email",)
invitation_email = sgqlc.types.Field(String, graphql_name="invitationEmail")
"""The email address used to invite a billing manager for the
organization.
"""
| OrgAddBillingManagerAuditEntry |
python | pytest-dev__pluggy | src/pluggy/_hooks.py | {
"start": 12732,
"end": 20826
} | class ____:
"""A caller of all registered implementations of a hook specification."""
__slots__ = (
"name",
"spec",
"_hookexec",
"_hookimpls",
"_call_history",
)
def __init__(
self,
name: str,
hook_execute: _HookExec,
specmodule_or_class: _Namespace | None = None,
spec_opts: HookspecOpts | None = None,
) -> None:
""":meta private:"""
#: Name of the hook getting called.
self.name: Final = name
self._hookexec: Final = hook_execute
# The hookimpls list. The caller iterates it *in reverse*. Format:
# 1. trylast nonwrappers
# 2. nonwrappers
# 3. tryfirst nonwrappers
# 4. trylast wrappers
# 5. wrappers
# 6. tryfirst wrappers
self._hookimpls: Final[list[HookImpl]] = []
self._call_history: _CallHistory | None = None
# TODO: Document, or make private.
self.spec: HookSpec | None = None
if specmodule_or_class is not None:
assert spec_opts is not None
self.set_specification(specmodule_or_class, spec_opts)
# TODO: Document, or make private.
def has_spec(self) -> bool:
return self.spec is not None
# TODO: Document, or make private.
def set_specification(
self,
specmodule_or_class: _Namespace,
spec_opts: HookspecOpts,
) -> None:
if self.spec is not None:
raise ValueError(
f"Hook {self.spec.name!r} is already registered "
f"within namespace {self.spec.namespace}"
)
self.spec = HookSpec(specmodule_or_class, self.name, spec_opts)
if spec_opts.get("historic"):
self._call_history = []
def is_historic(self) -> bool:
"""Whether this caller is :ref:`historic <historic>`."""
return self._call_history is not None
def _remove_plugin(self, plugin: _Plugin) -> None:
for i, method in enumerate(self._hookimpls):
if method.plugin == plugin:
del self._hookimpls[i]
return
raise ValueError(f"plugin {plugin!r} not found")
def get_hookimpls(self) -> list[HookImpl]:
"""Get all registered hook implementations for this hook."""
return self._hookimpls.copy()
def _add_hookimpl(self, hookimpl: HookImpl) -> None:
"""Add an implementation to the callback chain."""
for i, method in enumerate(self._hookimpls):
if method.hookwrapper or method.wrapper:
splitpoint = i
break
else:
splitpoint = len(self._hookimpls)
if hookimpl.hookwrapper or hookimpl.wrapper:
start, end = splitpoint, len(self._hookimpls)
else:
start, end = 0, splitpoint
if hookimpl.trylast:
self._hookimpls.insert(start, hookimpl)
elif hookimpl.tryfirst:
self._hookimpls.insert(end, hookimpl)
else:
# find last non-tryfirst method
i = end - 1
while i >= start and self._hookimpls[i].tryfirst:
i -= 1
self._hookimpls.insert(i + 1, hookimpl)
def __repr__(self) -> str:
return f"<HookCaller {self.name!r}>"
def _verify_all_args_are_provided(self, kwargs: Mapping[str, object]) -> None:
# This is written to avoid expensive operations when not needed.
if self.spec:
for argname in self.spec.argnames:
if argname not in kwargs:
notincall = ", ".join(
repr(argname)
for argname in self.spec.argnames
# Avoid self.spec.argnames - kwargs.keys()
# it doesn't preserve order.
if argname not in kwargs.keys()
)
warnings.warn(
f"Argument(s) {notincall} which are declared in the hookspec "
"cannot be found in this hook call",
stacklevel=2,
)
break
def __call__(self, **kwargs: object) -> Any:
"""Call the hook.
Only accepts keyword arguments, which should match the hook
specification.
Returns the result(s) of calling all registered plugins, see
:ref:`calling`.
"""
assert not self.is_historic(), (
"Cannot directly call a historic hook - use call_historic instead."
)
self._verify_all_args_are_provided(kwargs)
firstresult = self.spec.opts.get("firstresult", False) if self.spec else False
# Copy because plugins may register other plugins during iteration (#438).
return self._hookexec(self.name, self._hookimpls.copy(), kwargs, firstresult)
def call_historic(
self,
result_callback: Callable[[Any], None] | None = None,
kwargs: Mapping[str, object] | None = None,
) -> None:
"""Call the hook with given ``kwargs`` for all registered plugins and
for all plugins which will be registered afterwards, see
:ref:`historic`.
:param result_callback:
If provided, will be called for each non-``None`` result obtained
from a hook implementation.
"""
assert self._call_history is not None
kwargs = kwargs or {}
self._verify_all_args_are_provided(kwargs)
self._call_history.append((kwargs, result_callback))
# Historizing hooks don't return results.
# Remember firstresult isn't compatible with historic.
# Copy because plugins may register other plugins during iteration (#438).
res = self._hookexec(self.name, self._hookimpls.copy(), kwargs, False)
if result_callback is None:
return
if isinstance(res, list):
for x in res:
result_callback(x)
def call_extra(
self, methods: Sequence[Callable[..., object]], kwargs: Mapping[str, object]
) -> Any:
"""Call the hook with some additional temporarily participating
methods using the specified ``kwargs`` as call parameters, see
:ref:`call_extra`."""
assert not self.is_historic(), (
"Cannot directly call a historic hook - use call_historic instead."
)
self._verify_all_args_are_provided(kwargs)
opts: HookimplOpts = {
"wrapper": False,
"hookwrapper": False,
"optionalhook": False,
"trylast": False,
"tryfirst": False,
"specname": None,
}
hookimpls = self._hookimpls.copy()
for method in methods:
hookimpl = HookImpl(None, "<temp>", method, opts)
# Find last non-tryfirst nonwrapper method.
i = len(hookimpls) - 1
while i >= 0 and (
# Skip wrappers.
(hookimpls[i].hookwrapper or hookimpls[i].wrapper)
# Skip tryfirst nonwrappers.
or hookimpls[i].tryfirst
):
i -= 1
hookimpls.insert(i + 1, hookimpl)
firstresult = self.spec.opts.get("firstresult", False) if self.spec else False
return self._hookexec(self.name, hookimpls, kwargs, firstresult)
def _maybe_apply_history(self, method: HookImpl) -> None:
"""Apply call history to a new hookimpl if it is marked as historic."""
if self.is_historic():
assert self._call_history is not None
for kwargs, result_callback in self._call_history:
res = self._hookexec(self.name, [method], kwargs, False)
if res and result_callback is not None:
# XXX: remember firstresult isn't compat with historic
assert isinstance(res, list)
result_callback(res[0])
# Historical name (pluggy<=1.2), kept for backward compatibility.
_HookCaller = HookCaller
| HookCaller |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/gradients_test.py | {
"start": 1943,
"end": 27361
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
super(FunctionGradientsTest, self).setUp()
cpus = config.list_physical_devices('CPU')
# Set 4 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
def testGraphModeWithGradients(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
@polymorphic_function.function
def step():
def inner():
return v * v
return backprop.implicit_grad(inner)()[0][0]
self.assertAllEqual(step(), 2.0)
def testGraphGradientVariable(self):
with ops.Graph().as_default(), self.cached_session():
v = variables.Variable(1.0)
@polymorphic_function.function
def f():
return 2.0 * v
node = f()
grads, = gradients_impl.gradients(node, v)
v.initializer.run()
self.assertAllEqual(grads, 2.0)
self.assertEqual(grads.shape, v.shape)
def testSymbolicHigherOrder(self):
@polymorphic_function.function
def f(x, order):
y = polymorphic_function.function(lambda: math_ops.cos(x))()
for _ in range(order):
y, = gradients_impl.gradients(y, [x])
return y
for order, expected in enumerate(_COS_DERIVATIVES):
self.assertAllClose(
expected(constant_op.constant(1.)),
f(constant_op.constant(1.), order))
@parameterized.parameters([dict(persistent=True),
dict(persistent=False)])
def testSymbolicHigherOrderUnderTape(self, persistent):
@polymorphic_function.function
def f(x, order):
with backprop.GradientTape(persistent=persistent) as tape:
tape.watch(x)
# Note that having a tape active, even if we don't use it, forces us
# down a different function call path. Symbolic gradients should work
# here too; correctness of tape gradients are tested elsewhere.
y = polymorphic_function.function(lambda: math_ops.cos(x))()
tape_dy = tape.gradient(y, x)
for _ in range(order):
y, = gradients_impl.gradients(y, [x])
if order > 0:
y1 = tape_dy
for _ in range(order - 1):
y1, = gradients_impl.gradients(y1, [x])
else:
y1 = y
return y, y1
for order, expected_f in enumerate(_COS_DERIVATIVES):
expected = self.evaluate(expected_f(constant_op.constant(1.)))
self.assertAllClose(
(expected, expected),
f(constant_op.constant(1.), order))
def testIteratedGradientsNested(self):
def _grad(f):
def _grad_function(primal):
with backprop.GradientTape() as tape:
tape.watch(primal)
primal_out = f(primal)
return tape.gradient(primal_out, primal)
return _grad_function
@polymorphic_function.function
def _forward(x):
return math_ops.cos(x)
f = _forward
traced_f = polymorphic_function.function(f)
one = constant_op.constant(1.)
for expected in _COS_DERIVATIVES:
self.assertAllClose(expected(one), f(one))
self.assertAllClose(expected(one), traced_f(one))
self.assertAllClose(expected(one), polymorphic_function.function(f)(one))
f = _grad(f)
traced_f = polymorphic_function.function(_grad(traced_f))
def testIteratedGradientsNestedWithVariable(self):
def _grad(f):
def _grad_function():
with backprop.GradientTape() as tape:
primal_out = f()
g, = tape.gradient(primal_out, tape.watched_variables())
return g
return _grad_function
v = variables.Variable(2.)
@polymorphic_function.function
def _forward():
return math_ops.cos(v)
f = _forward
two = constant_op.constant(2.)
for expected in _COS_DERIVATIVES:
self.assertAllClose(expected(two), f())
self.assertAllClose(expected(two), polymorphic_function.function(f)())
f = _grad(f)
def testIteratedGradientsPersistent(self):
@polymorphic_function.function
def _forward(z):
return math_ops.cos(z)
f = _forward
with backprop.GradientTape(persistent=True) as tape:
start = constant_op.constant(1.)
tape.watch(start)
x = f(start)
for expected in _COS_DERIVATIVES:
self.assertAllClose(expected(start), x)
x = tape.gradient(x, start)
def testHigherOrderWithVariable(self):
v = variables.Variable(1.)
@polymorphic_function.function
def _forward():
return math_ops.cos(v)
f = _forward
with backprop.GradientTape(persistent=True) as tape:
x = f()
for expected in _COS_DERIVATIVES:
self.assertAllClose(expected(constant_op.constant(1.)), x)
x, = tape.gradient(x, tape.watched_variables())
def testGradientsChained(self):
@polymorphic_function.function
def _forward(z):
return math_ops.cos(z)
f = _forward
x = constant_op.constant(1.)
with backprop.GradientTape() as t:
t.watch(x)
y = f(x)
with backprop.GradientTape() as tt:
doutputs = constant_op.constant(2.)
tt.watch(doutputs)
g = t.gradient(y, x, doutputs)
self.assertAllClose(-2. * math_ops.sin(x), g)
gg = tt.gradient(g, doutputs)
# We're taking gradients with respect to doutputs, which is just a linear
# function of the gradient.
self.assertAllClose(-math_ops.sin(x), gg)
def testSymGradGatherNd(self):
with ops.Graph().as_default(), self.cached_session():
@polymorphic_function.function
def f(x):
return array_ops.gather_nd(x, [[0]])
c = constant_op.constant([[2.]])
f_c = f(c)
g, = gradients_impl.gradients(f_c, c)
self.assertAllEqual(self.evaluate(g).values, [[1.0]])
def testNoSymGradNestedDefun(self):
@polymorphic_function.function
def outer():
@polymorphic_function.function
def f(x):
return array_ops.gather_nd(x, [[0]])
c = constant_op.constant([[2.]])
f_c = f(c)
g, = gradients_impl.gradients(f_c, c)
self.assertIsInstance(g, indexed_slices.IndexedSlices)
outer()
def testGraphFunctionWithGradients(self):
v = resource_variable_ops.ResourceVariable(1.0, name='v')
@polymorphic_function.function
def step():
def inner():
return v * v
return backprop.implicit_grad(inner)()[0][0]
step_op = step.get_concrete_function()
self.assertEqual(step_op.output_dtypes, dtypes.float32)
self.assertEqual(step_op.output_shapes, tensor_shape.TensorShape([]))
self.assertAllEqual(step_op(), 2.0)
@test_util.run_in_graph_and_eager_modes()
def testDefunCondGradient(self):
@polymorphic_function.function
def f(x):
return cond.cond(x > 0.5, lambda: 2 * x, lambda: 3 * x)
with backprop.GradientTape() as t:
x = constant_op.constant(1.0)
t.watch(x)
y = f(x)
self.assertAllEqual(self.evaluate(t.gradient(y, x)), 2.0)
@test_util.run_in_graph_and_eager_modes()
def testGraphLoopGradient(self):
@polymorphic_function.function
def f(x):
return while_loop.while_loop(
lambda _, i: i < 2, lambda x, i: (2 * x, i + 1), [x, 0]
)[0]
with backprop.GradientTape() as t:
x = constant_op.constant(1.0)
t.watch(x)
y = f(x)
self.assertAllEqual(self.evaluate(t.gradient(y, x)), 4.0)
def testGraphLoopGradientInsideSession(self):
with ops.Graph().as_default():
n = constant_op.constant(2.0)
x = array_ops.placeholder(dtypes.float32, shape=None)
@polymorphic_function.function
def f():
c = lambda n: n < 10
b = lambda n: n * x
return while_loop.while_loop(c, b, [n], [tensor_shape.unknown_shape()])
l = f()
dx = gradients_impl.gradients(l, [x])[0]
with self.cached_session():
self.assertEqual(dx.eval(feed_dict={x: 2.0}), 24.0)
def testDefunDifferentiable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@polymorphic_function.function
def f():
return v * v
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
def testDefunCanBeDifferentiatedTwice(self):
v = resource_variable_ops.ResourceVariable(1.0)
@polymorphic_function.function
def f():
return v * v
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
# Ensure that v is watched again.
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0)
def testSymbolicGradientVariableNoneNotZerosLike(self):
with ops.Graph().as_default():
v = variables.Variable(1.0)
@polymorphic_function.function
def f(x, v):
v.read_value()
return x * x
x = constant_op.constant(1.0)
l = f(x, v)
_, dv = gradients_impl.gradients(l, [x, v])
with self.cached_session():
v.initializer.run()
self.assertEqual(dv, None)
def testDefunCallBackprop(self):
@polymorphic_function.function
def f(x):
return math_ops.add(x, x)
@polymorphic_function.function
def g(x):
return backprop.gradients_function(f, [0])(x)[0]
self.assertAllEqual(2, g(constant_op.constant(2.)))
@test_util.run_v1_only('b/120545219')
def testGraphModeEagerGradError(self):
with context.graph_mode():
def f():
x = variable_scope.get_variable(
'v', initializer=constant_op.constant(1.0))
return x * constant_op.constant(2.0)
with self.assertRaisesRegex(ValueError,
'No trainable variables were accessed'):
backprop.implicit_val_and_grad(f)()
def testDefunCallBackpropUsingSameObjectForMultipleArguments(self):
@polymorphic_function.function
def g(x):
return backprop.gradients_function(math_ops.multiply, [0, 1])(x, x)
def np_g(x):
return [d.numpy() for d in g(x)]
x = constant_op.constant(1.)
self.assertAllEqual([1., 1.], np_g(x))
self.assertAllEqual([1., 1.], np_g(1.))
def testGradientTensorConversionWithDefun(self):
three = resource_variable_ops.ResourceVariable(3.0, name='v')
@polymorphic_function.function
def f(x):
return math_ops.add(x, three)
def g(x):
return f(x)
g = backprop.implicit_grad(g)(constant_op.constant(1.0))[0][0]
self.assertAllEqual(g, 1.0)
def testGradient(self):
matmul = polymorphic_function.function(math_ops.matmul)
def sq(x):
return matmul(x, x, transpose_a=True)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
grad_t, = backprop.gradients_function(sq, [0])(t)
self.assertAllEqual(grad_t, [[6, 6], [14, 14]])
def testGradientInFunction(self):
@polymorphic_function.function
def f(x):
return backprop.gradients_function(lambda y: y * y, [0])(x)[0]
self.assertAllEqual(f(constant_op.constant(1.0)), 2.0)
def testGradientOfGatherWithDefun(self):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
grad_fn = backprop.implicit_grad(sum_gather)
gradient = grad_fn()
defun_grad_fn = backprop.implicit_grad(
polymorphic_function.function(sum_gather))
defun_gradient = defun_grad_fn()
self.assertEqual(len(gradient), len(defun_gradient))
gradient = gradient[0][0]
defun_gradient = defun_gradient[0][0]
self.assertAllEqual(gradient.values, defun_gradient.values)
self.assertAllEqual(gradient.indices, defun_gradient.indices)
self.assertAllEqual(gradient.dense_shape, defun_gradient.dense_shape)
def testDifferentiableFunctionNoneOutputs(self):
@polymorphic_function.function
def my_function(x):
return x, None
def wrapper(x):
return my_function(x)[0]
g = backprop.gradients_function(wrapper, [0])(constant_op.constant(0.0))
self.assertAllEqual(g[0], 1.)
@polymorphic_function.function
def foo(a):
return None, a * a
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
none, r = foo(x)
g = tp.gradient(r, x)
self.assertIs(none, None)
self.assertAllEqual(r, 25.0)
self.assertAllEqual(g, 2 * 5.0)
@test_util.run_in_graph_and_eager_modes
def testNestedDifferentiableFunction(self):
@polymorphic_function.function
def inner_fn(a, b):
return a * math_ops.add(a, b)
@polymorphic_function.function
def outer_fn(x):
return inner_fn(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunction(self):
@polymorphic_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@polymorphic_function.function
def outer_fn(x):
return middle_fn(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionWithMultipleGradCalls(self):
@polymorphic_function.function
def inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return math_ops.mul(a, inner_fn(a, b))
@polymorphic_function.function
def outer_fn(x):
return middle_fn(x, 3.0)
x = constant_op.constant(5.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
self.assertAllEqual(middle_fn(3.0, x), 3.0 * (3.0 + 5.0))
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
y = constant_op.constant(4.0)
with backprop.GradientTape() as tp:
tp.watch(y)
result = outer_fn(y)
grad = tp.gradient(result, y)
self.assertAllEqual(grad, 2 * 4.0 + 3.0)
with backprop.GradientTape() as tp:
tp.watch(y)
result = inner_fn(y, y)
grad = tp.gradient(result, y)
self.assertAllEqual(grad, 2.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionGradientTapeInDefun(self):
@polymorphic_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@polymorphic_function.function
def outer_fn(x):
with backprop.GradientTape() as tp:
tp.watch(x)
result = middle_fn(x, 1.0)
grad = tp.gradient(result, x)
return grad
x = constant_op.constant(5.0)
grad = outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionGradientTapeInNestedDefun(self):
@polymorphic_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@polymorphic_function.function
def almost_outer_fn(x):
with backprop.GradientTape() as tp:
tp.watch(x)
result = middle_fn(x, 1.0)
grad = tp.gradient(result, x)
return grad
@polymorphic_function.function
def outer_fn(x):
return almost_outer_fn(x)
x = constant_op.constant(5.0)
grad = outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionGradientTapeInMultNestedDefun(self):
@polymorphic_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@polymorphic_function.function
def almost_outer_fn(x):
with backprop.GradientTape() as tp:
tp.watch(x)
result = middle_fn(x, 1.0)
grad = tp.gradient(result, x)
return grad
@polymorphic_function.function
def outer_fn(x):
return almost_outer_fn(x)
@polymorphic_function.function
def outer_outer_fn(x):
return outer_fn(x)
x = constant_op.constant(5.0)
grad = outer_outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionTFGradientInDefun(self):
@polymorphic_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@polymorphic_function.function
def outer_fn(x):
result = middle_fn(x, 1.0)
return gradients_impl.gradients(result, [x])[0]
x = constant_op.constant(5.0)
grad = outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionTFGradientInNestedDefun(self):
@polymorphic_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@polymorphic_function.function
def almost_outer_fn(x):
result = middle_fn(x, 1.0)
return gradients_impl.gradients(result, [x])[0]
@polymorphic_function.function
def outer_fn(x):
return almost_outer_fn(x)
x = constant_op.constant(5.0)
grad = outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
@test_util.run_in_graph_and_eager_modes
def testDeeplyNestedDifferentiableFunctionTFGradientInMultNestedDefun(self):
@polymorphic_function.function
def inner_inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def inner_fn(a, b):
return inner_inner_fn(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@polymorphic_function.function
def almost_outer_fn(x):
result = middle_fn(x, 1.0)
return gradients_impl.gradients(result, [x])[0]
@polymorphic_function.function
def outer_fn(x):
return almost_outer_fn(x)
@polymorphic_function.function
def outer_outer_fn(x):
return outer_fn(x)
x = constant_op.constant(5.0)
grad = outer_outer_fn(x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
def testDeeplyNestedDifferentiableFunctionWithVariable(self):
var = variables.Variable(constant_op.constant(1.0))
@polymorphic_function.function
def inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return a * inner_fn(a, b)
@polymorphic_function.function
def outer_fn(x):
return middle_fn(x, var)
x = constant_op.constant(5.0)
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 1.0)
def testDeeplyNestedDifferentiableFunctionWithVariableMultipleGradCalls(self):
v = variables.Variable(constant_op.constant(3.0))
@polymorphic_function.function
def inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return math_ops.mul(a, inner_fn(a, b))
@polymorphic_function.function
def outer_fn(x):
return middle_fn(x, v)
x = constant_op.constant(5.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
self.assertAllEqual(middle_fn(v, x), 3.0 * (3.0 + 5.0))
with backprop.GradientTape() as tp:
tp.watch(x)
result = outer_fn(x)
grad = tp.gradient(result, x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
y = constant_op.constant(4.0)
with backprop.GradientTape() as tp:
tp.watch(y)
result = outer_fn(y)
grad = tp.gradient(result, y)
self.assertAllEqual(grad, 2 * 4.0 + 3.0)
v.assign(constant_op.constant(1.5))
with backprop.GradientTape() as tp:
tp.watch(y)
result = outer_fn(y)
grad = tp.gradient(result, y)
self.assertAllEqual(grad, 2 * 4.0 + 1.5)
with backprop.GradientTape() as tp:
tp.watch(y)
result = inner_fn(y, v)
grad = tp.gradient(result, y)
self.assertAllEqual(grad, 1.0)
def testDeeplyNestedDifferentiableFunctionWithVariableMultipleTFGrads(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(3.0)
v.initializer.run()
@polymorphic_function.function
def inner_fn(a, b):
return math_ops.add(a, b)
@polymorphic_function.function
def middle_fn(a, b):
return math_ops.mul(a, inner_fn(a, b))
@polymorphic_function.function
def outer_fn(x):
return middle_fn(x, v)
x = constant_op.constant(5.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
grad, = gradients_impl.gradients(outer_fn(x), x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))
self.assertAllEqual(middle_fn(v, x), 3.0 * (3.0 + 5.0))
grad, = gradients_impl.gradients(outer_fn(x), x)
self.assertAllEqual(grad, 2 * 5.0 + 3.0)
y = constant_op.constant(4.0)
grad, = gradients_impl.gradients(outer_fn(y), y)
self.assertAllEqual(grad, 2 * 4.0 + 3.0)
self.evaluate(v.assign(constant_op.constant(1.5)))
grad, = gradients_impl.gradients(outer_fn(y), y)
self.assertAllEqual(grad, 2 * 4.0 + 1.5)
grad, = gradients_impl.gradients(inner_fn(y, v), y)
self.assertAllEqual(grad, 1.0)
def testNestedDifferentiableFunctionNoneOutputs(self):
@polymorphic_function.function
def foo(a, b):
return None, a * math_ops.add(a, b), None, 2*a
@polymorphic_function.function
def bar(x):
return foo(x, 1.0)
x = constant_op.constant(5.0)
with backprop.GradientTape(persistent=True) as tp:
tp.watch(x)
none1, r1, none2, r2 = bar(x)
g1 = tp.gradient(r1, x)
g2 = tp.gradient(r2, x)
self.assertAllEqual(r1, 30.0)
self.assertAllEqual(r2, 10.0)
self.assertIs(none1, None)
self.assertIs(none2, None)
self.assertAllEqual(g1, 2 * 5.0 + 1.0)
self.assertAllEqual(g2, 2.0)
def testGradientWithKeywordArguments(self):
matmul = polymorphic_function.function(math_ops.matmul)
def sq(x):
return matmul(a=x, b=x, transpose_a=True)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
grad_t, = backprop.gradients_function(sq, [0])(t)
self.assertAllEqual(grad_t, [[6, 6], [14, 14]])
with backprop.GradientTape(persistent=True) as tape:
tape.watch(t)
one = matmul(t, b=t, transpose_a=True)
two = matmul(b=t, a=t, transpose_a=True)
three = matmul(a=t, b=t, transpose_a=True)
for output in [one, two, three]:
self.assertAllEqual(tape.gradient(output, t), [[6, 6], [14, 14]])
def testGradientInFunctionWithKeywordArguments(self):
@polymorphic_function.function
def f(x):
return backprop.gradients_function(lambda y: y * y, [0])(x)[0]
self.assertAllEqual(f(x=constant_op.constant(1.0)), 2.0)
def testFunctionHasNoSecondOrderGradient(self):
# This test needs nn_grad imported. We could just disable the lint error,
# but this way if the test is deleted we'll know the import isn't needed.
_ = nn_grad
v = variables.Variable(1.)
@polymorphic_function.function
def f(labels, logits):
return polymorphic_function.function(
nn_ops.sparse_softmax_cross_entropy_with_logits)(
labels=labels, logits=logits + v)
@polymorphic_function.function
def f_grad():
with backprop.GradientTape() as tape:
logits = constant_op.constant([1., 2.])
tape.watch(logits)
out = f(constant_op.constant(1), logits)
return tape.gradient(out, logits)
# Mainly we want to check that the function builds despite
# sparse_softmax_cross_entropy_with_logits not having a second-order
# gradient defined.
self.assertAllEqual([2], f_grad().shape)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| FunctionGradientsTest |
python | astropy__astropy | astropy/utils/masked/tests/test_masked.py | {
"start": 26852,
"end": 26928
} | class ____(MaskedItemTests, LongitudeSetup):
pass
| TestMaskedLongitudeItems |
python | huggingface__transformers | src/transformers/models/kosmos2/configuration_kosmos2.py | {
"start": 5992,
"end": 9586
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Kosmos2VisionModel`]. It is used to instantiate a
KOSMOS-2 vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the vision encoder of the KOSMOS-2
[microsoft/kosmos-2-patch14-224](https://huggingface.co/microsoft/kosmos-2-patch14-224) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
```"""
model_type = "kosmos_2_vision_model"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=1024,
intermediate_size=4096,
num_hidden_layers=24,
num_attention_heads=16,
num_channels=3,
image_size=224,
patch_size=14,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
| Kosmos2VisionConfig |
python | django__django | django/contrib/gis/gdal/geometries.py | {
"start": 23576,
"end": 23617
} | class ____(LineString):
pass
| LinearRing |
python | doocs__leetcode | lcp/LCP 50. 宝石补给/Solution.py | {
"start": 0,
"end": 234
} | class ____:
def giveGem(self, gem: List[int], operations: List[List[int]]) -> int:
for x, y in operations:
v = gem[x] >> 1
gem[y] += v
gem[x] -= v
return max(gem) - min(gem)
| Solution |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/settings.py | {
"start": 32165,
"end": 37822
} | class ____(ExportableSettings):
default_settings: Optional[TrainerSettings] = None
behaviors: TrainerSettings.DefaultTrainerDict = attr.ib(
factory=TrainerSettings.DefaultTrainerDict
)
env_settings: EnvironmentSettings = attr.ib(factory=EnvironmentSettings)
engine_settings: EngineSettings = attr.ib(factory=EngineSettings)
environment_parameters: Optional[Dict[str, EnvironmentParameterSettings]] = None
checkpoint_settings: CheckpointSettings = attr.ib(factory=CheckpointSettings)
torch_settings: TorchSettings = attr.ib(factory=TorchSettings)
# These are options that are relevant to the run itself, and not the engine or environment.
# They will be left here.
debug: bool = parser.get_default("debug")
# Convert to settings while making sure all fields are valid
cattr.register_structure_hook(EnvironmentSettings, strict_to_cls)
cattr.register_structure_hook(EngineSettings, strict_to_cls)
cattr.register_structure_hook(CheckpointSettings, strict_to_cls)
cattr.register_structure_hook_func(
lambda t: t == Dict[str, EnvironmentParameterSettings],
EnvironmentParameterSettings.structure,
)
cattr.register_structure_hook(Lesson, strict_to_cls)
cattr.register_structure_hook(
ParameterRandomizationSettings, ParameterRandomizationSettings.structure
)
cattr.register_unstructure_hook(
ParameterRandomizationSettings, ParameterRandomizationSettings.unstructure
)
cattr.register_structure_hook(TrainerSettings, TrainerSettings.structure)
cattr.register_structure_hook(
TrainerSettings.DefaultTrainerDict, TrainerSettings.dict_to_trainerdict
)
cattr.register_unstructure_hook(collections.defaultdict, defaultdict_to_dict)
@staticmethod
def from_argparse(args: argparse.Namespace) -> "RunOptions":
"""
Takes an argparse.Namespace as specified in `parse_command_line`, loads input configuration files
from file paths, and converts to a RunOptions instance.
:param args: collection of command-line parameters passed to mlagents-learn
:return: RunOptions representing the passed in arguments, with trainer config, curriculum and sampler
configs loaded from files.
"""
argparse_args = vars(args)
config_path = StoreConfigFile.trainer_config_path
# Load YAML
configured_dict: Dict[str, Any] = {
"checkpoint_settings": {},
"env_settings": {},
"engine_settings": {},
"torch_settings": {},
}
_require_all_behaviors = True
if config_path is not None:
configured_dict.update(load_config(config_path))
else:
# If we're not loading from a file, we don't require all behavior names to be specified.
_require_all_behaviors = False
# Use the YAML file values for all values not specified in the CLI.
for key in configured_dict.keys():
# Detect bad config options
if key not in attr.fields_dict(RunOptions):
raise TrainerConfigError(
"The option {} was specified in your YAML file, but is invalid.".format(
key
)
)
# Override with CLI args
# Keep deprecated --load working, TODO: remove
argparse_args["resume"] = argparse_args["resume"] or argparse_args["load_model"]
for key, val in argparse_args.items():
if key in DetectDefault.non_default_args:
if key in attr.fields_dict(CheckpointSettings):
configured_dict["checkpoint_settings"][key] = val
elif key in attr.fields_dict(EnvironmentSettings):
configured_dict["env_settings"][key] = val
elif key in attr.fields_dict(EngineSettings):
configured_dict["engine_settings"][key] = val
elif key in attr.fields_dict(TorchSettings):
configured_dict["torch_settings"][key] = val
else: # Base options
configured_dict[key] = val
final_runoptions = RunOptions.from_dict(configured_dict)
final_runoptions.checkpoint_settings.prioritize_resume_init()
# Need check to bypass type checking but keep structure on dict working
if isinstance(final_runoptions.behaviors, TrainerSettings.DefaultTrainerDict):
# configure whether or not we should require all behavior names to be found in the config YAML
final_runoptions.behaviors.set_config_specified(_require_all_behaviors)
_non_default_args = DetectDefault.non_default_args
# Prioritize the deterministic mode from the cli for deterministic actions.
if "deterministic" in _non_default_args:
for behaviour in final_runoptions.behaviors.keys():
final_runoptions.behaviors[
behaviour
].network_settings.deterministic = argparse_args["deterministic"]
return final_runoptions
@staticmethod
def from_dict(
options_dict: Dict[str, Any],
) -> "RunOptions":
# If a default settings was specified, set the TrainerSettings class override
if (
"default_settings" in options_dict.keys()
and options_dict["default_settings"] is not None
):
TrainerSettings.default_override = cattr.structure(
options_dict["default_settings"], TrainerSettings
)
return cattr.structure(options_dict, RunOptions)
| RunOptions |
python | openai__openai-python | src/openai/types/beta/chatkit/chat_session.py | {
"start": 427,
"end": 1329
} | class ____(BaseModel):
id: str
"""Identifier for the ChatKit session."""
chatkit_configuration: ChatSessionChatKitConfiguration
"""Resolved ChatKit feature configuration for the session."""
client_secret: str
"""Ephemeral client secret that authenticates session requests."""
expires_at: int
"""Unix timestamp (in seconds) for when the session expires."""
max_requests_per_1_minute: int
"""Convenience copy of the per-minute request limit."""
object: Literal["chatkit.session"]
"""Type discriminator that is always `chatkit.session`."""
rate_limits: ChatSessionRateLimits
"""Resolved rate limit values."""
status: ChatSessionStatus
"""Current lifecycle state of the session."""
user: str
"""User identifier associated with the session."""
workflow: ChatKitWorkflow
"""Workflow metadata for the session."""
| ChatSession |
python | ray-project__ray | python/ray/tune/trainable/trainable.py | {
"start": 1568,
"end": 36971
} | class ____:
"""Abstract class for trainable models, functions, etc.
A call to ``train()`` on a trainable will execute one logical iteration of
training. As a rule of thumb, the execution time of one train call should
be large enough to avoid overheads (i.e. more than a few seconds), but
short enough to report progress periodically (i.e. at most a few minutes).
Calling ``save()`` should save the training state of a trainable to disk,
and ``restore(path)`` should restore a trainable to the given state.
Generally you only need to implement ``setup``, ``step``,
``save_checkpoint``, and ``load_checkpoint`` when subclassing Trainable.
Other implementation methods that may be helpful to override are
``log_result``, ``reset_config``, ``cleanup``, and ``_export_model``.
Tune will convert this class into a Ray actor, which runs on a separate process.
By default, Tune will also change the current working directory of this process to
its corresponding trial-level log directory ``self.logdir``.
This is designed so that different trials that run on the same physical node won't
accidentally write to the same location and overstep each other.
The behavior of changing the working directory can be disabled by setting the
`RAY_CHDIR_TO_TRIAL_DIR=0` environment variable. This allows access to files
in the original working directory, but relative paths should be used for read only
purposes, and you must make sure that the directory is synced on all nodes if
running on multiple machines.
The `TUNE_ORIG_WORKING_DIR` environment variable was the original workaround for
accessing paths relative to the original working directory. This environment
variable is deprecated, and the `RAY_CHDIR_TO_TRIAL_DIR` environment variable
described above should be used instead.
This class supports checkpointing to and restoring from remote storage.
"""
def __init__(
self,
config: Dict[str, Any] = None,
logger_creator: Callable[[Dict[str, Any]], "Logger"] = None, # Deprecated (2.7)
storage: Optional[StorageContext] = None,
):
"""Initialize a Trainable.
Sets up logging and points ``self.logdir`` to a directory in which
training outputs should be placed.
Subclasses should prefer defining ``setup()`` instead of overriding
``__init__()`` directly.
Args:
config: Trainable-specific configuration data. By default
will be saved as ``self.config``.
logger_creator: (Deprecated) Function that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
storage: StorageContext object that contains persistent storage paths
"""
self.config = config or {}
trial_info = self.config.pop(TRIAL_INFO, None)
if self.is_actor():
disable_ipython()
# TODO(ml-team): Remove `logger_creator` in 2.7.
# TODO(justinvyu): Rename/remove logdir.
self._result_logger = self._logdir = None
self._create_logger(self.config, logger_creator)
self._stdout_context = self._stdout_fp = self._stdout_stream = None
self._stderr_context = self._stderr_fp = self._stderr_stream = None
self._stderr_logging_handler = None
stdout_file = self.config.pop(STDOUT_FILE, None)
stderr_file = self.config.pop(STDERR_FILE, None)
self._iteration = 0
self._time_total = 0.0
self._timesteps_total = None
self._episodes_total = None
self._time_since_restore = 0.0
self._timesteps_since_restore = 0
self._iterations_since_restore = 0
self._last_result = None
self._restored = False
self._trial_info = trial_info
self._stdout_file = stdout_file
self._stderr_file = stderr_file
self._start_time = time.time()
self._local_ip = ray.util.get_node_ip_address()
self._storage = storage
if storage:
assert storage.trial_fs_path
logger.debug(f"StorageContext on the TRAINABLE:\n{storage}")
self._open_logfiles(stdout_file, stderr_file)
self.setup(copy.deepcopy(self.config))
setup_time = time.time() - self._start_time
if setup_time > SETUP_TIME_THRESHOLD:
logger.info(
"Trainable.setup took {:.3f} seconds. If your "
"trainable is slow to initialize, consider setting "
"reuse_actors=True to reduce actor creation "
"overheads.".format(setup_time)
)
log_sys_usage = self.config.get("log_sys_usage", False)
self._monitor = UtilMonitor(start=log_sys_usage)
@classmethod
def default_resource_request(
cls, config: Dict[str, Any]
) -> Optional[PlacementGroupFactory]:
"""Provides a static resource requirement for the given configuration.
This can be overridden by sub-classes to set the correct trial resource
allocation, so the user does not need to.
.. testcode::
@classmethod
def default_resource_request(cls, config):
return PlacementGroupFactory([{"CPU": 1}, {"CPU": 1}])
Args:
config[Dict[str, Any]]: The Trainable's config dict.
Returns:
PlacementGroupFactory: A PlacementGroupFactory consumed by Tune
for queueing.
"""
return None
@classmethod
def resource_help(cls, config: Dict):
"""Returns a help string for configuring this trainable's resources.
Args:
config: The Trainer's config dict.
"""
return ""
def get_current_ip_pid(self):
return self._local_ip, os.getpid()
def get_auto_filled_metrics(
self,
now: Optional[datetime] = None,
time_this_iter: Optional[float] = None,
timestamp: Optional[int] = None,
debug_metrics_only: bool = False,
) -> dict:
"""Return a dict with metrics auto-filled by the trainable.
If ``debug_metrics_only`` is True, only metrics that don't
require at least one iteration will be returned
(``ray.tune.result.DEBUG_METRICS``).
"""
if now is None:
now = datetime.today()
autofilled = {
TRIAL_ID: self.trial_id,
"date": now.strftime("%Y-%m-%d_%H-%M-%S"),
"timestamp": timestamp if timestamp else int(time.mktime(now.timetuple())),
TIME_THIS_ITER_S: time_this_iter,
TIME_TOTAL_S: self._time_total,
PID: os.getpid(),
HOSTNAME: platform.node(),
NODE_IP: self._local_ip,
"config": self.config,
"time_since_restore": self._time_since_restore,
"iterations_since_restore": self._iterations_since_restore,
}
if self._timesteps_since_restore:
autofilled["timesteps_since_restore"] = self._timesteps_since_restore
if debug_metrics_only:
autofilled = {k: v for k, v in autofilled.items() if k in DEBUG_METRICS}
return autofilled
def is_actor(self):
try:
actor_id = ray._private.worker.global_worker.actor_id
return actor_id != actor_id.nil()
except Exception:
# If global_worker is not instantiated, we're not in an actor
return False
def train_buffered(self, buffer_time_s: float, max_buffer_length: int = 1000):
"""Runs multiple iterations of training.
Calls ``train()`` internally. Collects and combines multiple results.
This function will run ``self.train()`` repeatedly until one of
the following conditions is met: 1) the maximum buffer length is
reached, 2) the maximum buffer time is reached, or 3) a checkpoint
was created. Even if the maximum time is reached, it will always
block until at least one result is received.
Args:
buffer_time_s: Maximum time to buffer. The next result
received after this amount of time has passed will return
the whole buffer.
max_buffer_length: Maximum number of results to buffer.
"""
results = []
now = time.time()
send_buffer_at = now + buffer_time_s
while now < send_buffer_at or not results: # At least one result
result = self.train()
results.append(result)
if result.get(DONE, False):
# If the trial is done, return
break
elif result.get(SHOULD_CHECKPOINT, False):
# If a checkpoint was created, return
break
elif result.get(RESULT_DUPLICATE):
# If the function API trainable completed, return
break
elif len(results) >= max_buffer_length:
# If the buffer is full, return
break
now = time.time()
return results
def train(self):
"""Runs one logical iteration of training.
Calls ``step()`` internally. Subclasses should override ``step()``
instead to return results.
This method automatically fills the following fields in the result:
`done` (bool): training is terminated. Filled only if not provided.
`time_this_iter_s` (float): Time in seconds this iteration
took to run. This may be overridden in order to override the
system-computed time difference.
`time_total_s` (float): Accumulated time in seconds for this
entire experiment.
`training_iteration` (int): The index of this
training iteration, e.g. call to train(). This is incremented
after `step()` is called.
`pid` (str): The pid of the training process.
`date` (str): A formatted date of when the result was processed.
`timestamp` (str): A UNIX timestamp of when the result
was processed. This may be overridden.
`hostname` (str): Hostname of the machine hosting the training
process.
`node_ip` (str): Node ip of the machine hosting the training
process.
Returns:
A dict that describes training progress.
"""
start = time.time()
try:
result = self.step()
except Exception as e:
skipped = skip_exceptions(e)
raise skipped from exception_cause(skipped)
assert isinstance(result, dict), "step() needs to return a dict."
# We do not modify internal state nor update this result if duplicate.
if RESULT_DUPLICATE in result:
return result
result = result.copy()
self._iteration += 1
self._iterations_since_restore += 1
if result.get(TIME_THIS_ITER_S) is not None:
time_this_iter = result[TIME_THIS_ITER_S]
else:
time_this_iter = time.time() - start
self._time_total += time_this_iter
self._time_since_restore += time_this_iter
result_timestamp = result.get(TIMESTAMP, None)
result.setdefault(DONE, False)
# self._timesteps_total should only be tracked if increments are provided
if result.get(TIMESTEPS_THIS_ITER) is not None:
if self._timesteps_total is None:
self._timesteps_total = 0
self._timesteps_total += result[TIMESTEPS_THIS_ITER]
self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]
# self._episodes_total should only be tracked if increments provided
if result.get(EPISODES_THIS_ITER) is not None:
if self._episodes_total is None:
self._episodes_total = 0
self._episodes_total += result[EPISODES_THIS_ITER]
# self._timesteps_total should not override user-provided total
if self._timesteps_total is not None:
result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)
if self._episodes_total is not None:
result.setdefault(EPISODES_TOTAL, self._episodes_total)
result.setdefault(TRAINING_ITERATION, self._iteration)
now = datetime.today()
result.update(
self.get_auto_filled_metrics(
now=now, time_this_iter=time_this_iter, timestamp=result_timestamp
)
)
monitor_data = self._monitor.get_data()
if monitor_data:
result.update(monitor_data)
self.log_result(result)
if self._stdout_context:
self._stdout_stream.flush()
if self._stderr_context:
self._stderr_stream.flush()
self._last_result = result
if self._storage:
# Launch background tasks to sync artifacts at some specified frequency.
self._storage.persist_artifacts()
return result
def get_state(self):
return {
"iteration": self._iteration,
"timesteps_total": self._timesteps_total,
"time_total": self._time_total,
"episodes_total": self._episodes_total,
"last_result": self._last_result,
"ray_version": ray.__version__,
}
    def _report_class_trainable_checkpoint(
        self, checkpoint_dir: str, checkpoint_dict_or_path: Union[str, Dict]
    ) -> _TrainingResult:
        """Report a checkpoint saved via Trainable.save_checkpoint.
        Need to handle both dict or path checkpoint returned by the user's
        `save_checkpoint` method.
        This is to get class trainables to work with storage backend used by
        function trainables.
        This basically re-implements `tune.report` for class trainables,
        making sure to persist the checkpoint to storage.
        """
        # Dict checkpoints are pickled into a well-known file inside the
        # checkpoint directory so restore() can detect and reload them as dicts.
        if isinstance(checkpoint_dict_or_path, dict):
            with Path(checkpoint_dir, _DICT_CHECKPOINT_FILE_NAME).open("wb") as f:
                ray_pickle.dump(checkpoint_dict_or_path, f)
        elif isinstance(checkpoint_dict_or_path, str):
            if checkpoint_dict_or_path != checkpoint_dir:
                raise ValueError(
                    "The returned checkpoint path from `save_checkpoint` "
                    "must be None or the same as the provided path argument."
                    f"Got {checkpoint_dict_or_path} != {checkpoint_dir}"
                )
        local_checkpoint = ray.tune.Checkpoint.from_directory(checkpoint_dir)
        # Attach the most recent training metrics (if any) to the checkpoint.
        metrics = self._last_result.copy() if self._last_result else {}
        if self._storage:
            # The checkpoint index is updated with the current result.
            # NOTE: This is no longer using "iteration" as the folder indexing
            # to be consistent with fn trainables.
            self._storage._update_checkpoint_index(metrics)
            persisted_checkpoint = self._storage.persist_current_checkpoint(
                local_checkpoint
            )
            checkpoint_result = _TrainingResult(
                checkpoint=persisted_checkpoint, metrics=metrics
            )
            # Persist trial artifacts to storage.
            self._storage.persist_artifacts(
                force=self._storage.sync_config.sync_artifacts_on_checkpoint
            )
        else:
            # `storage=None` only happens when initializing the
            # Trainable manually, outside of Tune/Train.
            # In this case, no storage is set, so the default behavior
            # is to just not upload anything and report a local checkpoint.
            # This is fine for the main use case of local debugging.
            checkpoint_result = _TrainingResult(
                checkpoint=local_checkpoint, metrics=metrics
            )
        return checkpoint_result
    @DeveloperAPI
    def save(self, checkpoint_dir: Optional[str] = None) -> _TrainingResult:
        """Saves the current model state to a checkpoint.
        Subclasses should override ``save_checkpoint()`` instead to save state.
        Args:
            checkpoint_dir: Optional dir to place the checkpoint.
        Returns:
            The given or created checkpoint directory.
        Note the return value matches up with what is expected of `restore()`.
        """
        if not isinstance(self, ray.tune.trainable.FunctionTrainable):
            # Use a temporary directory if no checkpoint_dir is provided.
            use_temp_dir = not checkpoint_dir
            checkpoint_dir = checkpoint_dir or tempfile.mkdtemp()
            os.makedirs(checkpoint_dir, exist_ok=True)
            # Class trainables return either a dict or the checkpoint dir path.
            checkpoint_dict_or_path = self.save_checkpoint(checkpoint_dir)
            checkpoint_result = self._report_class_trainable_checkpoint(
                checkpoint_dir, checkpoint_dict_or_path
            )
            # Clean up the temporary directory, since it's already been
            # reported + persisted to storage. If no storage is set, the user is
            # running the Trainable locally and is responsible for cleaning
            # up the checkpoint directory themselves.
            if use_temp_dir and self._storage:
                shutil.rmtree(checkpoint_dir, ignore_errors=True)
        else:
            # Function trainables already produce a _TrainingResult directly.
            checkpoint_result: _TrainingResult = self.save_checkpoint(None)
            assert isinstance(checkpoint_result, _TrainingResult)
            assert self._last_result
            # Update the checkpoint result to include auto-filled metrics.
            checkpoint_result.metrics.update(self._last_result)
        return checkpoint_result
    @DeveloperAPI
    def restore(
        self, checkpoint_path: Union[str, "ray.tune.Checkpoint", _TrainingResult]
    ):
        """Restores training state from a given model checkpoint.
        These checkpoints are returned from calls to save().
        Subclasses should override ``load_checkpoint()`` instead to
        restore state.
        This method restores additional metadata saved with the checkpoint.
        `checkpoint_path` should match with the return from ``save()``.
        Args:
            checkpoint_path: training result that was returned by a
                previous call to `save()`.
        """
        # TODO(justinvyu): This also supports restoring from a Checkpoint object
        # or a path, which are legacy APIs that RLlib depends on.
        # RLlib should remove this dependency since `restore` is a DeveloperAPI.
        if isinstance(checkpoint_path, str):
            checkpoint_path = ray.tune.Checkpoint.from_directory(checkpoint_path)
        if isinstance(checkpoint_path, ray.tune.Checkpoint):
            checkpoint_result = _TrainingResult(checkpoint=checkpoint_path, metrics={})
        else:
            checkpoint_result: _TrainingResult = checkpoint_path
        assert isinstance(checkpoint_result, _TrainingResult), type(checkpoint_result)
        checkpoint = checkpoint_result.checkpoint
        checkpoint_metrics = checkpoint_result.metrics
        # Restore bookkeeping counters from checkpoint metrics; the
        # "since restore" counters always start from zero.
        self._iteration = checkpoint_metrics.get(TRAINING_ITERATION, 0)
        self._time_total = checkpoint_metrics.get(TIME_TOTAL_S, 0)
        self._time_since_restore = 0.0
        self._iterations_since_restore = 0
        # TODO(justinvyu): This stuff should be moved to rllib.
        self._timesteps_total = checkpoint_metrics.get(TIMESTEPS_TOTAL)
        self._timesteps_since_restore = 0
        self._episodes_total = checkpoint_metrics.get(EPISODES_TOTAL)
        if not _exists_at_fs_path(checkpoint.filesystem, checkpoint.path):
            raise ValueError(
                f"Could not recover from checkpoint as it does not exist on "
                f"storage anymore. "
                f"Got storage fs type `{checkpoint.filesystem.type_name}` and "
                f"path: {checkpoint.path}"
            )
        # TODO(justinvyu): [cls_trainable_support]
        # This is to conform to the public class Trainable `load_checkpoint` API.
        if not isinstance(self, ray.tune.trainable.FunctionTrainable):
            # Need to convert Checkpoint -> local path or dict
            # (depending on what the output of save_checkpoint was)
            with checkpoint.as_directory() as checkpoint_dir:
                checkpoint_path = Path(checkpoint_dir)
                dict_checkpoint_file = checkpoint_path / _DICT_CHECKPOINT_FILE_NAME
                if dict_checkpoint_file.exists():
                    # If this was a dict checkpoint, load it as a dict
                    with open(dict_checkpoint_file, "rb") as f:
                        checkpoint_dict = ray_pickle.load(f)
                    self.load_checkpoint(checkpoint_dict)
                else:
                    self.load_checkpoint(checkpoint_dir)
        else:
            # TODO(justinvyu): The Function Trainable case doesn't conform
            # to the load_checkpoint API at the moment.
            self.load_checkpoint(checkpoint_result)
        self._restored = True
        logger.info(f"Restored on {self._local_ip} from checkpoint: {checkpoint}")
def export_model(
self, export_formats: Union[List[str], str], export_dir: Optional[str] = None
):
"""Exports model based on export_formats.
Subclasses should override _export_model() to actually
export model to local directory.
Args:
export_formats: Format or list of (str) formats
that should be exported.
export_dir: Optional dir to place the exported model.
Defaults to self.logdir.
Returns:
A dict that maps ExportFormats to successfully exported models.
"""
if isinstance(export_formats, str):
export_formats = [export_formats]
export_dir = export_dir or self.logdir
return self._export_model(export_formats, export_dir)
    def reset(self, new_config, logger_creator=None, storage=None):
        """Resets trial for use with new config.
        Subclasses should override reset_config() to actually
        reset actor behavior for the new config."""
        self.config = new_config
        self._storage = storage
        # TRIAL_INFO is injected into the config by Tune; pop it so it is not
        # treated as a hyperparameter by reset_config()/setup().
        trial_info = new_config.pop(TRIAL_INFO, None)
        if trial_info:
            self._trial_info = trial_info
        self._result_logger.flush()
        self._result_logger.close()
        if logger_creator:
            logger.debug("Logger reset.")
            self._create_logger(new_config.copy(), logger_creator)
        else:
            logger.debug(
                "Did not reset logger. Got: "
                f"trainable.reset(logger_creator={logger_creator})."
            )
        # Re-open stdout/stderr redirection against the (possibly new) files.
        stdout_file = new_config.pop(STDOUT_FILE, None)
        stderr_file = new_config.pop(STDERR_FILE, None)
        self._close_logfiles()
        self._open_logfiles(stdout_file, stderr_file)
        success = self.reset_config(new_config)
        if not success:
            return False
        # Reset attributes. Will be overwritten by `restore` if a checkpoint
        # is provided.
        self._iteration = 0
        self._time_total = 0.0
        self._timesteps_total = None
        self._episodes_total = None
        self._time_since_restore = 0.0
        self._timesteps_since_restore = 0
        self._iterations_since_restore = 0
        self._restored = False
        return True
def reset_config(self, new_config: Dict) -> bool:
"""Resets configuration without restarting the trial.
This method is optional, but can be implemented to speed up algorithms
such as PBT, and to allow performance optimizations such as running
experiments with reuse_actors=True.
Args:
new_config: Updated hyperparameter configuration
for the trainable.
Returns:
True if reset was successful else False.
"""
return False
    def _create_logger(
        self,
        config: Dict[str, Any],
        logger_creator: Optional[Callable[[Dict[str, Any]], "Logger"]] = None,
    ):
        """Create logger from logger creator.
        Sets _logdir and _result_logger.
        `_logdir` is the **per trial** directory for the Trainable.
        """
        if logger_creator:
            self._result_logger = logger_creator(config)
            self._logdir = self._result_logger.logdir
        else:
            from ray.tune.logger import UnifiedLogger
            # Fall back to a timestamped temporary directory under the
            # default storage path when no custom logger creator is given.
            logdir_prefix = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
            try_to_create_directory(DEFAULT_STORAGE_PATH)
            self._logdir = tempfile.mkdtemp(
                prefix=logdir_prefix, dir=DEFAULT_STORAGE_PATH
            )
            self._result_logger = UnifiedLogger(config, self._logdir, loggers=None)
    def _open_logfiles(self, stdout_file, stderr_file):
        """Create loggers. Open stdout and stderr logfiles."""
        if stdout_file:
            # Tee process stdout so output is both shown and appended to file.
            stdout_path = (Path(self._logdir) / stdout_file).expanduser().as_posix()
            self._stdout_fp = open(stdout_path, "a+")
            self._stdout_stream = Tee(sys.stdout, self._stdout_fp)
            self._stdout_context = redirect_stdout(self._stdout_stream)
            self._stdout_context.__enter__()
        if stderr_file:
            # Same treatment for stderr.
            stderr_path = (Path(self._logdir) / stderr_file).expanduser().as_posix()
            self._stderr_fp = open(stderr_path, "a+")
            self._stderr_stream = Tee(sys.stderr, self._stderr_fp)
            self._stderr_context = redirect_stderr(self._stderr_stream)
            self._stderr_context.__enter__()
            # Add logging handler to root ray logger
            formatter = logging.Formatter(
                "[%(levelname)s %(asctime)s] "
                "%(filename)s: %(lineno)d "
                "%(message)s"
            )
            self._stderr_logging_handler = logging.StreamHandler(self._stderr_fp)
            self._stderr_logging_handler.setFormatter(formatter)
            ray.logger.addHandler(self._stderr_logging_handler)
    def _close_logfiles(self):
        """Close stdout and stderr logfiles."""
        if self._stderr_logging_handler:
            ray.logger.removeHandler(self._stderr_logging_handler)
        if self._stdout_context:
            self._stdout_stream.flush()
            self._stdout_context.__exit__(None, None, None)
            self._stdout_fp.close()
            # Mark redirection inactive so reset()/stop() can re-open safely.
            self._stdout_context = None
        if self._stderr_context:
            self._stderr_stream.flush()
            self._stderr_context.__exit__(None, None, None)
            self._stderr_fp.close()
            self._stderr_context = None
    def stop(self):
        """Releases all resources used by this trainable.
        Calls ``Trainable.cleanup`` internally. Subclasses should override
        ``Trainable.cleanup`` for custom cleanup procedures.
        """
        self._result_logger.flush()
        self._result_logger.close()
        # Stop the usage-monitor thread before running user cleanup.
        if self._monitor.is_alive():
            self._monitor.stop()
            self._monitor.join()
        self.cleanup()
        self._close_logfiles()
@property
def logdir(self):
"""Directory of the results and checkpoints for this Trainable.
Note that the current working directory will also be changed to this.
"""
return self._logdir
@property
def trial_name(self):
"""Trial name for the corresponding trial of this Trainable.
This is not set if not using Tune.
.. testcode::
from ray.tune import Trainable
name = Trainable().trial_name
"""
if self._trial_info:
return self._trial_info.trial_name
else:
return "default"
@property
def trial_id(self):
"""Trial ID for the corresponding trial of this Trainable.
This is not set if not using Tune.
.. testcode::
from ray.tune import Trainable
trial_id = Trainable().trial_id
"""
if self._trial_info:
return self._trial_info.trial_id
else:
return "default"
@property
def trial_resources(self) -> Optional[PlacementGroupFactory]:
"""Resources currently assigned to the trial of this Trainable.
This is not set if not using Tune.
.. testcode::
from ray.tune import Trainable
trial_resources = Trainable().trial_resources
"""
if self._trial_info:
return self._trial_info.trial_resources
else:
return None
@property
def iteration(self):
"""Current training iteration.
This value is automatically incremented every time `train()` is called
and is automatically inserted into the training result dict.
"""
return self._iteration
@property
def training_iteration(self):
"""Current training iteration (same as `self.iteration`).
This value is automatically incremented every time `train()` is called
and is automatically inserted into the training result dict.
"""
return self._iteration
def get_config(self):
"""Returns configuration passed in by Tune."""
return self.config
def step(self):
"""Subclasses should override this to implement train().
The return value will be automatically passed to the loggers. Users
can also return `tune.result.DONE` or `tune.result.SHOULD_CHECKPOINT`
as a key to manually trigger termination or checkpointing of this
trial. Note that manual checkpointing only works when subclassing
Trainables.
.. versionadded:: 0.8.7
Returns:
A dict that describes training progress.
"""
raise NotImplementedError
def save_checkpoint(self, checkpoint_dir: str) -> Optional[Dict]:
"""Subclasses should override this to implement ``save()``.
Warning:
Do not rely on absolute paths in the implementation of
``Trainable.save_checkpoint`` and ``Trainable.load_checkpoint``.
Use ``validate_save_restore`` to catch ``Trainable.save_checkpoint``/
``Trainable.load_checkpoint`` errors before execution.
>>> from ray.tune.utils import validate_save_restore
>>> MyTrainableClass = ... # doctest: +SKIP
>>> validate_save_restore(MyTrainableClass) # doctest: +SKIP
.. versionadded:: 0.8.7
Args:
checkpoint_dir: The directory where the checkpoint
file must be stored. In a Tune run, if the trial is paused,
the provided path may be temporary and moved.
Returns:
A dict or None. If dict, the return value will
be automatically serialized by Tune. In that case,
``Trainable.load_checkpoint()`` will receive the dict upon restore.
Example:
>>> trainable, trainable1, trainable2 = ... # doctest: +SKIP
>>> print(trainable1.save_checkpoint("/tmp/checkpoint_1")) # doctest: +SKIP
"/tmp/checkpoint_1"
>>> print(trainable2.save_checkpoint("/tmp/checkpoint_2")) # doctest: +SKIP
{"some": "data"}
>>> trainable.save_checkpoint("/tmp/bad_example") # doctest: +SKIP
"/tmp/NEW_CHECKPOINT_PATH/my_checkpoint_file" # This will error.
"""
raise NotImplementedError
def load_checkpoint(self, checkpoint: Optional[Dict]):
"""Subclasses should override this to implement restore().
Warning:
In this method, do not rely on absolute paths. The absolute
path of the checkpoint_dir used in ``Trainable.save_checkpoint``
may be changed.
If ``Trainable.save_checkpoint`` returned a prefixed string, the
prefix of the checkpoint string returned by
``Trainable.save_checkpoint`` may be changed.
This is because trial pausing depends on temporary directories.
The directory structure under the checkpoint_dir provided to
``Trainable.save_checkpoint`` is preserved.
See the examples below.
Example:
>>> import os
>>> from ray.tune.trainable import Trainable
>>> class Example(Trainable):
... def save_checkpoint(self, checkpoint_path):
... my_checkpoint_path = os.path.join(checkpoint_path, "my/path")
... return my_checkpoint_path
... def load_checkpoint(self, my_checkpoint_path):
... print(my_checkpoint_path)
>>> trainer = Example()
>>> # This is used when PAUSED.
>>> checkpoint_result = trainer.save() # doctest: +SKIP
>>> trainer.restore(checkpoint_result) # doctest: +SKIP
If `Trainable.save_checkpoint` returned a dict, then Tune will directly pass
the dict data as the argument to this method.
Example:
>>> from ray.tune.trainable import Trainable
>>> class Example(Trainable):
... def save_checkpoint(self, checkpoint_path):
... return {"my_data": 1}
... def load_checkpoint(self, checkpoint_dict):
... print(checkpoint_dict["my_data"])
.. versionadded:: 0.8.7
Args:
checkpoint: If dict, the return value is as
returned by ``save_checkpoint``. Otherwise, the directory
the checkpoint was stored in.
"""
raise NotImplementedError
def setup(self, config: Dict):
"""Subclasses should override this for custom initialization.
.. versionadded:: 0.8.7
Args:
config: Hyperparameters and other configs given.
Copy of `self.config`.
"""
pass
def log_result(self, result: Dict):
"""Subclasses can optionally override this to customize logging.
The logging here is done on the worker process rather than
the driver.
.. versionadded:: 0.8.7
Args:
result: Training result returned by step().
"""
self._result_logger.on_result(result)
def cleanup(self):
"""Subclasses should override this for any cleanup on stop.
If any Ray actors are launched in the Trainable (i.e., with a RLlib
trainer), be sure to kill the Ray actor process here.
This process should be lightweight. Per default,
You can kill a Ray actor by calling `ray.kill(actor)`
on the actor or removing all references to it and waiting for garbage
collection
.. versionadded:: 0.8.7
"""
pass
def _export_model(self, export_formats: List[str], export_dir: str):
"""Subclasses should override this to export model.
Args:
export_formats: List of formats that should be exported.
export_dir: Directory to place exported models.
Return:
A dict that maps ExportFormats to successfully exported models.
"""
return {}
def _implements_method(self, key):
return hasattr(self, key) and callable(getattr(self, key))
| Trainable |
python | scikit-learn__scikit-learn | sklearn/metrics/tests/test_score_objects.py | {
"start": 5576,
"end": 58791
} | class ____:
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
    # Smoke-test: every registered scorer must have a working repr.
    for scorer_name in get_scorer_names():
        repr(get_scorer(scorer_name))
def test_repr_partial():
    # The repr of a scorer wrapping a functools.partial should show the
    # partial together with its bound keyword arguments.
    wrapped_metric = partial(precision_score, pos_label=1)
    wrapped_scorer = make_scorer(wrapped_metric)
    pattern = (
        "functools\\.partial\\(<function\\ precision_score\\ at\\ .*>,\\ pos_label=1\\)"
    )
    assert re.search(pattern, repr(wrapped_scorer))
def check_scoring_validator_for_single_metric_usecases(scoring_validator):
    """Exercise all single-metric branches of a `check_scoring`-like validator."""
    # Test all branches of single metric usecases
    estimator = EstimatorWithFitAndScore()
    estimator.fit([[1]], [1])
    # An estimator exposing its own `score` method gets a passthrough scorer.
    scorer = scoring_validator(estimator)
    assert isinstance(scorer, _PassthroughScorer)
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    # No `score` method and no explicit scoring -> must raise TypeError.
    pattern = (
        r"If no scoring is specified, the estimator passed should have"
        r" a 'score' method\. The estimator .* does not\."
    )
    with pytest.raises(TypeError, match=pattern):
        scoring_validator(estimator)
    scorer = scoring_validator(estimator, scoring="accuracy")
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
    estimator = EstimatorWithFit()
    scorer = scoring_validator(estimator, scoring="accuracy")
    assert isinstance(scorer, _Scorer)
    assert scorer._response_method == "predict"
    # Test the allow_none parameter for check_scoring alone
    if scoring_validator is check_scoring:
        estimator = EstimatorWithFit()
        scorer = scoring_validator(estimator, allow_none=True)
        assert scorer is None
@pytest.mark.parametrize(
    "scoring",
    (
        ("accuracy",),
        ["precision"],
        {"acc": "accuracy", "precision": "precision"},
        ("accuracy", "precision"),
        ["precision", "accuracy"],
        {
            "accuracy": make_scorer(accuracy_score),
            "precision": make_scorer(precision_score),
        },
    ),
    ids=[
        "single_tuple",
        "single_list",
        "dict_str",
        "multi_tuple",
        "multi_list",
        "dict_callable",
    ],
)
def test_check_scoring_and_check_multimetric_scoring(scoring):
    """Any accepted multimetric spec must yield a dict of predict-based scorers."""
    check_scoring_validator_for_single_metric_usecases(check_scoring)
    # To make sure the check_scoring is correctly applied to the constituent
    # scorers
    estimator = LinearSVC(random_state=0)
    estimator.fit([[1], [2], [3]], [1, 1, 0])
    scorers = _check_multimetric_scoring(estimator, scoring)
    assert isinstance(scorers, dict)
    assert sorted(scorers.keys()) == sorted(list(scoring))
    assert all([isinstance(scorer, _Scorer) for scorer in list(scorers.values())])
    assert all(scorer._response_method == "predict" for scorer in scorers.values())
    # Spot-check scorer outputs for the metric names present in this spec.
    if "acc" in scoring:
        assert_almost_equal(
            scorers["acc"](estimator, [[1], [2], [3]], [1, 0, 0]), 2.0 / 3.0
        )
    if "accuracy" in scoring:
        assert_almost_equal(
            scorers["accuracy"](estimator, [[1], [2], [3]], [1, 0, 0]), 2.0 / 3.0
        )
    if "precision" in scoring:
        assert_almost_equal(
            scorers["precision"](estimator, [[1], [2], [3]], [1, 0, 0]), 0.5
        )
@pytest.mark.parametrize(
    "scoring, msg",
    [
        (
            (make_scorer(precision_score), make_scorer(accuracy_score)),
            "One or more of the elements were callables",
        ),
        ([5], "Non-string types were found"),
        ((make_scorer(precision_score),), "One or more of the elements were callables"),
        ((), "Empty list was given"),
        (("f1", "f1"), "Duplicate elements were found"),
        ({4: "accuracy"}, "Non-string types were found in the keys"),
        ({}, "An empty dict was passed"),
    ],
    ids=[
        "tuple of callables",
        "list of int",
        "tuple of one callable",
        "empty tuple",
        "non-unique str",
        "non-string key dict",
        "empty dict",
    ],
)
def test_check_scoring_and_check_multimetric_scoring_errors(scoring, msg):
    """Invalid multimetric scoring specs must raise a descriptive ValueError."""
    # Make sure it raises errors when scoring parameter is not valid.
    # More weird corner cases are tested at test_validation.py
    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    with pytest.raises(ValueError, match=msg):
        _check_multimetric_scoring(estimator, scoring=scoring)
def test_check_scoring_gridsearchcv():
    """check_scoring should work on meta-estimators and pipelines."""
    # test that check_scoring works on GridSearchCV and pipeline.
    # slightly redundant non-regression test.
    grid = GridSearchCV(LinearSVC(), param_grid={"C": [0.1, 1]}, cv=3)
    scorer = check_scoring(grid, scoring="f1")
    assert isinstance(scorer, _Scorer)
    assert scorer._response_method == "predict"
    pipe = make_pipeline(LinearSVC())
    scorer = check_scoring(pipe, scoring="f1")
    assert isinstance(scorer, _Scorer)
    assert scorer._response_method == "predict"
    # check that cross_val_score definitely calls the scorer
    # and doesn't make any assumptions about the estimator apart from having a
    # fit.
    scores = cross_val_score(
        EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1], scoring=DummyScorer(), cv=3
    )
    assert_array_equal(scores, 1)
@pytest.mark.parametrize(
    "scorer_name, metric",
    [
        ("f1", f1_score),
        ("f1_weighted", partial(f1_score, average="weighted")),
        ("f1_macro", partial(f1_score, average="macro")),
        ("f1_micro", partial(f1_score, average="micro")),
        ("precision", precision_score),
        ("precision_weighted", partial(precision_score, average="weighted")),
        ("precision_macro", partial(precision_score, average="macro")),
        ("precision_micro", partial(precision_score, average="micro")),
        ("recall", recall_score),
        ("recall_weighted", partial(recall_score, average="weighted")),
        ("recall_macro", partial(recall_score, average="macro")),
        ("recall_micro", partial(recall_score, average="micro")),
        ("jaccard", jaccard_score),
        ("jaccard_weighted", partial(jaccard_score, average="weighted")),
        ("jaccard_macro", partial(jaccard_score, average="macro")),
        ("jaccard_micro", partial(jaccard_score, average="micro")),
        ("top_k_accuracy", top_k_accuracy_score),
        ("matthews_corrcoef", matthews_corrcoef),
    ],
)
def test_classification_binary_scores(scorer_name, metric):
    """Each named binary-classification scorer must match its metric function."""
    # check consistency between score and scorer for scores supporting
    # binary classification.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)
    score = get_scorer(scorer_name)(clf, X_test, y_test)
    expected_score = metric(y_test, clf.predict(X_test))
    assert_almost_equal(score, expected_score)
@pytest.mark.parametrize(
    "scorer_name, metric",
    [
        ("accuracy", accuracy_score),
        ("balanced_accuracy", balanced_accuracy_score),
        ("f1_weighted", partial(f1_score, average="weighted")),
        ("f1_macro", partial(f1_score, average="macro")),
        ("f1_micro", partial(f1_score, average="micro")),
        ("precision_weighted", partial(precision_score, average="weighted")),
        ("precision_macro", partial(precision_score, average="macro")),
        ("precision_micro", partial(precision_score, average="micro")),
        ("recall_weighted", partial(recall_score, average="weighted")),
        ("recall_macro", partial(recall_score, average="macro")),
        ("recall_micro", partial(recall_score, average="micro")),
        ("jaccard_weighted", partial(jaccard_score, average="weighted")),
        ("jaccard_macro", partial(jaccard_score, average="macro")),
        ("jaccard_micro", partial(jaccard_score, average="micro")),
    ],
)
def test_classification_multiclass_scores(scorer_name, metric):
    """Each named multiclass scorer must match its metric function."""
    # check consistency between score and scorer for scores supporting
    # multiclass classification.
    X, y = make_classification(
        n_classes=3, n_informative=3, n_samples=30, random_state=0
    )
    # use `stratify` = y to ensure train and test sets capture all classes
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, random_state=0, stratify=y
    )
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(X_train, y_train)
    score = get_scorer(scorer_name)(clf, X_test, y_test)
    expected_score = metric(y_test, clf.predict(X_test))
    assert score == pytest.approx(expected_score)
def test_custom_scorer_pickling():
    # A scorer built via make_scorer must survive a pickle round-trip and
    # keep producing the same scores.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)
    scorer = make_scorer(fbeta_score, beta=2)
    score_before = scorer(clf, X_test, y_test)
    roundtripped_scorer = pickle.loads(pickle.dumps(scorer))
    score_after = roundtripped_scorer(clf, X_test, y_test)
    assert score_before == pytest.approx(score_after)
    # smoke test the repr:
    repr(fbeta_score)
def test_regression_scorers():
    # The "r2" scorer must agree with calling r2_score directly on the
    # regressor's predictions.
    diabetes = load_diabetes()
    X, y = diabetes.data, diabetes.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    model = Ridge()
    model.fit(X_train, y_train)
    scorer_value = get_scorer("r2")(model, X_test, y_test)
    direct_value = r2_score(y_test, model.predict(X_test))
    assert_almost_equal(scorer_value, direct_value)
def test_thresholded_scorers():
    """Scorers consuming continuous outputs (decision_function/predict_proba)."""
    # Test scorers that take thresholds.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    # decision_function and predict_proba must yield the same ROC-AUC here.
    score1 = get_scorer("roc_auc")(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)
    logscore = get_scorer("neg_log_loss")(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)
    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer("roc_auc")(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    err_msg = "DecisionTreeRegressor has none of the following attributes"
    with pytest.raises(AttributeError, match=err_msg):
        get_scorer("roc_auc")(reg, X_test, y_test)
    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    with pytest.raises(ValueError, match="multi_class must be in \\('ovo', 'ovr'\\)"):
        get_scorer("roc_auc")(clf, X_test, y_test)
    # test error is raised with a single class present in model
    # (predict_proba shape is not suitable for binary auc)
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = DecisionTreeClassifier()
    clf.fit(X_train, np.zeros_like(y_train))
    with pytest.raises(ValueError, match="need classifier with two classes"):
        get_scorer("roc_auc")(clf, X_test, y_test)
    # for proba scorers
    with pytest.raises(ValueError, match="need classifier with two classes"):
        get_scorer("neg_log_loss")(clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    """ROC-AUC scorer on multilabel-indicator targets for several estimators."""
    # Test that the scorer work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifier
    X, y = make_multilabel_classification(allow_unlabeled=False, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer("roc_auc")(clf, X_test, y_test)
    # Stack the positive-class probability column of each output.
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
    assert_almost_equal(score1, score2)
    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer("roc_auc")(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)
    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer("roc_auc")(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_supervised_cluster_scorers():
    # Each supervised clustering scorer must match its metric applied to the
    # gold-standard labels and the fitted model's predictions.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3, n_init="auto")
    km.fit(X_train)
    for scorer_name in CLUSTER_SCORERS:
        via_scorer = get_scorer(scorer_name)(km, X_test, y_test)
        via_metric = getattr(cluster_module, scorer_name)(y_test, km.predict(X_test))
        assert_almost_equal(via_scorer, via_metric)
def test_raises_on_score_list():
    # A scorer returning an array of per-class scores (average=None) is not
    # valid for model selection and must raise.
    X, y = make_blobs(random_state=0)
    unaveraged_f1 = make_scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    with pytest.raises(ValueError):
        cross_val_score(clf, X, y, scoring=unaveraged_f1)
    search = GridSearchCV(
        clf, scoring=unaveraged_f1, param_grid={"max_depth": [1, 2]}
    )
    with pytest.raises(ValueError):
        search.fit(X, y)
def test_classification_scorer_sample_weight():
    """All classification scorers accept sample_weight (or fail helpfully)."""
    # Test that classification scorers support sample_weight or raise sensible
    # errors
    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0], random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
    # Zero-weighting the first 10 samples must be equivalent to dropping them.
    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0
    # get sensible estimators for each metric
    estimator = _make_estimators(X_train, y_train, y_ml_train)
    for name in get_scorer_names():
        scorer = get_scorer(name)
        if name in REGRESSION_SCORERS:
            # skip the regression scores
            continue
        if name == "top_k_accuracy":
            # in the binary case k > 1 will always lead to a perfect score
            scorer._kwargs = {"k": 1}
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(
                estimator[name], X_test, target, sample_weight=sample_weight
            )
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            # this should not raise. sample_weight should be ignored if None.
            _ = scorer(estimator[name], X_test[:10], target[:10], sample_weight=None)
            assert weighted != unweighted, (
                f"scorer {name} behaves identically when called with "
                f"sample weights: {weighted} vs {unweighted}"
            )
            assert_almost_equal(
                weighted,
                ignored,
                err_msg=(
                    f"scorer {name} behaves differently "
                    "when ignoring samples and setting "
                    f"sample_weight to 0: {weighted} vs {ignored}"
                ),
            )
        except TypeError as e:
            # Scorers without sample_weight support must say so in the error.
            assert "sample_weight" in str(e), (
                f"scorer {name} raises unhelpful exception when called "
                f"with sample weights: {e}"
            )
def test_regression_scorer_sample_weight():
    """Check ``sample_weight`` support across all regression scorers.

    Mirrors ``test_classification_scorer_sample_weight``: weighted scores must
    differ from unweighted ones, zero weight must equal sample removal, and
    scorers without weight support must raise a helpful ``TypeError``.
    """
    # Test that regression scorers support sample_weight or raise sensible
    # errors
    # Odd number of test samples req for neg_median_absolute_error
    X, y = make_regression(n_samples=101, n_features=20, random_state=0)
    y = _require_positive_y(y)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    sample_weight = np.ones_like(y_test)
    # Odd number req for neg_median_absolute_error
    sample_weight[:11] = 0
    reg = DecisionTreeRegressor(random_state=0)
    reg.fit(X_train, y_train)
    for name in get_scorer_names():
        scorer = get_scorer(name)
        if name not in REGRESSION_SCORERS:
            # skip classification scorers
            continue
        try:
            weighted = scorer(reg, X_test, y_test, sample_weight=sample_weight)
            # `ignored` drops the zero-weight samples instead of weighting them.
            ignored = scorer(reg, X_test[11:], y_test[11:])
            unweighted = scorer(reg, X_test, y_test)
            assert weighted != unweighted, (
                f"scorer {name} behaves identically when called with "
                f"sample weights: {weighted} vs {unweighted}"
            )
            assert_almost_equal(
                weighted,
                ignored,
                err_msg=(
                    f"scorer {name} behaves differently "
                    "when ignoring samples and setting "
                    f"sample_weight to 0: {weighted} vs {ignored}"
                ),
            )
        except TypeError as e:
            # Scorers without weight support must at least fail helpfully.
            assert "sample_weight" in str(e), (
                f"scorer {name} raises unhelpful exception when called "
                f"with sample weights: {e}"
            )
@pytest.mark.parametrize("name", get_scorer_names())
def test_scorer_memmap_input(name, memmap_data_and_estimators):
    """Every scorer must return a scalar number when fed memmapped arrays."""
    # Non-regression test for #6147: some score functions would
    # return singleton memmap when computed on memmap data instead of scalar
    # float values.
    X_mm, y_mm, y_ml_mm, estimators = memmap_data_and_estimators
    if name in REQUIRE_POSITIVE_Y_SCORERS:
        # Some scorers (e.g. log-based ones) need strictly positive targets.
        y_mm_1 = _require_positive_y(y_mm)
        y_ml_mm_1 = _require_positive_y(y_ml_mm)
    else:
        y_mm_1, y_ml_mm_1 = y_mm, y_ml_mm
    # UndefinedMetricWarning for P / R scores
    with ignore_warnings():
        scorer, estimator = get_scorer(name), estimators[name]
        if name in MULTILABEL_ONLY_SCORERS:
            score = scorer(estimator, X_mm, y_ml_mm_1)
        else:
            score = scorer(estimator, X_mm, y_mm_1)
        assert isinstance(score, numbers.Number), name
def test_scoring_is_not_metric():
    """Passing a raw metric function as ``scoring`` must raise a ``ValueError``
    that points the user at ``make_scorer``."""
    cases = [
        (LogisticRegression(), f1_score),
        (LogisticRegression(), roc_auc_score),
        (Ridge(), r2_score),
        (KMeans(), cluster_module.adjusted_rand_score),
        (KMeans(), cluster_module.rand_score),
    ]
    for estimator, metric in cases:
        with pytest.raises(ValueError, match="make_scorer"):
            check_scoring(estimator, scoring=metric)
@pytest.mark.parametrize(
    (
        "scorers,expected_predict_count,"
        "expected_predict_proba_count,expected_decision_func_count"
    ),
    [
        (
            {
                "a1": "accuracy",
                "a2": "accuracy",
                "ll1": "neg_log_loss",
                "ll2": "neg_log_loss",
                "ra1": "roc_auc",
                "ra2": "roc_auc",
            },
            1,
            1,
            1,
        ),
        (["roc_auc", "accuracy"], 1, 0, 1),
        (["neg_log_loss", "accuracy"], 1, 1, 0),
    ],
)
def test_multimetric_scorer_calls_method_once(
    scorers,
    expected_predict_count,
    expected_predict_proba_count,
    expected_decision_func_count,
):
    """``_MultimetricScorer`` must call each prediction method at most once,
    sharing the result among all scorers that need it."""
    X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])
    pos_proba = np.random.rand(X.shape[0])
    proba = np.c_[1 - pos_proba, pos_proba]

    class MyClassifier(ClassifierMixin, BaseEstimator):
        # NOTE(review): despite the `_expected_` prefix, these attributes
        # record the *actual* number of calls made to each method.
        def __init__(self):
            self._expected_predict_count = 0
            self._expected_predict_proba_count = 0
            self._expected_decision_function_count = 0

        def fit(self, X, y):
            self.classes_ = np.unique(y)
            return self

        def predict(self, X):
            self._expected_predict_count += 1
            return y

        def predict_proba(self, X):
            self._expected_predict_proba_count += 1
            return proba

        def decision_function(self, X):
            self._expected_decision_function_count += 1
            return pos_proba

    mock_est = MyClassifier().fit(X, y)
    scorer_dict = _check_multimetric_scoring(LogisticRegression(), scorers)
    multi_scorer = _MultimetricScorer(scorers=scorer_dict)
    results = multi_scorer(mock_est, X, y)
    assert set(scorers) == set(results)  # compare dict keys
    assert mock_est._expected_predict_count == expected_predict_count
    assert mock_est._expected_predict_proba_count == expected_predict_proba_count
    assert mock_est._expected_decision_function_count == expected_decision_func_count
@pytest.mark.parametrize(
    "scorers",
    [
        (["roc_auc", "neg_log_loss"]),
        (
            {
                "roc_auc": make_scorer(
                    roc_auc_score,
                    response_method=["predict_proba", "decision_function"],
                ),
                "neg_log_loss": make_scorer(log_loss, response_method="predict_proba"),
            }
        ),
    ],
)
def test_multimetric_scorer_calls_method_once_classifier_no_decision(scorers):
    """When the classifier lacks ``decision_function``, both scorers fall back
    to ``predict_proba`` and the multimetric scorer must call it only once."""
    predict_proba_call_cnt = 0

    class MockKNeighborsClassifier(KNeighborsClassifier):
        # Count calls to ``predict_proba`` to verify caching by the scorer.
        def predict_proba(self, X):
            nonlocal predict_proba_call_cnt
            predict_proba_call_cnt += 1
            return super().predict_proba(X)

    X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])
    # no decision function
    clf = MockKNeighborsClassifier(n_neighbors=1)
    clf.fit(X, y)
    scorer_dict = _check_multimetric_scoring(clf, scorers)
    scorer = _MultimetricScorer(scorers=scorer_dict)
    scorer(clf, X, y)
    assert predict_proba_call_cnt == 1
def test_multimetric_scorer_calls_method_once_regressor_threshold():
    """Evaluating two regression metrics through a multimetric scorer must
    call the regressor's ``predict`` exactly once and reuse the result."""
    n_predict_calls = 0

    class CountingTreeRegressor(DecisionTreeRegressor):
        # Count every ``predict`` invocation so we can assert caching below.
        def predict(self, X):
            nonlocal n_predict_calls
            n_predict_calls += 1
            return super().predict(X)

    X = np.array([[1], [1], [0], [0], [0]])
    y = np.array([0, 1, 1, 1, 0])
    # no decision function
    regressor = CountingTreeRegressor().fit(X, y)
    scorer_dict = _check_multimetric_scoring(
        regressor, {"neg_mse": "neg_mean_squared_error", "r2": "r2"}
    )
    multi_scorer = _MultimetricScorer(scorers=scorer_dict)
    multi_scorer(regressor, X, y)
    assert n_predict_calls == 1
def test_multimetric_scorer_sanity_check():
    """The multimetric result dict must match each scorer evaluated alone,
    including when the same underlying scorer appears under several keys."""
    # scoring dictionary returned is the same as calling each scorer separately
    scorers = {
        "a1": "accuracy",
        "a2": "accuracy",
        "ll1": "neg_log_loss",
        "ll2": "neg_log_loss",
        "ra1": "roc_auc",
        "ra2": "roc_auc",
    }
    X, y = make_classification(random_state=0)
    clf = DecisionTreeClassifier()
    clf.fit(X, y)
    scorer_dict = _check_multimetric_scoring(clf, scorers)
    multi_scorer = _MultimetricScorer(scorers=scorer_dict)
    result = multi_scorer(clf, X, y)
    # Reference values: each scorer run independently on the same data.
    separate_scores = {
        name: get_scorer(name)(clf, X, y)
        for name in ["accuracy", "neg_log_loss", "roc_auc"]
    }
    for key, value in result.items():
        score_name = scorers[key]
        assert_allclose(value, separate_scores[score_name])
@pytest.mark.parametrize("raise_exc", [True, False])
def test_multimetric_scorer_exception_handling(raise_exc):
    """Check that the calling of the `_MultimetricScorer` returns
    exception messages in the result dict for the failing scorers
    in case of `raise_exc` is `False` and if `raise_exc` is `True`,
    then the proper exception is raised.
    """
    scorers = {
        "failing_1": "neg_mean_squared_log_error",
        "non_failing": "neg_median_absolute_error",
        "failing_2": "neg_mean_squared_log_error",
    }
    X, y = make_classification(
        n_samples=50, n_features=2, n_redundant=0, random_state=0
    )
    # neg_mean_squared_log_error fails if y contains values less than or equal to -1
    y *= -1
    clf = DecisionTreeClassifier().fit(X, y)
    scorer_dict = _check_multimetric_scoring(clf, scorers)
    multi_scorer = _MultimetricScorer(scorers=scorer_dict, raise_exc=raise_exc)
    error_msg = (
        "Mean Squared Logarithmic Error cannot be used when "
        "targets contain values less than or equal to -1."
    )
    if raise_exc:
        # With raise_exc=True, the first failure propagates.
        with pytest.raises(ValueError, match=error_msg):
            multi_scorer(clf, X, y)
    else:
        # With raise_exc=False, failures are stringified into the result dict
        # while the working scorer still returns a float.
        result = multi_scorer(clf, X, y)
        exception_message_1 = result["failing_1"]
        score = result["non_failing"]
        exception_message_2 = result["failing_2"]
        assert isinstance(exception_message_1, str) and error_msg in exception_message_1
        assert isinstance(score, float)
        assert isinstance(exception_message_2, str) and error_msg in exception_message_2
@pytest.mark.parametrize(
    "scorer_name, metric",
    [
        ("roc_auc_ovr", partial(roc_auc_score, multi_class="ovr")),
        ("roc_auc_ovo", partial(roc_auc_score, multi_class="ovo")),
        (
            "roc_auc_ovr_weighted",
            partial(roc_auc_score, multi_class="ovr", average="weighted"),
        ),
        (
            "roc_auc_ovo_weighted",
            partial(roc_auc_score, multi_class="ovo", average="weighted"),
        ),
    ],
)
def test_multiclass_roc_proba_scorer(scorer_name, metric):
    """Each multiclass ROC-AUC scorer must match the underlying metric
    computed directly on ``predict_proba`` output."""
    X, y = make_classification(
        n_classes=3, n_informative=3, n_samples=20, random_state=0
    )
    model = LogisticRegression().fit(X, y)
    expected_score = metric(y, model.predict_proba(X))
    actual_score = get_scorer(scorer_name)(model, X, y)
    assert actual_score == pytest.approx(expected_score)
def test_multiclass_roc_proba_scorer_label():
    """A ROC-AUC scorer built with an explicit ``labels`` list must score a
    binarized target consistently with calling the metric directly."""
    scorer = make_scorer(
        roc_auc_score,
        multi_class="ovo",
        labels=[0, 1, 2],
        response_method="predict_proba",
    )
    X, y = make_classification(
        n_classes=3, n_informative=3, n_samples=20, random_state=0
    )
    lr = LogisticRegression().fit(X, y)
    y_proba = lr.predict_proba(X)
    # Binarize the target: class 0 vs the rest.
    y_binary = y == 0
    expected_score = roc_auc_score(
        y_binary, y_proba, multi_class="ovo", labels=[0, 1, 2]
    )
    assert scorer(lr, X, y_binary) == pytest.approx(expected_score)
@pytest.mark.parametrize(
    "scorer_name",
    ["roc_auc_ovr", "roc_auc_ovo", "roc_auc_ovr_weighted", "roc_auc_ovo_weighted"],
)
def test_multiclass_roc_no_proba_scorer_errors(scorer_name):
    """Multiclass ROC-AUC scorers must raise a clear ``AttributeError`` on
    estimators without ``predict_proba``."""
    # Perceptron has no predict_proba
    X, y = make_classification(
        n_classes=3, n_informative=3, n_samples=20, random_state=0
    )
    estimator = Perceptron().fit(X, y)
    expected_msg = "Perceptron has none of the following attributes: predict_proba."
    with pytest.raises(AttributeError, match=expected_msg):
        get_scorer(scorer_name)(estimator, X, y)
@pytest.fixture
def string_labeled_classification_problem():
    """Train a classifier on binary problem with string target.

    The classifier is trained on a binary classification problem where the
    minority class of interest has a string label that is intentionally not the
    greatest class label using the lexicographic order. In this case, "cancer"
    is the positive label, and `classifier.classes_` is
    `["cancer", "not cancer"]`.

    In addition, the dataset is imbalanced to better identify problems when
    using non-symmetric performance metrics such as f1-score, average precision
    and so on.

    Returns
    -------
    classifier : estimator object
        Trained classifier on the binary problem.
    X_test : ndarray of shape (n_samples, n_features)
        Data to be used as testing set in tests.
    y_test : ndarray of shape (n_samples,), dtype=object
        Binary target where labels are strings.
    y_pred : ndarray of shape (n_samples,), dtype=object
        Prediction of `classifier` when predicting for `X_test`.
    y_pred_proba : ndarray of shape (n_samples, 2), dtype=np.float64
        Probabilities of `classifier` when predicting for `X_test`.
    y_pred_decision : ndarray of shape (n_samples,), dtype=np.float64
        Decision function values of `classifier` when predicting on `X_test`.
    """
    # Imports are local so the fixture stays self-contained.
    from sklearn.datasets import load_breast_cancer
    from sklearn.utils import shuffle

    X, y = load_breast_cancer(return_X_y=True)
    # create a highly imbalanced classification task
    idx_positive = np.flatnonzero(y == 1)
    idx_negative = np.flatnonzero(y == 0)
    idx_selected = np.hstack([idx_negative, idx_positive[:25]])
    X, y = X[idx_selected], y[idx_selected]
    X, y = shuffle(X, y, random_state=42)
    # only use 2 features to make the problem even harder
    X = X[:, :2]
    # Map the integer target onto string labels ("cancer" is the minority).
    y = np.array(["cancer" if c == 1 else "not cancer" for c in y], dtype=object)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        stratify=y,
        random_state=0,
    )
    classifier = LogisticRegression().fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    y_pred_proba = classifier.predict_proba(X_test)
    y_pred_decision = classifier.decision_function(X_test)
    return classifier, X_test, y_test, y_pred, y_pred_proba, y_pred_decision
def test_average_precision_pos_label(string_labeled_classification_problem):
    """An average-precision scorer must honor ``pos_label`` for both
    probability and decision-function outputs, and error without it."""
    # check that _Scorer will lead to the right score when passing
    # `pos_label`. Currently, only `average_precision_score` is defined to
    # be such a scorer.
    (
        clf,
        X_test,
        y_test,
        _,
        y_pred_proba,
        y_pred_decision,
    ) = string_labeled_classification_problem
    pos_label = "cancer"
    # we need to select the positive column or reverse the decision values
    y_pred_proba = y_pred_proba[:, 0]
    y_pred_decision = y_pred_decision * -1
    assert clf.classes_[0] == pos_label
    # check that when calling the scoring function, probability estimates and
    # decision values lead to the same results
    ap_proba = average_precision_score(y_test, y_pred_proba, pos_label=pos_label)
    ap_decision_function = average_precision_score(
        y_test, y_pred_decision, pos_label=pos_label
    )
    assert ap_proba == pytest.approx(ap_decision_function)
    # create a scorer which would require to pass a `pos_label`
    # check that it fails if `pos_label` is not provided
    average_precision_scorer = make_scorer(
        average_precision_score,
        response_method=("decision_function", "predict_proba"),
    )
    err_msg = "pos_label=1 is not a valid label. It should be one of "
    with pytest.raises(ValueError, match=err_msg):
        average_precision_scorer(clf, X_test, y_test)
    # otherwise, the scorer should give the same results than calling the
    # scoring function
    average_precision_scorer = make_scorer(
        average_precision_score,
        response_method=("decision_function", "predict_proba"),
        pos_label=pos_label,
    )
    ap_scorer = average_precision_scorer(clf, X_test, y_test)
    assert ap_scorer == pytest.approx(ap_proba)
    # The above scorer call is using `clf.decision_function`. We will force
    # it to use `clf.predict_proba`.
    clf_without_predict_proba = deepcopy(clf)

    def _predict_proba(self, X):
        # Stand-in that disables ``predict_proba`` on the copied classifier.
        raise NotImplementedError

    clf_without_predict_proba.predict_proba = partial(
        _predict_proba, clf_without_predict_proba
    )
    # sanity check
    with pytest.raises(NotImplementedError):
        clf_without_predict_proba.predict_proba(X_test)
    ap_scorer = average_precision_scorer(clf_without_predict_proba, X_test, y_test)
    assert ap_scorer == pytest.approx(ap_proba)
def test_brier_score_loss_pos_label(string_labeled_classification_problem):
    """A Brier-score scorer built with ``pos_label`` must match the metric
    computed on the corresponding probability column."""
    # check that _Scorer leads to the right score when `pos_label` is
    # provided. Currently only the `brier_score_loss` is defined to be such
    # a scorer.
    clf, X_test, y_test, _, y_pred_proba, _ = string_labeled_classification_problem
    pos_label = "cancer"
    assert clf.classes_[0] == pos_label
    # brier score loss is symmetric
    brier_pos_cancer = brier_score_loss(y_test, y_pred_proba[:, 0], pos_label="cancer")
    brier_pos_not_cancer = brier_score_loss(
        y_test, y_pred_proba[:, 1], pos_label="not cancer"
    )
    assert brier_pos_cancer == pytest.approx(brier_pos_not_cancer)
    brier_scorer = make_scorer(
        brier_score_loss,
        response_method="predict_proba",
        pos_label=pos_label,
    )
    assert brier_scorer(clf, X_test, y_test) == pytest.approx(brier_pos_cancer)
@pytest.mark.parametrize(
    "score_func", [f1_score, precision_score, recall_score, jaccard_score]
)
def test_non_symmetric_metric_pos_label(
    score_func, string_labeled_classification_problem
):
    """Scorers wrapping non-symmetric metrics must forward ``pos_label``."""
    # check that _Scorer leads to the right score when `pos_label` is
    # provided. We check for all possible metric supported.
    # Note: At some point we may end up having "scorer tags".
    clf, X_test, y_test, y_pred, _, _ = string_labeled_classification_problem
    pos_label = "cancer"
    assert clf.classes_[0] == pos_label
    score_pos_cancer = score_func(y_test, y_pred, pos_label="cancer")
    score_pos_not_cancer = score_func(y_test, y_pred, pos_label="not cancer")
    # Sanity check: the metric really is asymmetric on this dataset.
    assert score_pos_cancer != pytest.approx(score_pos_not_cancer)
    scorer = make_scorer(score_func, pos_label=pos_label)
    assert scorer(clf, X_test, y_test) == pytest.approx(score_pos_cancer)
@pytest.mark.parametrize(
    "scorer",
    [
        make_scorer(
            average_precision_score,
            response_method=("decision_function", "predict_proba"),
            pos_label="xxx",
        ),
        make_scorer(brier_score_loss, response_method="predict_proba", pos_label="xxx"),
        make_scorer(f1_score, pos_label="xxx"),
    ],
    ids=["non-thresholded scorer", "probability scorer", "thresholded scorer"],
)
def test_scorer_select_proba_error(scorer):
    """Scorers configured with an unknown ``pos_label`` must raise."""
    # check that we raise the proper error when passing an unknown
    # pos_label
    X, y = make_classification(
        n_classes=2, n_informative=3, n_samples=20, random_state=0
    )
    lr = LogisticRegression().fit(X, y)
    # Sanity check: "xxx" is indeed not a label of this problem.
    assert scorer._kwargs["pos_label"] not in np.unique(y).tolist()
    err_msg = "is not a valid label"
    with pytest.raises(ValueError, match=err_msg):
        scorer(lr, X, y)
def test_get_scorer_return_copy():
    """``get_scorer`` must return a fresh object on every call so that
    mutating one scorer instance cannot affect another."""
    first = get_scorer("roc_auc")
    second = get_scorer("roc_auc")
    assert first is not second
def test_scorer_no_op_multiclass_select_proba():
    """A multiclass probability scorer must not fail when the scored subset of
    ``y_true`` happens to contain only two classes."""
    # check that calling a _Scorer on a multiclass problem do not raise
    # even if `y_true` would be binary during the scoring.
    # `_select_proba_binary` should not be called in this case.
    X, y = make_classification(
        n_classes=3, n_informative=3, n_samples=20, random_state=0
    )
    lr = LogisticRegression().fit(X, y)
    # Drop all samples of the last class so the test target looks binary.
    mask_last_class = y == lr.classes_[-1]
    X_test, y_test = X[~mask_last_class], y[~mask_last_class]
    assert_array_equal(np.unique(y_test), lr.classes_[:-1])
    scorer = make_scorer(
        roc_auc_score,
        response_method="predict_proba",
        multi_class="ovo",
        labels=lr.classes_,
    )
    # Must not raise despite the binary-looking `y_test`.
    scorer(lr, X_test, y_test)
@pytest.mark.parametrize("name", get_scorer_names())
def test_scorer_set_score_request_raises(name):
    """Calling ``set_score_request`` without the metadata-routing feature flag
    enabled must raise a ``RuntimeError`` for every registered scorer."""
    with pytest.raises(RuntimeError, match="This method is only available"):
        get_scorer(name).set_score_request()
@pytest.mark.parametrize("name", get_scorer_names(), ids=get_scorer_names())
@config_context(enable_metadata_routing=True)
def test_scorer_metadata_request(name):
    """Testing metadata requests for scorers.

    This test checks many small things in a large test, to reduce the
    boilerplate required for each section.
    """
    # Make sure they expose the routing methods.
    scorer = get_scorer(name)
    assert hasattr(scorer, "set_score_request")
    assert hasattr(scorer, "get_metadata_routing")
    # Check that by default no metadata is requested.
    assert_request_is_empty(scorer.get_metadata_routing())
    weighted_scorer = scorer.set_score_request(sample_weight=True)
    # set_score_request should mutate the instance, rather than returning a
    # new instance
    assert weighted_scorer is scorer
    # make sure the scorer doesn't request anything on methods other than
    # `score`, and that the requested value on `score` is correct.
    assert_request_is_empty(weighted_scorer.get_metadata_routing(), exclude="score")
    assert (
        weighted_scorer.get_metadata_routing().score.requests["sample_weight"] is True
    )
    # make sure putting the scorer in a router doesn't request anything by
    # default
    router = MetadataRouter(owner="test").add(
        scorer=get_scorer(name),
        method_mapping=MethodMapping().add(caller="score", callee="score"),
    )
    # make sure `sample_weight` is refused if passed.
    with pytest.raises(TypeError, match="got unexpected argument"):
        router.validate_metadata(params={"sample_weight": 1}, method="score")
    # make sure `sample_weight` is not routed even if passed.
    routed_params = router.route_params(params={"sample_weight": 1}, caller="score")
    assert not routed_params.scorer.score
    # make sure putting weighted_scorer in a router requests sample_weight
    router = MetadataRouter(owner="test").add(
        scorer=weighted_scorer,
        method_mapping=MethodMapping().add(caller="score", callee="score"),
    )
    # With an explicit request, validation passes and the weight is routed.
    router.validate_metadata(params={"sample_weight": 1}, method="score")
    routed_params = router.route_params(params={"sample_weight": 1}, caller="score")
    assert list(routed_params.scorer.score.keys()) == ["sample_weight"]
@config_context(enable_metadata_routing=True)
def test_metadata_kwarg_conflict():
    """This test makes sure the right warning is raised if the user passes
    some metadata both as a constructor to make_scorer, and during __call__.
    """
    X, y = make_classification(
        n_classes=3, n_informative=3, n_samples=20, random_state=0
    )
    lr = LogisticRegression().fit(X, y)
    # `labels` is already fixed as a make_scorer kwarg below.
    scorer = make_scorer(
        roc_auc_score,
        response_method="predict_proba",
        multi_class="ovo",
        labels=lr.classes_,
    )
    # Requesting `labels` as routed metadata conflicts with the kwarg...
    with pytest.warns(UserWarning, match="already set as kwargs"):
        scorer.set_score_request(labels=True)
    # ... and passing it again at call time overlaps with the kwarg too.
    with pytest.warns(UserWarning, match="There is an overlap"):
        scorer(lr, X, y, labels=lr.classes_)
@config_context(enable_metadata_routing=True)
def test_PassthroughScorer_set_score_request():
    """Test that _PassthroughScorer.set_score_request raises when routing enabled."""
    est = LogisticRegression().set_score_request(sample_weight="estimator_weights")
    # make a `_PassthroughScorer` with `check_scoring`:
    scorer = check_scoring(est, None)
    # `_PassthroughScorer` deliberately does not expose `set_score_request`.
    with pytest.raises(
        AttributeError,
        match="'_PassthroughScorer' object has no attribute 'set_score_request'",
    ):
        scorer.set_score_request(sample_weight=True)
def test_PassthroughScorer_set_score_request_raises_without_routing_enabled():
    """Test that _PassthroughScorer.set_score_request raises if metadata routing is
    disabled."""
    # `check_scoring` with scoring=None wraps the estimator's own `score`.
    scorer = check_scoring(LogisticRegression(), None)
    with pytest.raises(
        AttributeError,
        match="'_PassthroughScorer' object has no attribute 'set_score_request'",
    ):
        scorer.set_score_request(sample_weight=True)
@config_context(enable_metadata_routing=True)
def test_multimetric_scoring_metadata_routing():
    """``_MultimetricScorer`` must route metadata only to the scorers that
    explicitly request it."""
    # Test that _MultimetricScorer properly routes metadata.
    def score1(y_true, y_pred):
        # Accepts no metadata at all.
        return 1

    def score2(y_true, y_pred, sample_weight="test"):
        # make sure sample_weight is not passed
        assert sample_weight == "test"
        return 1

    def score3(y_true, y_pred, sample_weight=None):
        # make sure sample_weight is passed
        assert sample_weight is not None
        return 1

    scorers = {
        "score1": make_scorer(score1),
        "score2": make_scorer(score2).set_score_request(sample_weight=False),
        "score3": make_scorer(score3).set_score_request(sample_weight=True),
    }
    X, y = make_classification(
        n_samples=50, n_features=2, n_redundant=0, random_state=0
    )
    clf = DecisionTreeClassifier().fit(X, y)
    scorer_dict = _check_multimetric_scoring(clf, scorers)
    multi_scorer = _MultimetricScorer(scorers=scorer_dict)
    # This passes since routing is done.
    multi_scorer(clf, X, y, sample_weight=1)
@config_context(enable_metadata_routing=False)
def test_multimetric_scoring_kwargs():
    """Without metadata routing, ``_MultimetricScorer`` forwards kwargs to all
    scorers, except ``sample_weight`` which only goes to those accepting it."""
    # Test that _MultimetricScorer correctly forwards kwargs
    # to the scorers when metadata routing is disabled.
    # `sample_weight` is only forwarded to the scorers that accept it.
    # Other arguments are forwarded to all scorers.
    def score1(y_true, y_pred, common_arg=None):
        # make sure common_arg is passed
        assert common_arg is not None
        return 1

    def score2(y_true, y_pred, common_arg=None, sample_weight=None):
        # make sure common_arg is passed
        assert common_arg is not None
        # make sure sample_weight is passed
        assert sample_weight is not None
        return 1

    scorers = {
        "score1": make_scorer(score1),
        "score2": make_scorer(score2),
    }
    X, y = make_classification(
        n_samples=50, n_features=2, n_redundant=0, random_state=0
    )
    clf = DecisionTreeClassifier().fit(X, y)
    scorer_dict = _check_multimetric_scoring(clf, scorers)
    multi_scorer = _MultimetricScorer(scorers=scorer_dict)
    multi_scorer(clf, X, y, common_arg=1, sample_weight=1)
def test_kwargs_without_metadata_routing_error():
    """Passing extra metadata kwargs to a scorer must fail clearly when
    metadata routing is disabled."""
    # Test that kwargs are not supported in scorers if metadata routing is not
    # enabled.
    # TODO: remove when enable_metadata_routing is deprecated
    def score(y_true, y_pred, param=None):
        return 1  # pragma: no cover

    X, y = make_classification(
        n_samples=50, n_features=2, n_redundant=0, random_state=0
    )
    clf = DecisionTreeClassifier().fit(X, y)
    scorer = make_scorer(score)
    with config_context(enable_metadata_routing=False):
        with pytest.raises(
            ValueError, match="is only supported if enable_metadata_routing=True"
        ):
            scorer(clf, X, y, param="blah")
def test_get_scorer_multilabel_indicator():
    """Check that our scorer deal with multi-label indicator matrices.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/26817
    """
    X, Y = make_multilabel_classification(n_samples=72, n_classes=3, random_state=0)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0)
    estimator = KNeighborsClassifier().fit(X_train, Y_train)
    score = get_scorer("average_precision")(estimator, X_test, Y_test)
    # Loose sanity threshold: the exact value is not the point of the test,
    # only that scoring succeeds and is reasonable on this easy problem.
    assert score > 0.8
@pytest.mark.parametrize(
    "scorer, expected_repr",
    [
        (
            get_scorer("accuracy"),
            "make_scorer(accuracy_score, response_method='predict')",
        ),
        (
            get_scorer("neg_log_loss"),
            (
                "make_scorer(log_loss, greater_is_better=False,"
                " response_method='predict_proba')"
            ),
        ),
        (
            get_scorer("roc_auc"),
            (
                "make_scorer(roc_auc_score, response_method="
                "('decision_function', 'predict_proba'))"
            ),
        ),
        (
            make_scorer(fbeta_score, beta=2),
            "make_scorer(fbeta_score, response_method='predict', beta=2)",
        ),
    ],
)
def test_make_scorer_repr(scorer, expected_repr):
    """Check the representation of the scorer."""
    # The repr must mirror the `make_scorer` call that would recreate it.
    assert repr(scorer) == expected_repr
@pytest.mark.parametrize("pass_estimator", [True, False])
def test_get_scorer_multimetric(pass_estimator):
    """Check that check_scoring is compatible with multi-metric configurations."""
    X, y = make_classification(n_samples=150, n_features=10, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    if pass_estimator:
        # Estimator supplied at call time.
        check_scoring_ = check_scoring
    else:
        # Estimator bound up front; only `scoring` varies below.
        check_scoring_ = partial(check_scoring, clf)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    y_proba = clf.predict_proba(X_test)
    # Reference values computed with the metric functions directly.
    expected_results = {
        "r2": r2_score(y_test, y_pred),
        "roc_auc": roc_auc_score(y_test, y_proba[:, 1]),
        "accuracy": accuracy_score(y_test, y_pred),
    }
    # set/list/tuple of scorer names must all be accepted.
    for container in [set, list, tuple]:
        scoring = check_scoring_(scoring=container(["r2", "roc_auc", "accuracy"]))
        result = scoring(clf, X_test, y_test)
        assert result.keys() == expected_results.keys()
        for name in result:
            assert result[name] == pytest.approx(expected_results[name])

    def double_accuracy(y_true, y_pred):
        return 2 * accuracy_score(y_true, y_pred)

    custom_scorer = make_scorer(double_accuracy, response_method="predict")
    # dict with different names
    dict_scoring = check_scoring_(
        scoring={
            "my_r2": "r2",
            "my_roc_auc": "roc_auc",
            "double_accuracy": custom_scorer,
        }
    )
    dict_result = dict_scoring(clf, X_test, y_test)
    assert len(dict_result) == 3
    assert dict_result["my_r2"] == pytest.approx(expected_results["r2"])
    assert dict_result["my_roc_auc"] == pytest.approx(expected_results["roc_auc"])
    assert dict_result["double_accuracy"] == pytest.approx(
        2 * expected_results["accuracy"]
    )
def test_multimetric_scorer_repr():
    """The multimetric scorer must advertise its member scorers in its repr."""
    scoring = check_scoring(scoring=["accuracy", "r2"])
    assert str(scoring) == 'MultiMetricScorer("accuracy", "r2")'
def test_check_scoring_multimetric_raise_exc():
    """Test that check_scoring returns error code for a subset of scorers in
    multimetric scoring if raise_exc=False and raises otherwise."""

    def raising_scorer(estimator, X, y):
        # Always fails, standing in for a broken user-supplied scorer.
        raise ValueError("That doesn't work.")

    X, y = make_classification(n_samples=150, n_features=10, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression().fit(X_train, y_train)
    # "raising_scorer" is raising ValueError and should return an string representation
    # of the error of the last scorer:
    scoring = {
        "accuracy": make_scorer(accuracy_score),
        "raising_scorer": raising_scorer,
    }
    scoring_call = check_scoring(estimator=clf, scoring=scoring, raise_exc=False)
    scores = scoring_call(clf, X_test, y_test)
    assert "That doesn't work." in scores["raising_scorer"]
    # should raise an error
    scoring_call = check_scoring(estimator=clf, scoring=scoring, raise_exc=True)
    err_msg = "That doesn't work."
    with pytest.raises(ValueError, match=err_msg):
        scores = scoring_call(clf, X_test, y_test)
@pytest.mark.parametrize("enable_metadata_routing", [True, False])
def test_metadata_routing_multimetric_metadata_routing(enable_metadata_routing):
    """Test multimetric scorer works with and without metadata routing enabled when
    there is no actual metadata to pass.

    Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28256
    """
    X, y = make_classification(n_samples=50, n_features=10, random_state=0)
    estimator = EstimatorWithFitAndPredict().fit(X, y)
    multimetric_scorer = _MultimetricScorer(scorers={"acc": get_scorer("accuracy")})
    # Must not raise under either routing configuration.
    with config_context(enable_metadata_routing=enable_metadata_routing):
        multimetric_scorer(estimator, X, y)
def test_curve_scorer():
    """Check the behaviour of the `_CurveScorer` class."""
    X, y = make_classification(random_state=0)
    estimator = LogisticRegression().fit(X, y)
    curve_scorer = _CurveScorer(
        balanced_accuracy_score,
        sign=1,
        response_method="predict_proba",
        thresholds=10,
        kwargs={},
    )
    scores, thresholds = curve_scorer(estimator, X, y)
    assert thresholds.shape == scores.shape
    # check that the thresholds are probabilities with extreme values close to 0 and 1.
    # they are not exactly 0 and 1 because they are the extremum of the
    # `estimator.predict_proba(X)` values.
    assert 0 <= thresholds.min() <= 0.01
    assert 0.99 <= thresholds.max() <= 1
    # balanced accuracy should be between 0.5 and 1 when it is not adjusted
    assert 0.5 <= scores.min() <= 1
    # check that passing kwargs to the scorer works
    curve_scorer = _CurveScorer(
        balanced_accuracy_score,
        sign=1,
        response_method="predict_proba",
        thresholds=10,
        kwargs={"adjusted": True},
    )
    scores, thresholds = curve_scorer(estimator, X, y)
    # adjusted balanced accuracy is rescaled so chance level is 0; on this
    # well-separated problem the minimum stays in [0, 0.5]
    assert 0 <= scores.min() <= 0.5
    # check that we can inverse the sign of the score when dealing with `neg_*` scorer
    curve_scorer = _CurveScorer(
        balanced_accuracy_score,
        sign=-1,
        response_method="predict_proba",
        thresholds=10,
        kwargs={"adjusted": True},
    )
    scores, thresholds = curve_scorer(estimator, X, y)
    assert all(scores <= 0)
def test_curve_scorer_pos_label(global_random_seed):
    """Check that we propagate properly the `pos_label` parameter to the scorer."""
    n_samples = 30
    # Imbalanced problem so the two classes give clearly different curves.
    X, y = make_classification(
        n_samples=n_samples, weights=[0.9, 0.1], random_state=global_random_seed
    )
    estimator = LogisticRegression().fit(X, y)
    curve_scorer = _CurveScorer(
        recall_score,
        sign=1,
        response_method="predict_proba",
        thresholds=10,
        kwargs={"pos_label": 1},
    )
    scores_pos_label_1, thresholds_pos_label_1 = curve_scorer(estimator, X, y)
    curve_scorer = _CurveScorer(
        recall_score,
        sign=1,
        response_method="predict_proba",
        thresholds=10,
        kwargs={"pos_label": 0},
    )
    scores_pos_label_0, thresholds_pos_label_0 = curve_scorer(estimator, X, y)
    # Since `pos_label` is forwarded to the curve_scorer, the thresholds are not equal.
    assert not (thresholds_pos_label_1 == thresholds_pos_label_0).all()
    # The min-max range for the thresholds is defined by the probabilities of the
    # `pos_label` class (the column of `predict_proba`).
    y_pred = estimator.predict_proba(X)
    assert thresholds_pos_label_0.min() == pytest.approx(y_pred.min(axis=0)[0])
    assert thresholds_pos_label_0.max() == pytest.approx(y_pred.max(axis=0)[0])
    assert thresholds_pos_label_1.min() == pytest.approx(y_pred.min(axis=0)[1])
    assert thresholds_pos_label_1.max() == pytest.approx(y_pred.max(axis=0)[1])
    # The recall cannot be negative and `pos_label=1` should have a higher recall
    # since there is less samples to be considered.
    assert 0.0 < scores_pos_label_0.min() < scores_pos_label_1.min()
    assert scores_pos_label_0.max() == pytest.approx(1.0)
    assert scores_pos_label_1.max() == pytest.approx(1.0)
@config_context(enable_metadata_routing=True)
def test_Pipeline_in_PassthroughScorer():
    """Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/30937

    Make sure pipeline inside a gridsearchcv works with sample_weight passed!
    """
    # Use a fixed `random_state` so the generated data (and thus the test)
    # is deterministic; the original positional `make_classification(10, 4)`
    # drew a fresh random dataset on every run.
    X, y = make_classification(n_samples=10, n_features=4, random_state=0)
    sample_weight = np.ones_like(y)
    pipe = Pipeline(
        [
            (
                "logistic",
                LogisticRegression()
                .set_fit_request(sample_weight=True)
                .set_score_request(sample_weight=True),
            )
        ]
    )
    search = GridSearchCV(pipe, {"logistic__C": [0.1, 1]}, n_jobs=1, cv=3)
    # Must not raise: the weights are routed through the pipeline's
    # passthrough scorer during the grid search.
    search.fit(X, y, sample_weight=sample_weight)
| DummyScorer |
python | doocs__leetcode | lcof2/剑指 Offer II 101. 分割等和子串/Solution3.py | {
"start": 0,
"end": 446
} | class ____:
def canPartition(self, nums: List[int]) -> bool:
s = sum(nums)
if s % 2 != 0:
return False
target = s >> 1
@cache
def dfs(i, s):
nonlocal target
if s > target or i >= len(nums):
return False
if s == target:
return True
return dfs(i + 1, s) or dfs(i + 1, s + nums[i])
return dfs(0, 0)
| Solution |
python | ray-project__ray | python/ray/llm/_internal/common/utils/cloud_utils.py | {
"start": 11208,
"end": 11289
} | class ____(NamedTuple):
value: Any
expire_time: Optional[float]
| _CacheEntry |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 6871,
"end": 7222
} | class ____(type):
def __new__(cls) -> MetaclassInWhichSelfCannotBeUsed: ...
def __enter__(self) -> MetaclassInWhichSelfCannotBeUsed: ...
async def __aenter__(self) -> MetaclassInWhichSelfCannotBeUsed: ...
def __isub__(self, other: MetaclassInWhichSelfCannotBeUsed) -> MetaclassInWhichSelfCannotBeUsed: ...
| MetaclassInWhichSelfCannotBeUsed |
python | pyinstaller__pyinstaller | tests/unit/test_modulegraph/test_pycompat_pkg.py | {
"start": 159,
"end": 1325
} | class ____ (unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertIsInstance'):
def assertIsInstance(self, value, types):
if not isinstance(value, types):
self.fail("%r is not an instance of %r"%(value, types))
def test_compat(self):
root = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'testpkg-compatmodule')
mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
mf.import_hook('pkg.api')
node = mf.find_node('pkg')
self.assertIsInstance(node, modulegraph.Package)
node = mf.find_node('pkg.api')
self.assertIsInstance(node, modulegraph.SourceModule)
node = mf.find_node('pkg.api2')
self.assertIsInstance(node, modulegraph.InvalidSourceModule)
node = mf.find_node('pkg.api3')
self.assertIsInstance(node, modulegraph.SourceModule)
node = mf.find_node('http.client')
self.assertIsInstance(node, modulegraph.SourceModule)
node = mf.find_node('urllib2')
self.assertIs(node, None)
if __name__ == "__main__":
unittest.main()
| TestModuleGraphImport |
python | huggingface__transformers | src/transformers/generation/logits_process.py | {
"start": 107633,
"end": 110688
} | class ____(LogitsProcessor):
r"""
[`LogitsProcessor`] for classifier free guidance (CFG). The scores are split over the batch dimension,
where the first half correspond to the conditional logits (predicted from the input prompt) and the second half
correspond to the unconditional logits (predicted from an empty or 'null' prompt). The processor computes a
weighted average across the conditional and unconditional logits, parameterised by the `guidance_scale`.
See [the paper](https://huggingface.co/papers/2306.05284) for more information.
<Tip warning={true}>
This logits processor is exclusively compatible with
[MusicGen](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen)
</Tip>
Args:
guidance_scale (float):
The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`.
Higher guidance scale encourages the model to generate samples that are more closely linked to the input
prompt, usually at the expense of poorer quality.
Examples:
```python
>>> from transformers import AutoProcessor, MusicgenForConditionalGeneration
>>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
>>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
>>> inputs = processor(
... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
... padding=True,
... return_tensors="pt",
... )
>>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256)
```
"""
def __init__(self, guidance_scale):
if guidance_scale > 1:
self.guidance_scale = guidance_scale
else:
raise ValueError(
"Require guidance scale >1 to use the classifier free guidance processor, got guidance scale "
f"{guidance_scale}."
)
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
# simple check to make sure we have compatible batch sizes between our
# logits scores (cond + uncond) and input ids (cond only)
if scores.shape[0] != 2 * input_ids.shape[0]:
raise ValueError(
f"Logits should have twice the batch size of the input ids, the first half of batches corresponding to "
f"the conditional inputs, and the second half of batches corresponding to the unconditional inputs. Got "
f"batch size {scores.shape[0]} for the logits and {input_ids.shape[0]} for the input ids."
)
unguided_bsz = scores.shape[0] // 2
cond_logits, uncond_logits = scores.split(unguided_bsz, dim=0)
scores_processed = uncond_logits + (cond_logits - uncond_logits) * self.guidance_scale
return scores_processed
| ClassifierFreeGuidanceLogitsProcessor |
python | walkccc__LeetCode | solutions/2654. Minimum Number of Operations to Make All Array Elements Equal to 1/2654.py | {
"start": 0,
"end": 627
} | class ____:
def minOperations(self, nums: list[int]) -> int:
n = len(nums)
ones = nums.count(1)
if ones > 0:
return n - ones
# the minimum operations to make the shortest subarray with a gcd == 1
minOps = math.inf
for i, g in enumerate(nums):
for j in range(i + 1, n):
g = math.gcd(g, nums[j])
if g == 1: # gcd(nums[i..j]:== 1
minOps = min(minOps, j - i)
break
# After making the shortest subarray with `minOps`, need additional n - 1
# operations to make the other numbers to 1.
return -1 if minOps == math.inf else minOps + n - 1
| Solution |
python | huggingface__transformers | src/transformers/integrations/integration_utils.py | {
"start": 96190,
"end": 105346
} | class ____(TrainerCallback):
"""
A [`TrainerCallback`] that logs metrics, media, model checkpoints to [SwanLab](https://swanlab.cn/).
"""
def __init__(self):
if not is_swanlab_available():
raise RuntimeError("SwanLabCallback requires swanlab to be installed. Run `pip install swanlab`.")
import swanlab
self._swanlab = swanlab
self._initialized = False
self._log_model = os.getenv("SWANLAB_LOG_MODEL", None)
def setup(self, args, state, model, **kwargs):
"""
Setup the optional SwanLab (*swanlab*) integration.
One can subclass and override this method to customize the setup if needed. Find more information
[here](https://docs.swanlab.cn/guide_cloud/integration/integration-huggingface-transformers.html).
You can also override the following environment variables. Find more information about environment
variables [here](https://docs.swanlab.cn/en/api/environment-variable.html#environment-variables)
Environment:
- **SWANLAB_API_KEY** (`str`, *optional*, defaults to `None`):
Cloud API Key. During login, this environment variable is checked first. If it doesn't exist, the system
checks if the user is already logged in. If not, the login process is initiated.
- If a string is passed to the login interface, this environment variable is ignored.
- If the user is already logged in, this environment variable takes precedence over locally stored
login information.
- **SWANLAB_PROJECT** (`str`, *optional*, defaults to `None`):
Set this to a custom string to store results in a different project. If not specified, the name of the current
running directory is used.
- **SWANLAB_LOG_DIR** (`str`, *optional*, defaults to `swanlog`):
This environment variable specifies the storage path for log files when running in local mode.
By default, logs are saved in a folder named swanlog under the working directory.
- **SWANLAB_MODE** (`Literal["local", "cloud", "disabled"]`, *optional*, defaults to `cloud`):
SwanLab's parsing mode, which involves callbacks registered by the operator. Currently, there are three modes:
local, cloud, and disabled. Note: Case-sensitive. Find more information
[here](https://docs.swanlab.cn/en/api/py-init.html#swanlab-init)
- **SWANLAB_LOG_MODEL** (`str`, *optional*, defaults to `None`):
SwanLab does not currently support the save mode functionality.This feature will be available in a future
release
- **SWANLAB_WEB_HOST** (`str`, *optional*, defaults to `None`):
Web address for the SwanLab cloud environment for private version (its free)
- **SWANLAB_API_HOST** (`str`, *optional*, defaults to `None`):
API address for the SwanLab cloud environment for private version (its free)
"""
self._initialized = True
if state.is_world_process_zero:
logger.info('Automatic SwanLab logging enabled, to disable set os.environ["SWANLAB_MODE"] = "disabled"')
combined_dict = {**args.to_dict()}
if hasattr(model, "config") and model.config is not None:
model_config = model.config if isinstance(model.config, dict) else model.config.to_dict()
combined_dict = {**model_config, **combined_dict}
if hasattr(model, "peft_config") and model.peft_config is not None:
peft_config = model.peft_config
combined_dict = {"peft_config": peft_config, **combined_dict}
trial_name = state.trial_name
init_args = {}
if trial_name is not None and args.run_name is not None:
init_args["experiment_name"] = f"{args.run_name}-{trial_name}"
elif args.run_name is not None:
init_args["experiment_name"] = args.run_name
elif trial_name is not None:
init_args["experiment_name"] = trial_name
init_args["project"] = os.getenv("SWANLAB_PROJECT", None)
if self._swanlab.get_run() is None:
self._swanlab.init(
**init_args,
)
# show transformers logo!
self._swanlab.config["FRAMEWORK"] = "🤗transformers"
# add config parameters (run may have been created manually)
self._swanlab.config.update(combined_dict)
# add number of model parameters to swanlab config
try:
self._swanlab.config.update({"model_num_parameters": model.num_parameters()})
# get peft model parameters
if type(model).__name__ == "PeftModel" or type(model).__name__ == "PeftMixedModel":
trainable_params, all_param = model.get_nb_trainable_parameters()
self._swanlab.config.update({"peft_model_trainable_params": trainable_params})
self._swanlab.config.update({"peft_model_all_param": all_param})
except AttributeError:
logger.info("Could not log the number of model parameters in SwanLab due to an AttributeError.")
# log the initial model architecture to an artifact
if self._log_model is not None:
logger.warning(
"SwanLab does not currently support the save mode functionality. "
"This feature will be available in a future release."
)
badge_markdown = (
f'[<img src="https://raw.githubusercontent.com/SwanHubX/assets/main/badge1.svg"'
f' alt="Visualize in SwanLab" height="28'
f'0" height="32"/>]({self._swanlab.get_run().public.cloud.experiment_url})'
)
modelcard.AUTOGENERATED_TRAINER_COMMENT += f"\n{badge_markdown}"
def on_train_begin(self, args, state, control, model=None, **kwargs):
if not self._initialized:
self.setup(args, state, model, **kwargs)
def on_train_end(self, args, state, control, model=None, processing_class=None, **kwargs):
if self._log_model is not None and self._initialized and state.is_world_process_zero:
logger.warning(
"SwanLab does not currently support the save mode functionality. "
"This feature will be available in a future release."
)
def on_log(self, args, state, control, model=None, logs=None, **kwargs):
single_value_scalars = [
"train_runtime",
"train_samples_per_second",
"train_steps_per_second",
"train_loss",
"total_flos",
]
if not self._initialized:
self.setup(args, state, model)
if state.is_world_process_zero:
for k, v in logs.items():
if k in single_value_scalars:
self._swanlab.log({f"single_value/{k}": v}, step=state.global_step)
non_scalar_logs = {k: v for k, v in logs.items() if k not in single_value_scalars}
non_scalar_logs = rewrite_logs(non_scalar_logs)
self._swanlab.log({**non_scalar_logs, "train/global_step": state.global_step}, step=state.global_step)
def on_save(self, args, state, control, **kwargs):
if self._log_model is not None and self._initialized and state.is_world_process_zero:
logger.warning(
"SwanLab does not currently support the save mode functionality. "
"This feature will be available in a future release."
)
def on_predict(self, args, state, control, metrics, **kwargs):
if not self._initialized:
self.setup(args, state, **kwargs)
if state.is_world_process_zero:
metrics = rewrite_logs(metrics)
self._swanlab.log(metrics)
INTEGRATION_TO_CALLBACK = {
"azure_ml": AzureMLCallback,
"comet_ml": CometCallback,
"mlflow": MLflowCallback,
"neptune": NeptuneCallback,
"tensorboard": TensorBoardCallback,
"trackio": TrackioCallback,
"wandb": WandbCallback,
"codecarbon": CodeCarbonCallback,
"clearml": ClearMLCallback,
"dagshub": DagsHubCallback,
"flyte": FlyteCallback,
"dvclive": DVCLiveCallback,
"swanlab": SwanLabCallback,
}
def get_reporting_integration_callbacks(report_to):
if report_to is None:
return []
if isinstance(report_to, str):
if "none" == report_to:
return []
elif "all" == report_to:
report_to = get_available_reporting_integrations()
else:
report_to = [report_to]
for integration in report_to:
if integration not in INTEGRATION_TO_CALLBACK:
raise ValueError(
f"{integration} is not supported, only {', '.join(INTEGRATION_TO_CALLBACK.keys())} are supported."
)
return [INTEGRATION_TO_CALLBACK[integration] for integration in report_to]
| SwanLabCallback |
python | google__pytype | pytype/rewrite/abstract/classes.py | {
"start": 6097,
"end": 6754
} | class ____(base.BaseValue):
"""Instance of a class."""
members: Mapping[str, base.BaseValue]
def __init__(self, ctx: base.ContextType, cls: SimpleClass, members):
super().__init__(ctx)
self.cls = cls
self.members = members
@abc.abstractmethod
def set_attribute(self, name: str, value: base.BaseValue) -> None:
...
def get_attribute(self, name: str) -> base.BaseValue | None:
if name in self.members:
return self.members[name]
cls_attribute = self.cls.get_attribute(name)
if isinstance(cls_attribute, functions_lib.SimpleFunction):
return cls_attribute.bind_to(self)
return cls_attribute
| BaseInstance |
python | ray-project__ray | python/ray/serve/_private/proxy.py | {
"start": 19856,
"end": 26225
} | class ____(GenericProxy):
"""This class is meant to be instantiated and run by an gRPC server.
This is the servicer class for the gRPC server. It implements `unary_unary`
as the entry point for unary gRPC request and `unary_stream` as the entry
point for streaming gRPC request.
"""
@property
def protocol(self) -> RequestProtocol:
return RequestProtocol.GRPC
async def not_found_response(
self, proxy_request: ProxyRequest
) -> ResponseGenerator:
if not proxy_request.app_name:
application_message = "Application metadata not set."
else:
application_message = f"Application '{proxy_request.app_name}' not found."
not_found_message = (
f"{application_message} Ping "
"/ray.serve.RayServeAPIService/ListApplications for available applications."
)
yield ResponseStatus(
code=grpc.StatusCode.NOT_FOUND,
message=not_found_message,
is_error=True,
)
async def routes_response(
self, *, healthy: bool, message: str
) -> ResponseGenerator:
yield ListApplicationsResponse(
application_names=[
endpoint.app_name for endpoint in self.proxy_router.endpoints
],
).SerializeToString()
yield ResponseStatus(
code=grpc.StatusCode.OK if healthy else grpc.StatusCode.UNAVAILABLE,
message=message,
is_error=not healthy,
)
async def health_response(self, *, healthy: bool, message) -> ResponseGenerator:
yield HealthzResponse(message=message).SerializeToString()
yield ResponseStatus(
code=grpc.StatusCode.OK if healthy else grpc.StatusCode.UNAVAILABLE,
message=message,
is_error=not healthy,
)
def service_handler_factory(self, service_method: str, stream: bool) -> Callable:
async def unary_unary(
request_proto: Any, context: grpc._cython.cygrpc._ServicerContext
) -> bytes:
"""Entry point of the gRPC proxy unary request.
This method is called by the gRPC server when a unary request is received.
It wraps the request in a ProxyRequest object and calls proxy_request.
The return value is serialized user defined protobuf bytes.
"""
proxy_request = gRPCProxyRequest(
request_proto=request_proto,
context=context,
service_method=service_method,
stream=False,
)
status = None
response = None
async for message in self.proxy_request(proxy_request=proxy_request):
if isinstance(message, ResponseStatus):
status = message
else:
response = message
set_grpc_code_and_details(context, status)
return response
async def unary_stream(
request_proto: Any, context: grpc._cython.cygrpc._ServicerContext
) -> Generator[bytes, None, None]:
"""Entry point of the gRPC proxy streaming request.
This method is called by the gRPC server when a streaming request is
received. It wraps the request in a ProxyRequest object and calls
proxy_request. The return value is a generator of serialized user defined
protobuf bytes.
"""
proxy_request = gRPCProxyRequest(
request_proto=request_proto,
context=context,
service_method=service_method,
stream=True,
)
status = None
async for message in self.proxy_request(proxy_request=proxy_request):
if isinstance(message, ResponseStatus):
status = message
else:
yield message
set_grpc_code_and_details(context, status)
return unary_stream if stream else unary_unary
def setup_request_context_and_handle(
self,
app_name: str,
handle: DeploymentHandle,
route: str,
proxy_request: ProxyRequest,
internal_request_id: str,
) -> Tuple[DeploymentHandle, str]:
"""Setup request context and handle for the request.
Unpack gRPC request metadata and extract info to set up request context and
handle.
"""
multiplexed_model_id = proxy_request.multiplexed_model_id
request_id = proxy_request.request_id
if not request_id:
request_id = generate_request_id()
proxy_request.request_id = request_id
handle = handle.options(
stream=proxy_request.stream,
multiplexed_model_id=multiplexed_model_id,
method_name=proxy_request.method_name,
)
request_context_info = {
"route": route,
"request_id": request_id,
"_internal_request_id": internal_request_id,
"app_name": app_name,
"multiplexed_model_id": multiplexed_model_id,
"grpc_context": proxy_request.ray_serve_grpc_context,
}
ray.serve.context._serve_request_context.set(
ray.serve.context._RequestContext(**request_context_info)
)
proxy_request.send_request_id(request_id=request_id)
return handle, request_id
async def send_request_to_replica(
self,
request_id: str,
internal_request_id: str,
handle: DeploymentHandle,
proxy_request: ProxyRequest,
app_is_cross_language: bool = False,
) -> ResponseGenerator:
response_generator = ProxyResponseGenerator(
handle.remote(proxy_request.serialized_replica_arg()),
timeout_s=self.request_timeout_s,
)
try:
async for context, result in response_generator:
context._set_on_grpc_context(proxy_request.context)
yield result
status = ResponseStatus(code=grpc.StatusCode.OK)
except BaseException as e:
status = get_grpc_response_status(e, self.request_timeout_s, request_id)
# The status code should always be set.
assert status is not None
yield status
| gRPCProxy |
python | optuna__optuna | tutorial/20_recipes/012_artifact_tutorial.py | {
"start": 17119,
"end": 21479
} | class ____:
def __init__(self, artifact_store: FileSystemArtifactStore) -> None:
self._artifact_store = artifact_store
def __call__(self, trial: Trial) -> float:
slab = json_to_atoms(trial.study.user_attrs["slab"])
E_slab = trial.study.user_attrs["E_slab"]
mol = json_to_atoms(trial.study.user_attrs["mol"])
E_mol = trial.study.user_attrs["E_mol"]
phi = 180.0 * trial.suggest_float("phi", -1, 1)
theta = np.arccos(trial.suggest_float("theta", -1, 1)) * 180.0 / np.pi
psi = 180 * trial.suggest_float("psi", -1, 1)
x_pos = trial.suggest_float("x_pos", 0, 0.5)
y_pos = trial.suggest_float("y_pos", 0, 0.5)
z_hig = trial.suggest_float("z_hig", 1, 5)
xy_position = np.matmul([x_pos, y_pos, 0], slab.cell)[:2]
mol.euler_rotate(phi=phi, theta=theta, psi=psi)
add_adsorbate(slab, mol, z_hig, xy_position)
E_slab_mol = get_opt_energy(slab, fmax=1e-2)
write(f"./tmp/{trial.number}.json", slab, format="json")
artifact_id = upload_artifact(
artifact_store=self._artifact_store,
file_path=f"./tmp/{trial.number}.json",
study_or_trial=trial,
)
trial.set_user_attr("structure", artifact_id)
return E_slab_mol - E_slab - E_mol
def main():
study = create_study(
study_name="test_study",
storage="sqlite:///example.db",
load_if_exists=True,
)
slab, E_slab = create_slab()
study.set_user_attr("slab", atoms_to_json(slab))
study.set_user_attr("E_slab", E_slab)
mol, E_mol = create_mol()
study.set_user_attr("mol", atoms_to_json(mol))
study.set_user_attr("E_mol", E_mol)
os.makedirs("./tmp", exist_ok=True)
base_path = "./artifacts"
os.makedirs(base_path, exist_ok=True)
artifact_store = FileSystemArtifactStore(base_path=base_path)
study.optimize(Objective(artifact_store), n_trials=3)
print(
f"Best trial is #{study.best_trial.number}\n"
f" Its adsorption energy is {study.best_value}\n"
f" Its adsorption position is\n"
f" phi : {study.best_params['phi']}\n"
f" theta: {study.best_params['theta']}\n"
f" psi. : {study.best_params['psi']}\n"
f" x_pos: {study.best_params['x_pos']}\n"
f" y_pos: {study.best_params['y_pos']}\n"
f" z_hig: {study.best_params['z_hig']}"
)
best_artifact_id = study.best_trial.user_attrs["structure"]
with tempfile.TemporaryDirectory() as tmpdir_name:
download_file_path = os.path.join(tmpdir_name, f"{best_artifact_id}.json")
download_artifact(
artifact_store=artifact_store,
file_path=download_file_path,
artifact_id=best_artifact_id,
)
best_atoms = file_to_atoms(download_file_path)
print(best_atoms)
write("best_atoms.png", best_atoms, rotation=("315x,0y,0z"))
if __name__ == "__main__":
main()
###################################################################################################
# .. list-table::
# :header-rows: 1
#
# * - Fig 4. The chemical structure obtained by the above code.
# * - .. image:: https://github.com/optuna/optuna/assets/38826298/c6bd62fd-599a-424e-8c2c-ca88af85cc63
#
# As shown above, it is convenient to use the artifact module when performing the optimization of chemical structures with Optuna.
# In the case of small structures or fewer trial numbers, it's fine to convert it to a string and save it directly in the RDB.
# However, when dealing with complex structures or performing large-scale searches, it's better to save it outside the RDB to
# avoid overloading it, such as in an external file system or AWS S3.
#
# Conclusion
# ----------
#
# The artifact module is a useful feature when you want to save relatively large data for each trial. It can be used for various
# purposes such as saving snapshots of machine learning models, optimizing chemical structures, and human-in-the-loop optimization
# of images and sounds. It's a powerful assistant for black-box optimization with Optuna. Also, if there are ways to use it that
# we, the Optuna committers, haven't noticed, please let us know on GitHub discussions. Have a great optimization life with Optuna!
| Objective |
python | getsentry__sentry | src/sentry/migrations/0940_auditlog_json_field.py | {
"start": 244,
"end": 1749
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0939_rm_eventattachment_fileid_part2"),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[mod.to_jsonb("sentry_auditlogentry", "data")],
state_operations=[
migrations.AlterField(
model_name="auditlogentry",
name="data",
field=models.JSONField(),
)
],
)
]
| Migration |
python | TheAlgorithms__Python | geometry/geometry.py | {
"start": 4618,
"end": 6346
} | class ____:
"""
An abstract class which represents Polygon on a 2D surface.
>>> Polygon()
Polygon(sides=[])
>>> polygon = Polygon()
>>> polygon.add_side(Side(5)).get_side(0)
Side(length=5, angle=Angle(degrees=90), next_side=None)
>>> polygon.get_side(1)
Traceback (most recent call last):
...
IndexError: list index out of range
>>> polygon.set_side(0, Side(10)).get_side(0)
Side(length=10, angle=Angle(degrees=90), next_side=None)
>>> polygon.set_side(1, Side(10))
Traceback (most recent call last):
...
IndexError: list assignment index out of range
"""
sides: list[Side] = field(default_factory=list)
def add_side(self, side: Side) -> Self:
"""
>>> Polygon().add_side(Side(5))
Polygon(sides=[Side(length=5, angle=Angle(degrees=90), next_side=None)])
"""
self.sides.append(side)
return self
def get_side(self, index: int) -> Side:
"""
>>> Polygon().get_side(0)
Traceback (most recent call last):
...
IndexError: list index out of range
>>> Polygon().add_side(Side(5)).get_side(-1)
Side(length=5, angle=Angle(degrees=90), next_side=None)
"""
return self.sides[index]
def set_side(self, index: int, side: Side) -> Self:
"""
>>> Polygon().set_side(0, Side(5))
Traceback (most recent call last):
...
IndexError: list assignment index out of range
>>> Polygon().add_side(Side(5)).set_side(0, Side(10))
Polygon(sides=[Side(length=10, angle=Angle(degrees=90), next_side=None)])
"""
self.sides[index] = side
return self
| Polygon |
python | getsentry__sentry | fixtures/page_objects/base.py | {
"start": 1020,
"end": 1066
} | class ____(BaseElement):
pass
| TextBoxElement |
python | ray-project__ray | python/ray/serve/_private/http_util.py | {
"start": 10153,
"end": 19261
} | class ____:
"""Proxies ASGI receive from an actor.
The `receive_asgi_messages` callback will be called repeatedly to fetch messages
until a disconnect message is received.
"""
def __init__(
self,
scope: Scope,
request_metadata: RequestMetadata,
receive_asgi_messages: Callable[[RequestMetadata], Awaitable[bytes]],
):
self._type = scope["type"] # Either 'http' or 'websocket'.
# Lazy init the queue to ensure it is created in the user code event loop.
self._queue = None
self._request_metadata = request_metadata
self._receive_asgi_messages = receive_asgi_messages
self._disconnect_message = None
def _get_default_disconnect_message(self) -> Message:
"""Return the appropriate disconnect message based on the connection type.
HTTP ASGI spec:
https://asgi.readthedocs.io/en/latest/specs/www.html#disconnect-receive-event
WS ASGI spec:
https://asgi.readthedocs.io/en/latest/specs/www.html#disconnect-receive-event-ws
"""
if self._type == "websocket":
return {
"type": "websocket.disconnect",
# 1005 is the default disconnect code according to the ASGI spec.
"code": 1005,
}
else:
return {"type": "http.disconnect"}
@property
def queue(self) -> asyncio.Queue:
if self._queue is None:
self._queue = asyncio.Queue()
return self._queue
async def fetch_until_disconnect(self):
"""Fetch messages repeatedly until a disconnect message is received.
If a disconnect message is received, this function exits and returns it.
If an exception occurs, it will be raised on the next __call__ and no more
messages will be received.
"""
while True:
try:
pickled_messages = await self._receive_asgi_messages(
self._request_metadata
)
for message in pickle.loads(pickled_messages):
self.queue.put_nowait(message)
if message["type"] in {"http.disconnect", "websocket.disconnect"}:
self._disconnect_message = message
return
except KeyError:
# KeyError can be raised if the request is no longer active in the proxy
# (i.e., the user disconnects). This is expected behavior and we should
# not log an error: https://github.com/ray-project/ray/issues/43290.
message = self._get_default_disconnect_message()
self.queue.put_nowait(message)
self._disconnect_message = message
return
except Exception as e:
# Raise unexpected exceptions in the next `__call__`.
self.queue.put_nowait(e)
return
async def __call__(self) -> Message:
"""Return the next message once available.
This will repeatedly return a disconnect message once it's been received.
"""
if self.queue.empty() and self._disconnect_message is not None:
return self._disconnect_message
message = await self.queue.get()
if isinstance(message, Exception):
raise message
return message
def make_fastapi_class_based_view(fastapi_app, cls: Type) -> None:
"""Transform the `cls`'s methods and class annotations to FastAPI routes.
Modified from
https://github.com/dmontagu/fastapi-utils/blob/master/fastapi_utils/cbv.py
Usage:
>>> from fastapi import FastAPI
>>> app = FastAPI() # doctest: +SKIP
>>> class A: # doctest: +SKIP
... @app.route("/{i}") # doctest: +SKIP
... def func(self, i: int) -> str: # doctest: +SKIP
... return self.dep + i # doctest: +SKIP
>>> # just running the app won't work, here.
>>> make_fastapi_class_based_view(app, A) # doctest: +SKIP
>>> # now app can be run properly
"""
# Delayed import to prevent ciruclar imports in workers.
from fastapi import APIRouter, Depends
from fastapi.routing import APIRoute, APIWebSocketRoute
async def get_current_servable_instance():
from ray import serve
return serve.get_replica_context().servable_object
# Find all the class method routes
class_method_routes = [
route
for route in fastapi_app.routes
if
# User defined routes must all be APIRoute or APIWebSocketRoute.
isinstance(route, (APIRoute, APIWebSocketRoute))
# We want to find the route that's bound to the `cls`.
# NOTE(simon): we can't use `route.endpoint in inspect.getmembers(cls)`
# because the FastAPI supports different routes for the methods with
# same name. See #17559.
and (cls.__qualname__ in route.endpoint.__qualname__)
]
# Modify these routes and mount it to a new APIRouter.
# We need to to this (instead of modifying in place) because we want to use
# the laster fastapi_app.include_router to re-run the dependency analysis
# for each routes.
new_router = APIRouter()
for route in class_method_routes:
fastapi_app.routes.remove(route)
# This block just adds a default values to the self parameters so that
# FastAPI knows to inject the object when calling the route.
# Before: def method(self, i): ...
# After: def method(self=Depends(...), *, i):...
old_endpoint = route.endpoint
old_signature = inspect.signature(old_endpoint)
old_parameters = list(old_signature.parameters.values())
if len(old_parameters) == 0:
# TODO(simon): make it more flexible to support no arguments.
raise RayServeException(
"Methods in FastAPI class-based view must have ``self`` as "
"their first argument."
)
old_self_parameter = old_parameters[0]
new_self_parameter = old_self_parameter.replace(
default=Depends(get_current_servable_instance)
)
new_parameters = [new_self_parameter] + [
# Make the rest of the parameters keyword only because
# the first argument is no longer positional.
parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY)
for parameter in old_parameters[1:]
]
new_signature = old_signature.replace(parameters=new_parameters)
route.endpoint.__signature__ = new_signature
route.endpoint._serve_cls = cls
new_router.routes.append(route)
fastapi_app.include_router(new_router)
routes_to_remove = list()
for route in fastapi_app.routes:
if not isinstance(route, (APIRoute, APIWebSocketRoute)):
continue
# If there is a response model, FastAPI creates a copy of the fields.
# But FastAPI creates the field incorrectly by missing the outer_type_.
if (
# TODO(edoakes): I don't think this check is complete because we need
# to support v1 models in v2 (from pydantic.v1 import *).
not IS_PYDANTIC_2
and isinstance(route, APIRoute)
and route.response_model
):
route.secure_cloned_response_field.outer_type_ = (
route.response_field.outer_type_
)
# Remove endpoints that belong to other class based views.
serve_cls = getattr(route.endpoint, "_serve_cls", None)
if serve_cls is not None and serve_cls != cls:
routes_to_remove.append(route)
fastapi_app.routes[:] = [r for r in fastapi_app.routes if r not in routes_to_remove]
def set_socket_reuse_port(sock: socket.socket) -> bool:
"""Mutate a socket object to allow multiple process listening on the same port.
Returns:
success: whether the setting was successful.
"""
try:
# These two socket options will allow multiple process to bind the the
# same port. Kernel will evenly load balance among the port listeners.
# Note: this will only work on Linux.
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# In some Python binary distribution (e.g., conda py3.6), this flag
# was not present at build time but available in runtime. But
# Python relies on compiler flag to include this in binary.
# Therefore, in the absence of socket.SO_REUSEPORT, we try
# to use `15` which is value in linux kernel.
# https://github.com/torvalds/linux/blob/master/tools/include/uapi/asm-generic/socket.h#L27
else:
sock.setsockopt(socket.SOL_SOCKET, 15, 1)
return True
except Exception as e:
logger.debug(
f"Setting SO_REUSEPORT failed because of {e}. SO_REUSEPORT is disabled."
)
return False
| ASGIReceiveProxy |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py | {
"start": 48042,
"end": 54135
} | class ____(ShopifyBulkQuery):
"""
Output example to BULK query `inventory_levels` from `locations` with `filter query` by `updated_at`:
{
locations(includeLegacy: true, includeInactive: true) {
edges {
node {
__typename
id
inventoryLevels(query: "updated_at:>='2020-04-13T00:00:00+00:00'") {
edges {
node {
__typename
id
canDeactivate
createdAt
deactivationAlert
updatedAt
item {
inventory_history_url: inventoryHistoryUrl
inventory_item_id: id
locations_count: locationsCount {
count
}
}
quantities(
names: ["available", "incoming", "committed", "damaged", "on_hand", "quality_control", "reserved", "safety_stock"]) {
id
name
quantity
updatedAt
}
}
}
}
}
}
}
}
"""
query_name = "locations"
# in order to return all the locations, additional query args must be provided
# https://shopify.dev/docs/api/admin-graphql/2023-10/queries/locations#query-arguments
locations_query_args = {
"includeLegacy": "true",
"includeInactive": "true",
}
record_composition = {
"new_record": "InventoryLevel",
}
# quantity related fields and filtering options
quantities_names_filter: List[str] = [
'"available"',
'"incoming"',
'"committed"',
'"damaged"',
'"on_hand"',
'"quality_control"',
'"reserved"',
'"safety_stock"',
]
item_fields: List[Field] = [
Field(name="inventoryHistoryUrl", alias="inventory_history_url"),
Field(name="id", alias="inventory_item_id"),
Field(name="locationsCount", alias="locations_count", fields=["count"]),
]
inventory_levels_fields: List[Field] = [
"__typename",
"id",
"canDeactivate",
"createdAt",
"deactivationAlert",
"updatedAt",
Field(name="item", fields=item_fields),
]
def _quantities_query(self) -> Query:
"""
Defines the `quantities` nested query.
"""
return Field(
name="quantities",
arguments=[Argument(name="names", value=self.quantities_names_filter)],
fields=[
"id",
"name",
"quantity",
"updatedAt",
],
)
def _get_inventory_levels_fields(self, filter_query: Optional[str] = None) -> List[Field]:
nested_fields = self.inventory_levels_fields + [self._quantities_query()]
return self.query_nodes + [
Field(
name="inventoryLevels",
arguments=[Argument(name="query", value=f'"{filter_query}"')],
fields=[Field(name="edges", fields=[Field(name="node", fields=nested_fields)])],
)
]
def _process_quantities(self, quantities: Iterable[MutableMapping[str, Any]] = None) -> Iterable[Mapping[str, Any]]:
if quantities:
for quantity in quantities:
# save the original string id
quantity["admin_graphql_api_id"] = quantity.get("id")
# resolve the int id from str id
quantity["id"] = self.tools.resolve_str_id(quantity.get("id"))
# convert dates from ISO-8601 to RFC-3339
quantity["updatedAt"] = self.tools.from_iso8601_to_rfc3339(quantity, "updatedAt")
return quantities
return []
def query(self, filter_query: Optional[str] = None) -> Query:
# build the main query around previous
return self.build(
name=self.query_name,
edges=self._get_inventory_levels_fields(filter_query),
# passing more query args for `locations` query
additional_query_args=self.locations_query_args,
)
def record_process_components(self, record: MutableMapping[str, Any]) -> Iterable[MutableMapping[str, Any]]:
"""
Defines how to process collected components.
"""
# process quantities
quantities = record.get("quantities", [])
record["quantities"] = self._process_quantities(quantities)
item = record.get("item", {})
if item:
# resolve `inventory_item_id` to root lvl + resolve to int
record["inventory_item_id"] = self.tools.resolve_str_id(item.get("inventory_item_id"))
record["inventory_history_url"] = item.get("inventory_history_url")
record["locations_count"] = item.get("locations_count")
# add `location_id` from `__parentId`
record["location_id"] = self.tools.resolve_str_id(record[BULK_PARENT_KEY])
# make composite `id` from `location_id|inventory_item_id`
record["id"] = "|".join((str(record.get("location_id", "")), str(record.get("inventory_item_id", ""))))
# convert dates from ISO-8601 to RFC-3339
record["updatedAt"] = self.tools.from_iso8601_to_rfc3339(record, "updatedAt")
# remove leftovers
record.pop("item", None)
record.pop(BULK_PARENT_KEY, None)
record = self.tools.fields_names_to_snake_case(record)
yield record
| InventoryLevel |
python | django__django | tests/model_fields/test_integerfield.py | {
"start": 9583,
"end": 10070
} | class ____(IntegerFieldTests):
model = PositiveIntegerModel
documented_range = (0, 2147483647)
rel_db_type_class = (
models.PositiveIntegerField
if connection.features.related_fields_match_type
else models.IntegerField
)
def test_negative_values(self):
p = PositiveIntegerModel.objects.create(value=0)
p.value = models.F("value") - 1
with self.assertRaises(IntegrityError):
p.save()
| PositiveIntegerFieldTests |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image11.py | {
"start": 315,
"end": 896
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image11.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image(
"C2", self.image_dir + "logo.png", {"x_offset": 8, "y_offset": 5}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | vyperlang__vyper | vyper/compiler/input_bundle.py | {
"start": 7287,
"end": 8743
} | class ____(InputBundle):
input_json: dict[PurePath, Any]
def __init__(self, input_json, search_paths):
super().__init__(search_paths)
self.input_json = {}
for path, item in input_json.items():
path = _normpath(path)
# should be checked by caller
assert path not in self.input_json
self.input_json[path] = item
def _normalize_path(self, path: PurePath) -> PurePath:
return _normpath(path)
def _load_from_path(self, resolved_path: PurePath, original_path: PurePath) -> CompilerInput:
try:
value = self.input_json[resolved_path]
except KeyError:
raise _NotFound(resolved_path)
source_id = super()._generate_source_id(resolved_path)
if "content" in value:
return FileInput(source_id, original_path, resolved_path, value["content"])
if "abi" in value:
return JSONInput(
source_id, original_path, resolved_path, json.dumps(value), value["abi"]
)
# TODO: ethPM support
# if isinstance(contents, dict) and "contractTypes" in contents:
# unreachable, based on how JSONInputBundle is constructed in
# the codebase.
raise JSONError(f"Unexpected type in file: '{resolved_path}'") # pragma: nocover
# input bundle for vyper archives. similar to JSONInputBundle, but takes
# a zipfile as input.
| JSONInputBundle |
python | pytorch__pytorch | test/functorch/test_eager_transforms.py | {
"start": 134994,
"end": 156592
} | class ____(TestCase):
def _update_params(self, params, grads, alpha, mechanism):
if mechanism == "make_functional":
return [(params[i] - alpha * grads[i]) for i in range(len(params))]
else:
assert mechanism == "functional_call"
return {k: params[k] - alpha * grads[k] for k in params}
@parametrize("mechanism", ["make_functional", "functional_call"])
def test_maml_regression(self, device, mechanism):
class ThreeLayerNet(nn.Module):
def __init__(self) -> None:
super().__init__()
self.fc1 = nn.Linear(1, 40)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(40, 40)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(40, 1)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
# TODO: should replace with F.mse_loss
def mse_loss(x, y):
return torch.mean((x - y) ** 2)
net, params = _get_weights_and_functional_call(
ThreeLayerNet().to(device), mechanism
)
K = 20
num_tasks = 4
alpha = 0.1
def sample_tasks(outer_batch_size, inner_batch_size):
# Select amplitude and phase for the task
As = []
phases = []
for _ in range(outer_batch_size):
As.append(np.random.uniform(low=0.1, high=0.5))
phases.append(np.random.uniform(low=0.0, high=np.pi))
def get_batch():
xs, ys = [], []
for A, phase in zip(As, phases):
x = np.random.uniform(
low=-5.0, high=5.0, size=(inner_batch_size, 1)
)
y = A * np.sin(x + phase)
xs.append(x)
ys.append(y)
return torch.tensor(xs, dtype=torch.float, device=device), torch.tensor(
ys, dtype=torch.float, device=device
)
x1, y1 = get_batch()
x2, y2 = get_batch()
return x1, y1, x2, y2
def get_loss_for_task(use_transform, x1, y1, x2, y2):
def inner_loss(params, x1, y1):
f = net(params, x1)
loss = mse_loss(f, y1)
return loss
if use_transform:
grads = grad(inner_loss)(params, x1, y1)
else:
loss = inner_loss(params, x1, y1)
grad_params, spec = tree_flatten(params)
grads = torch.autograd.grad(loss, grad_params, create_graph=True)
grads = tree_unflatten(grads, spec)
new_params = self._update_params(params, grads, alpha, mechanism)
v_f = net(new_params, x2)
return mse_loss(v_f, y2)
task = sample_tasks(num_tasks, K)
list_params = (
params if mechanism == "make_functional" else list(params.values())
)
# Compute with vmap+grad
inner_losses = vmap(partial(get_loss_for_task, True))(
task[0], task[1], task[2], task[3]
)
loss2 = sum(inner_losses) / len(inner_losses)
result_grads = torch.autograd.grad(loss2, list_params)
# Compute without vmap+grad
inner_losses = [
get_loss_for_task(False, task[0][i], task[1][i], task[2][i], task[3][i])
for i in range(num_tasks)
]
loss2 = sum(inner_losses) / len(inner_losses)
expected_grads = torch.autograd.grad(loss2, list_params)
self.assertEqual(result_grads, expected_grads)
@parametrize("mechanism", ["make_functional", "functional_call"])
def test_maml_omniglot(self, device, mechanism):
# TODO: there appears to be precision issues for float32
dtype = torch.double
# TODO: We don't support inplace relu?
inplace_relu = False
n_way = 5
n_inner_iter = 2
num_tasks = 2
# real example uses batch norm but it's numerically unstable in the first
# iteration, when near 0, and won't produce same gradients. Uses group norm instead
net = (
nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.GroupNorm(64, 64, affine=True),
nn.ReLU(inplace=inplace_relu),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way),
)
.to(device)
.to(dtype)
)
fnet, params, buffers = _get_weights_and_functional_call_with_buffers(
net, mechanism
)
net = (params, buffers, fnet)
def loss_for_task(net, n_inner_iter, use_transform, x_spt, y_spt, x_qry, y_qry):
params, buffers, fnet = net
querysz = x_qry.size(0)
def compute_loss(new_params, buffers, x, y):
logits = fnet(new_params, buffers, x)
loss = F.cross_entropy(logits, y)
return loss
new_params = params
for _ in range(n_inner_iter):
if use_transform:
grads = grad(compute_loss)(new_params, buffers, x_spt, y_spt)
else:
res = compute_loss(new_params, buffers, x_spt, y_spt)
grad_params, spec = tree_flatten(new_params)
grads = torch.autograd.grad(res, grad_params, create_graph=True)
grads = tree_unflatten(grads, spec)
new_params = self._update_params(new_params, grads, 1e-1, mechanism)
qry_logits = fnet(new_params, buffers, x_qry)
qry_loss = F.cross_entropy(qry_logits, y_qry)
qry_acc = (qry_logits.argmax(dim=1) == y_qry).sum() / querysz
return qry_loss, qry_acc
# Get some sample inputs...
x_spt = torch.randn(num_tasks, 25, 1, 28, 28, dtype=dtype, device=device)
y_spt = torch.randint(0, 5, (num_tasks, 25), device=device)
x_qry = torch.randn(num_tasks, 75, 1, 28, 28, dtype=dtype, device=device)
y_qry = torch.randint(0, 5, (num_tasks, 75), device=device)
# compute with vmap + grad
compute_loss = partial(loss_for_task, net, n_inner_iter, True)
qry_losses, _ = vmap(compute_loss)(x_spt, y_spt, x_qry, y_qry)
list_params = (
params if mechanism == "make_functional" else list(params.values())
)
result_grads = torch.autograd.grad(qry_losses.sum(), list_params)
# compute without vmap + grad
compute_loss = partial(loss_for_task, net, n_inner_iter, False)
losses = [
compute_loss(x_spt[i], y_spt[i], x_qry[i], y_qry[i])[0]
for i in range(num_tasks)
]
expected_grads = torch.autograd.grad(sum(losses), list_params)
self.assertEqual(result_grads, expected_grads)
@parametrize("mechanism", ["make_functional", "functional_call"])
@parametrize("originally_track_running_stats", [True, False])
def test_update_batch_norm(self, device, originally_track_running_stats, mechanism):
dtype = torch.double
inplace_relu = False
classes = 5
num_batches = 2
net = (
nn.Sequential(
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(
64, affine=True, track_running_stats=originally_track_running_stats
),
nn.ReLU(inplace=inplace_relu),
nn.Flatten(),
nn.Linear(43264, classes),
)
.to(device)
.to(dtype)
)
replace_all_batch_norm_modules_(net)
transformed_net = net
fnet, params, buffers = _get_weights_and_functional_call_with_buffers(
transformed_net, mechanism
)
criterion = nn.CrossEntropyLoss()
def compute_loss(x, y, params, buffers):
return criterion(fnet(params, buffers, x), y)
# Get some sample inputs...
x = torch.randn(num_batches, 1, 64, 28, 28, device=device, dtype=dtype)
y = torch.randint(0, classes, (num_batches, 1), device=device)
# compute some per sample grads with vmap + grad
result_grads = vmap(grad(compute_loss, argnums=2), in_dims=(0, 0, None, None))(
x, y, params, buffers
)
# compute some per sample grads without vmap + grad
fnet, params, buffers = _get_weights_and_functional_call_with_buffers(
transformed_net, mechanism
)
flat_params, spec = tree_flatten(params)
expected_grads = [
torch.autograd.grad(compute_loss(x[i], y[i], params, buffers), flat_params)
for i in range(num_batches)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
expected_grads = tree_unflatten(expected_grads, spec)
self.assertEqual(result_grads, expected_grads)
@parametrize("jac", ["jacfwd", "jacrev"])
def test_lennard_jones_batched_jac(self, device, jac):
sigma = 0.5
epsilon = 4.0
jac = getattr(functorch, jac)
def lennard_jones(r):
return epsilon * ((sigma / r) ** 12 - (sigma / r) ** 6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return -epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
r = torch.linspace(0.5, 2 * sigma, steps=100, requires_grad=True, device=device)
drs = torch.outer(r, torch.tensor([1.0, 0, 0], device=device))
norms = torch.norm(drs, dim=1).reshape(-1, 1)
training_energies = torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
training_forces = torch.stack(
[force * dr for force, dr in zip(map(lennard_jones_force, norms), drs)]
)
model = nn.Sequential(
nn.Linear(1, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 1),
).to(device)
def make_prediction(model, drs, use_functorch):
norms = torch.norm(drs, dim=1).reshape(-1, 1)
energies = model(norms)
if use_functorch:
network_derivs = vmap(jac(model))(norms).squeeze(-1)
forces = -network_derivs * drs / norms
else:
forces = []
for r, dr in zip(norms, drs):
network_deriv = torch.autograd.functional.jacobian(
model, r, create_graph=True
)
force = -network_deriv * dr / r
forces.append(force)
forces = torch.cat(forces)
return energies, forces
def loss_fn(energies, forces, predicted_energies, predicted_forces):
return (
F.mse_loss(energies, predicted_energies)
+ 0.01 * F.mse_loss(forces, predicted_forces) / 3
)
energies, forces = make_prediction(model, drs, use_functorch=True)
loss = loss_fn(training_energies, training_forces, energies, forces)
result = torch.autograd.grad(loss, model.parameters())
energies, forces = make_prediction(model, drs, use_functorch=False)
loss = loss_fn(training_energies, training_forces, energies, forces)
expected = torch.autograd.grad(loss, model.parameters())
self.assertEqual(result, expected)
@parametrize("mechanism", ["make_functional", "functional_call"])
def test_ensemble_regression(self, device, mechanism):
def make_spirals(n_samples, noise_std=0.0, rotations=1.0):
ts = torch.linspace(0, 1, n_samples)
rs = ts**0.5
thetas = rs * rotations * 2 * math.pi
signs = torch.randint(0, 2, (n_samples,)) * 2 - 1
labels = (signs > 0).to(torch.long)
xs = rs * signs * torch.cos(thetas) + torch.randn(n_samples) * noise_std
ys = rs * signs * torch.sin(thetas) + torch.randn(n_samples) * noise_std
points = torch.stack([xs, ys], dim=1)
return points.to(device), labels.to(device)
points, labels = make_spirals(100, noise_std=0.05)
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.fc1 = nn.Linear(2, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
loss_fn = nn.NLLLoss()
func_model, weights = _get_weights_and_functional_call(
MLPClassifier().to(device), mechanism
)
def train_step_fn(use_transform, weights, batch, targets, lr=0.2):
def compute_loss(weights, batch, targets):
output = func_model(weights, batch)
loss = loss_fn(output, targets)
return loss
if use_transform:
grad_weights, loss = grad_and_value(compute_loss)(
weights, batch, targets
)
else:
loss = compute_loss(weights, batch, targets)
flat_weights, spec = tree_flatten(weights)
flat_grad_weights = torch.autograd.grad(loss, flat_weights)
grad_weights = tree_unflatten(flat_grad_weights, spec)
new_weights = self._update_params(weights, grad_weights, lr, mechanism)
return (loss, new_weights)
def unpack(train_result):
return train_result[0], train_result[1]
def init_fn(num_models):
models = tuple(MLPClassifier().to(device) for _ in range(num_models))
if mechanism == "make_functional":
return combine_state_for_ensemble(models)[1]
else:
return stack_module_state(models)[0]
def slice_weights(batched_weights, index):
return tree_map(
lambda weight: weight[index].detach().requires_grad_(), batched_weights
)
batched_weights = init_fn(num_models=2)
parallel_train_step_fn = vmap(
partial(train_step_fn, True), in_dims=(0, None, None)
)
result_loss, result_weights = unpack(
parallel_train_step_fn(batched_weights, points, labels)
)
loss0, weights0 = unpack(
train_step_fn(False, slice_weights(batched_weights, 0), points, labels)
)
loss1, weights1 = unpack(
train_step_fn(False, slice_weights(batched_weights, 1), points, labels)
)
expected_loss = torch.stack([loss0, loss1])
weights0, spec0 = tree_flatten(weights0)
weights1, spec1 = tree_flatten(weights1)
assert spec0 == spec1
expected_weights = tuple(
torch.stack([w0, w1]) for w0, w1 in zip(weights0, weights1)
)
expected_weights = tree_unflatten(expected_weights, spec0)
self.assertEqual(result_loss, expected_loss)
self.assertEqual(result_weights, expected_weights)
@parametrize(
"dropout_layer",
[
subtest(nn.Dropout, "Dropout"),
subtest(nn.AlphaDropout, "AlphaDropout"),
subtest(nn.FeatureAlphaDropout, "FeatureAlphaDropout"),
],
)
@parametrize("mechanism", ["make_functional", "functional_call"])
def test_find_learning_rate_ensembling(self, device, dropout_layer, mechanism):
# This example mimics what a user might do when trying to find the optimal learning rate. They would
# want to run a bunch of models with the same behavior (including the same dropout!) and have them
# each run with different learning rates. Specifically, this is an example of using same randomness with vmap
points, labels = (
torch.randn(100, 2, 2, 2, 2, device=device),
torch.randint(0, 2, (100,), device=device),
)
class MLPClassifier(nn.Module):
def __init__(self, hidden_dim=32, n_classes=2):
super().__init__()
self.hidden_dim = hidden_dim
self.n_classes = n_classes
self.dropout = dropout_layer()
self.fc1 = nn.Linear(16, self.hidden_dim)
self.fc2 = nn.Linear(self.hidden_dim, self.n_classes)
def forward(self, x):
x = self.dropout(x)
x = torch.flatten(x, start_dim=1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, -1)
return x
loss_fn = nn.NLLLoss()
func_model, weights = _get_weights_and_functional_call(
MLPClassifier().to(device), mechanism
)
def train_step_fn(weights, batch, targets, lr):
def compute_loss(weights, batch, targets):
output = func_model(weights, batch)
loss = loss_fn(output, targets)
return loss
grad_weights, loss = grad_and_value(compute_loss)(weights, batch, targets)
new_weights = self._update_params(weights, grad_weights, lr, mechanism)
if mechanism != "make_functional":
new_weights = list(new_weights.values())
# NB: return looks weird because torch.vmap must return Tensors
return (loss, *new_weights)
def unpack(train_result):
return train_result[0], train_result[1:]
def init_fn(num_models):
og_model = MLPClassifier().to(device)
models = tuple(
copy.deepcopy(og_model) for _ in range(num_models)
) # have same initialization
if mechanism == "make_functional":
return combine_state_for_ensemble(models)[1]
else:
return stack_module_state(models)[0]
batched_weights = init_fn(num_models=2)
parallel_train_step_fn = vmap(
train_step_fn, in_dims=(0, None, None, 0), randomness="same"
)
lrs = torch.tensor([0.2, 0.4], device=device)
result_loss, result_weights = unpack(
parallel_train_step_fn(batched_weights, points, labels, lrs)
)
self.assertEqual(result_loss[0], result_loss[1])
self.assertNotEqual(
tuple(weight[0] for weight in result_weights),
tuple(weight[1] for weight in result_weights),
)
@with_tf32_off # https://github.com/pytorch/pytorch/issues/86798
@unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
@parametrize("mechanism", ["make_functional", "functional_call"])
def test_resnet18_per_sample_grads(self, device, mechanism):
from torchvision import models
model = models.__dict__["resnet18"](
pretrained=False, norm_layer=(lambda c: nn.GroupNorm(min(32, c), c))
).to(device)
criterion = nn.CrossEntropyLoss(
reduction="sum"
) # avoid cross batch reductions for for loop comparison
func_model, weights = _get_weights_and_functional_call(model, mechanism)
def compute_loss(weights, image, target):
image = image.unsqueeze(0)
target = target.unsqueeze(0)
output = func_model(weights, image)
loss = criterion(output, target)
return loss
batch_size = 3
images = torch.randn(batch_size, 3, 32, 32, device=device)
targets = torch.randint(0, 10, (batch_size,), device=device)
result_grads = vmap(grad(compute_loss), in_dims=(None, 0, 0))(
weights, images, targets
)
flat_weights, spec = tree_flatten(weights)
expected_grads = [
torch.autograd.grad(
compute_loss(weights, images[i], targets[i]), flat_weights
)
for i in range(batch_size)
]
expected_grads = [torch.stack(shards) for shards in zip(*expected_grads)]
expected_grads = tree_unflatten(expected_grads, spec)
self.assertEqual(result_grads, expected_grads, atol=1e-3, rtol=1.0)
def normalize_devices(fx_g):
for node in fx_g.graph.nodes:
args = list(node.args)
for idx, arg in enumerate(args):
if isinstance(arg, torch.device):
args[idx] = "cpu"
node.args = tuple(args)
new_kwargs = {}
for k, v in node.kwargs.items():
if isinstance(v, torch.device):
v = "cpu"
new_kwargs[k] = v
node.kwargs = new_kwargs
fx_g.recompile()
return fx_g
@markDynamoStrictTest
| TestExamplesCorrectness |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/slice_op_test.py | {
"start": 1309,
"end": 19068
} | class ____(test.TestCase):
def testEmpty(self):
inp = np.random.rand(4, 4).astype("f")
for k in range(4):
with self.cached_session():
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
slice_t = a[2, k:k]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[2, k:k])
def testInt32(self):
inp = np.random.rand(4, 4).astype("i")
for k in range(4):
with self.cached_session():
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)
slice_t = a[2, k:k]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[2, k:k])
def testSlicingWithInt64Index(self):
with self.cached_session(force_gpu=test.is_gpu_available()):
a = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
# Slice using int64 Tensor.
i = constant_op.constant(1, dtype=dtypes.int64)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i+1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
# Slice using int64 integer.
i = np.asarray(1).astype(np.int64)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i+1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
a_int32 = constant_op.constant([0, 1, 2], dtype=dtypes.int32)
slice_t = array_ops.slice(a_int32,
np.asarray([1]).astype(np.int64),
np.asarray([2]).astype(np.int64))
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
a_float32 = constant_op.constant([0, 1, 2], dtype=dtypes.float32)
slice_t = array_ops.slice(a_float32,
np.asarray([1]).astype(np.int64),
np.asarray([2]).astype(np.int64))
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
def testSlicingInt64Tensor(self):
with self.cached_session(force_gpu=test.is_gpu_available()):
a = constant_op.constant([0, 1, 2], dtype=dtypes.int64)
# Slice using int32 Tensor.
i = constant_op.constant(1, dtype=dtypes.int32)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i + 1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
# Slice using int32 integer.
i = np.asarray(1).astype(np.int32)
slice_t = a[i]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(1, slice_val)
slice_t = a[i:i + 1]
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1], slice_val)
slice_t = array_ops.slice(a, [1], [2])
slice_val = self.evaluate(slice_t)
self.assertAllEqual([1, 2], slice_val)
def testSelectAll(self):
for _ in range(10):
with self.cached_session():
inp = np.random.rand(4, 4, 4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)
slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])
slice_implicit_t = a[:, :, :, :]
self.assertAllEqual(inp, self.evaluate(slice_explicit_t))
self.assertAllEqual(inp, self.evaluate(slice_implicit_t))
self.assertEqual(inp.shape, slice_explicit_t.get_shape())
self.assertEqual(inp.shape, slice_implicit_t.get_shape())
def testSingleDimension(self):
for _ in range(10):
with self.cached_session():
inp = np.random.rand(10).astype("f")
a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)
hi = np.random.randint(0, 9)
scalar_t = a[hi]
scalar_val = self.evaluate(scalar_t)
self.assertAllEqual(scalar_val, inp[hi])
if hi > 0:
lo = np.random.randint(0, hi)
else:
lo = 0
slice_t = a[lo:hi]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[lo:hi])
@test_util.run_without_tensor_float_32("Use FP32 in conv3d.")
def test3Dimension(self):
with self.cached_session():
input_shape = [8, 16, 16, 16, 8]
total_input_size = 1
for s in input_shape:
total_input_size *= s
inputs = [
i * 1.0 / total_input_size for i in range(1, total_input_size + 1)
]
a = constant_op.constant(inputs, shape=input_shape, dtype=dtypes.float32)
filter_shape = [1, 1, 1, 8, 8]
total_filter_size = 1
for s in filter_shape:
total_filter_size *= s
filters = [
i * 1.0 / total_filter_size for i in range(1, total_filter_size + 1)
]
f = constant_op.constant(
filters, shape=filter_shape, dtype=dtypes.float32)
conv_t = nn_ops.conv3d(
a, filter=f, strides=[1, 1, 1, 1, 1], padding="VALID")
slice_t = array_ops.slice(conv_t, [0, 1, 1, 1, 0], [1, 1, 1, 1, 8])
result = self.evaluate(slice_t)
expected = [
0.03028321, 0.03132677, 0.03237033, 0.03341389, 0.03445745, 0.035501,
0.03654456, 0.03758812
]
self.assertAllClose(expected, result.flatten(), rtol=1e-6)
def testScalarInput(self):
input_val = 0
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(
(ValueError, errors_impl.InvalidArgumentError),
"Attempting to slice scalar input."):
constant_op.constant(input_val)[:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
@def_function.function
def func(input_t):
slice_t = input_t[:]
return slice_t
with self.assertRaisesWithPredicateMatch(TypeError, "not subscriptable"):
self.evaluate(func(input_val))
def testInvalidIndex(self):
input_val = [1, 2]
# Test with constant input; shape inference fails.
with self.assertRaisesWithPredicateMatch(
(ValueError, errors_impl.InvalidArgumentError), "out of range"):
constant_op.constant(input_val)[1:, 1:].get_shape()
# Test evaluating with non-constant input; kernel execution fails.
@def_function.function
def func(input_t):
slice_t = input_t[1:, 1:]
return slice_t
with self.assertRaisesWithPredicateMatch(
TypeError, "must be integers or slices, not tuple"):
self.evaluate(func(input_val))
def _testSliceMatrixDim0(self, x, begin, size):
tf_ans = self.evaluate(array_ops.slice(x, [begin, 0], [size, x.shape[1]]))
np_ans = x[begin:begin + size, :]
self.assertAllEqual(tf_ans, np_ans)
def testSliceMatrixDim0(self):
x = np.random.rand(8, 4).astype("f")
self._testSliceMatrixDim0(x, 1, 2)
self._testSliceMatrixDim0(x, 3, 3)
y = np.random.rand(8, 7).astype("f") # 7 * sizeof(float) is not aligned
self._testSliceMatrixDim0(y, 1, 2)
self._testSliceMatrixDim0(y, 3, 3)
def testSingleElementAll(self):
for _ in range(10):
with self.cached_session():
inp = np.random.rand(4, 4).astype("f")
a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)
x, y = np.random.randint(0, 3, size=2).tolist()
slice_t = a[x, 0:y]
slice_val = self.evaluate(slice_t)
self.assertAllEqual(slice_val, inp[x, 0:y])
def testSimple(self):
with test_util.use_gpu():
for dtype in [
np.uint8,
np.int8,
np.uint16,
np.int16,
np.int32,
np.int64,
np.bool_,
np.float16,
np.float32,
np.float64,
np.complex64,
np.complex128,
dtypes.bfloat16.as_numpy_dtype,
dtypes.float8_e5m2.as_numpy_dtype,
dtypes.float8_e4m3fn.as_numpy_dtype,
]:
inp = np.random.rand(4, 4).astype(dtype)
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 4],
dtype=dtypes.float32)
slice_t = array_ops.slice(a, [0, 0], [2, 2])
slice2_t = a[:2, :2]
slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
self.assertAllEqual(slice_val, np.array(inp[:2, :2], dtype=np.float32))
self.assertAllEqual(slice2_val, np.array(inp[:2, :2], dtype=np.float32))
self.assertEqual(slice_val.shape, slice_t.get_shape())
self.assertEqual(slice2_val.shape, slice2_t.get_shape())
def testComplex(self):
inp = np.random.rand(4, 10, 10, 4).astype("f")
a = constant_op.constant(inp, dtype=dtypes.float32)
x = np.random.randint(0, 9)
z = np.random.randint(0, 9)
if z > 0:
y = np.random.randint(0, z)
else:
y = 0
slice_t = a[:, x, y:z, :]
self.assertAllEqual(slice_t, inp[:, x, y:z, :])
  def testRandom(self):
    """Slices a random rank-6 tensor and compares op, __getitem__, and numpy."""
    # Random dims of rank 6 (a dimension may be 0, yielding empty slices).
    input_shape = np.random.randint(0, 20, size=6)
    inp = np.random.rand(*input_shape).astype("f")
    a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
                             shape=input_shape,
                             dtype=dtypes.float32)
    # Start index per dim; guard against randint(0) on empty dims.
    indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
    # Size per dim, chosen so begin + size never exceeds the dim.
    sizes = [
        np.random.randint(0, input_shape[i] - indices[i] + 1) for i in range(6)
    ]
    slice_t = array_ops.slice(a, indices, sizes)
    slice2_t = a[indices[0]:indices[0] + sizes[0],
                 indices[1]:indices[1] + sizes[1],
                 indices[2]:indices[2] + sizes[2],
                 indices[3]:indices[3] + sizes[3],
                 indices[4]:indices[4] + sizes[4],
                 indices[5]:indices[5] + sizes[5]]
    slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
    expected_val = inp[indices[0]:indices[0] + sizes[0],
                       indices[1]:indices[1] + sizes[1],
                       indices[2]:indices[2] + sizes[2],
                       indices[3]:indices[3] + sizes[3],
                       indices[4]:indices[4] + sizes[4],
                       indices[5]:indices[5] + sizes[5]]
    self.assertAllEqual(slice_val, expected_val)
    self.assertAllEqual(slice2_val, expected_val)
    # Static shapes must be fully inferred for constant begin/size.
    self.assertEqual(expected_val.shape, slice_t.get_shape())
    self.assertEqual(expected_val.shape, slice2_t.get_shape())
def testPartialShapeInference(self):
z = array_ops.zeros((1, 2, 3))
self.assertAllEqual(z.get_shape().as_list(), [1, 2, 3])
m1 = array_ops.slice(z, [0, 0, 0], [-1, -1, -1])
self.assertAllEqual(m1.get_shape().as_list(), [1, 2, 3])
m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])
self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])
  def _testGradientSlice(self, input_shape, slice_begin, slice_size):
    """Checks that slice gradients scatter back into the input's positions.

    Args:
      input_shape: Shape of the random input tensor.
      slice_begin: Per-dimension start indices of the slice.
      slice_size: Per-dimension sizes of the slice.
    """
    with self.cached_session():
      num_inputs = np.prod(input_shape)
      num_grads = np.prod(slice_size)
      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
      a = constant_op.constant(
          [float(x) for x in inp.ravel(order="C")],
          shape=input_shape,
          dtype=dtypes.float32)
      slice_t = array_ops.slice(a, slice_begin, slice_size)
      # Feed random upstream gradients of the slice's shape.
      grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
      grad_tensor = constant_op.constant(grads)
      grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]
      result = self.evaluate(grad)
      # Create a zero tensor of the input shape and place
      # the grads into the right location to compare against TensorFlow.
      np_ans = np.zeros(input_shape)
      slices = []
      for i in range(len(input_shape)):
        slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
      np_ans[tuple(slices)] = grads
      self.assertAllClose(np_ans, result)
  def _testGradientSliceTape(self, input_shape, slice_begin, slice_size):
    """Tape-based variant of _testGradientSlice.

    Args:
      input_shape: Shape of the random input tensor.
      slice_begin: Per-dimension start indices of the slice.
      slice_size: Per-dimension sizes of the slice.
    """
    with backprop.GradientTape() as tape:
      num_inputs = np.prod(input_shape)
      num_grads = np.prod(slice_size)
      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
      a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
                               shape=input_shape,
                               dtype=dtypes.float32)
      tape.watch(a)
      slice_t = array_ops.slice(a, slice_begin, slice_size)
      # Feed random upstream gradients of the slice's shape.
      grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
      grad_tensor = constant_op.constant(grads)
      grad = tape.gradient(slice_t, [a], grad_tensor)[0]
      result = self.evaluate(grad)
      # Create a zero tensor of the input shape and place
      # the grads into the right location to compare against TensorFlow.
      np_ans = np.zeros(input_shape)
      slices = []
      for i in range(len(input_shape)):
        slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
      np_ans[tuple(slices)] = grads
      self.assertAllClose(np_ans, result)
def _testGradientVariableSize(self):
with self.cached_session():
inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
out = array_ops.slice(inp, [1], [-1])
grad_actual = self.evaluate(gradients_impl.gradients(out, inp)[0])
self.assertAllClose([0., 1., 1.], grad_actual)
def _testGradientVariableSizeTape(self):
with backprop.GradientTape() as tape:
inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
tape.watch(inp)
out = array_ops.slice(inp, [1], [-1])
grad_actual = self.evaluate(tape.gradient(out, inp))
self.assertAllClose([0., 1., 1.], grad_actual)
  def _testGradientVariableSize2D(self):
    """Two equivalent negative-index slice forms must yield equal gradients."""
    # Regression test for bug in slice. A low-level bug in Eigen was causing
    # incorrect results for negative indices in multi-dimensional tensors.
    # See b/114318298.
    with self.cached_session():
      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
      # Both losses reduce the same 2x2 submatrix, via a single combined
      # slice and via two chained slices respectively.
      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
      g1 = gradients_impl.gradients(loss1, x)[0]
      g2 = gradients_impl.gradients(loss2, x)[0]
      g1_val, g2_val = self.evaluate([g1, g2])
      self.assertAllEqual(g1_val, g2_val)
  def _testGradientVariableSize2DTape(self):
    """Tape-based variant of _testGradientVariableSize2D."""
    # Regression test for bug in slice. A low-level bug in Eigen was causing
    # incorrect results for negative indices in multi-dimensional tensors.
    # See b/114318298.
    # persistent=True because the tape is queried twice (g1 and g2).
    with backprop.GradientTape(persistent=True) as tape:
      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
      tape.watch(x)
      # Both losses reduce the same 2x2 submatrix, via a single combined
      # slice and via two chained slices respectively.
      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
      g1 = tape.gradient(loss1, x)
      g2 = tape.gradient(loss2, x)
      g1_val, g2_val = self.evaluate([g1, g2])
      self.assertAllEqual(g1_val, g2_val)
  def testGradientsAll(self):
    """Runs the full suite of graph-mode gradient checks for slice."""
    with ops.Graph().as_default():
      # Slice the middle square out of a 4x4 input
      self._testGradientSlice([4, 4], [1, 1], [2, 2])
      # Slice the upper left square out of a 4x4 input
      self._testGradientSlice([4, 4], [0, 0], [2, 2])
      # Slice a non-square input starting from (2,1)
      self._testGradientSlice([4, 4], [2, 1], [1, 2])
      # Slice a 3D tensor
      self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])
      # Use -1 as a slice dimension.
      self._testGradientVariableSize()
      # Use -1 as a slice dimension on a 2D tensor.
      self._testGradientVariableSize2D()
  def testGradientsAllTape(self):
    """Runs the full suite of GradientTape-based gradient checks for slice."""
    # Slice the middle square out of a 4x4 input
    self._testGradientSliceTape([4, 4], [1, 1], [2, 2])
    # Slice the upper left square out of a 4x4 input
    self._testGradientSliceTape([4, 4], [0, 0], [2, 2])
    # Slice a non-square input starting from (2,1)
    self._testGradientSliceTape([4, 4], [2, 1], [1, 2])
    # Slice a 3D tensor
    self._testGradientSliceTape([3, 3, 3], [0, 1, 0], [2, 1, 1])
    # Use -1 as a slice dimension.
    self._testGradientVariableSizeTape()
    # Use -1 as a slice dimension on a 2D tensor.
    self._testGradientVariableSize2DTape()
  def testNotIterable(self):
    """Iterating a symbolic Tensor in graph mode must raise, not hang."""
    # Tensor iteration is disabled explicitly for only graph mode.
    with ops.Graph().as_default():
      # NOTE(mrry): If we register __getitem__ as an overloaded
      # operator, Python will valiantly attempt to iterate over the
      # Tensor from 0 to infinity. This test ensures that this
      # unintended behavior is prevented.
      c = constant_op.constant(5.0)
      with self.assertRaisesRegex(errors_impl.OperatorNotAllowedInGraphError,
                                  "Iterating over a symbolic `tf.Tensor`"):
        for _ in c:
          pass
  def testComputedShape(self):
    """Shape inference with tensor-valued begin/size arguments."""
    # NOTE(mrry): We cannot currently handle partially-known values,
    # because `tf.slice()` uses -1 to specify a wildcard size, and
    # this can't be handled using the
    # `tensor_util.constant_value_as_shape()` trick.
    a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
    begin = constant_op.constant(0)
    size = constant_op.constant(1)
    # Constant tensors can be folded, so the shape is fully known.
    b = array_ops.slice(a, [begin, 0], [size, 2])
    self.assertEqual([1, 2], b.get_shape())
    # placeholders only make sense in a graph.
    with ops.Graph().as_default():
      a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
      begin = array_ops.placeholder(dtypes.int32, shape=())
      # Unknown begin with wildcard size leaves dim 0 unknown.
      c = array_ops.slice(a, [begin, 0], [-1, 2])
      self.assertEqual([None, 2], c.get_shape().as_list())
  def testSliceOfSlice(self):
    """Chained slices alias the same rows; the combination cancels to zero."""
    with self.session():
      a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
      b = a[1:, :]   # rows 1..3 of a
      c = b[:-1, :]  # rows 1..2 of a
      d = c[1, :]    # row 2 of a
      # d == c[1, :] == a[2, :] == b[-2, :], so the expression is
      # identically zero: 2*r - r + r - 2*r.
      res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]
      self.assertAllEqual([0, 0, 0], self.evaluate(res))
# Standard TensorFlow test-runner entry point.
if __name__ == "__main__":
  test.main()
| SliceTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec18.py | {
"start": 453,
"end": 751
} | class ____(Protocol[P]):
def __call__(self, x: int, /, *args: P.args, **kwargs: P.kwargs) -> int: ...
def func_with_protocol(cb: ClassWithConcatenate[P]) -> Callable[P, bool]: ...
x2 = func_with_protocol(callback)
reveal_type(x2, expected_text="(b: str, c: str) -> bool")
| ClassWithConcatenate |
python | apache__airflow | scripts/ci/prek/check_contextmanager_class_decorators.py | {
"start": 1159,
"end": 4830
} | class ____(ast.NodeVisitor):
"""AST visitor to check for context manager decorators on test classes."""
def __init__(self, filename: str):
self.filename = filename
self.errors: list[str] = []
def visit_ClassDef(self, node: ast.ClassDef) -> None:
"""Check class definitions for problematic decorators."""
if not node.name.startswith("Test"):
self.generic_visit(node)
return
for decorator in node.decorator_list:
decorator_name = self._get_decorator_name(decorator)
if self._is_problematic_decorator(decorator_name):
self.errors.append(
f"{self.filename}:{node.lineno}: Class '{node.name}' uses @{decorator_name} "
f"decorator which prevents pytest collection. Use @pytest.mark.usefixtures instead."
)
self.generic_visit(node)
def _get_decorator_name(self, decorator: ast.expr) -> str:
"""Extract decorator name from AST node."""
if isinstance(decorator, ast.Name):
return decorator.id
if isinstance(decorator, ast.Call):
if isinstance(decorator.func, ast.Name):
return decorator.func.id
if isinstance(decorator.func, ast.Attribute):
return f"{self._get_attr_chain(decorator.func)}"
elif isinstance(decorator, ast.Attribute):
return f"{self._get_attr_chain(decorator)}"
return "unknown"
def _get_attr_chain(self, node: ast.Attribute) -> str:
"""Get the full attribute chain (e.g., 'contextlib.contextmanager')."""
if isinstance(node.value, ast.Name):
return f"{node.value.id}.{node.attr}"
if isinstance(node.value, ast.Attribute):
return f"{self._get_attr_chain(node.value)}.{node.attr}"
return node.attr
def _is_problematic_decorator(self, decorator_name: str) -> bool:
"""Check if decorator is known to break pytest class collection."""
problematic_decorators = {
"conf_vars",
"env_vars",
"contextlib.contextmanager",
"contextmanager",
}
return decorator_name in problematic_decorators
def check_file(filepath: Path) -> list[str]:
"""Check a single file for problematic decorators."""
try:
with open(filepath, encoding="utf-8") as f:
content = f.read()
tree = ast.parse(content, filename=str(filepath))
checker = ContextManagerClassDecoratorChecker(str(filepath))
checker.visit(tree)
return checker.errors
except Exception as e:
return [f"{filepath}: Error parsing file: {e}"]
def main() -> int:
"""Main entry point."""
if len(sys.argv) < 2:
print("Usage: check_contextmanager_class_decorators.py <file_or_directory>...")
return 1
all_errors = []
for arg in sys.argv[1:]:
path = Path(arg)
if path.is_file() and path.suffix == ".py":
if "test" in str(path): # Only check test files
all_errors.extend(check_file(path))
else:
print(f"Skipping non-test file: {path}")
elif path.is_dir():
for py_file in path.rglob("*.py"):
if "test" in str(py_file): # Only check test files
all_errors.extend(check_file(py_file))
if all_errors:
print("Found problematic context manager class decorators:")
for error in all_errors:
print(f" {error}")
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
| ContextManagerClassDecoratorChecker |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_sync_versions.py | {
"start": 688,
"end": 35379
} | class ____(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.user = User.objects.get(username="eric")
self.client.force_login(self.user)
self.pip = Project.objects.get(slug="pip")
# Run tests for .com
if settings.ALLOW_PRIVATE_REPOS:
self.org = get(
Organization,
name="testorg",
)
OrganizationOwner.objects.create(
owner=self.user,
organization=self.org,
)
self.org.projects.add(self.pip)
Version.objects.create(
project=self.pip,
identifier="origin/master",
verbose_name="master",
active=True,
machine=True,
type=BRANCH,
)
Version.objects.create(
project=self.pip,
identifier="to_delete",
verbose_name="to_delete",
active=False,
type=TAG,
)
self.pip.update_stable_version()
self.pip.save()
def test_proper_url_no_slash(self):
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
{
"identifier": "origin/to_add",
"verbose_name": "to_add",
},
]
self.assertEqual(
set(self.pip.versions.all().values_list("slug", flat=True)),
{"master", "latest", "stable", "0.8.1", "0.8", "to_delete"},
)
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
self.assertEqual(
set(self.pip.versions.all().values_list("slug", flat=True)),
{"master", "latest", "stable", "0.8.1", "0.8", "to_add"},
)
def test_new_tag_update_active(self):
Version.objects.create(
project=self.pip,
identifier="0.8.3",
verbose_name="0.8.3",
active=True,
)
self.pip.update_stable_version()
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
{
"identifier": "origin/to_add",
"verbose_name": "to_add",
},
]
tags_data = [
{
"identifier": "0.9",
"verbose_name": "0.9",
},
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
version_9 = Version.objects.get(slug="0.9")
self.assertTrue(version_9.active)
# Version 0.9 becomes the stable version
self.assertEqual(
version_9.identifier,
self.pip.get_stable_version().identifier,
)
def test_new_tag_dont_update_inactive(self):
Version.objects.create(
project=self.pip,
identifier="0.8.3",
verbose_name="0.8.3",
type=TAG,
active=False,
)
self.pip.update_stable_version()
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
{
"identifier": "origin/to_add",
"verbose_name": "to_add",
},
]
tags_data = [
{
"identifier": "0.9",
"verbose_name": "0.9",
},
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
# Version 0.9 becomes the stable version, but it's inactive
version_9 = self.pip.versions.get(slug="0.9")
self.assertEqual(
version_9.identifier,
self.pip.get_stable_version().identifier,
)
self.assertFalse(version_9.active)
# Version 0.8.3 is still inactive
version_8 = Version.objects.get(slug="0.8.3")
self.assertFalse(version_8.active)
def test_delete_version(self):
Version.objects.create(
project=self.pip,
identifier="0.8.3",
verbose_name="0.8.3",
active=False,
)
Version.objects.create(
project=self.pip,
identifier="external",
verbose_name="external",
type=EXTERNAL,
active=False,
)
self.pip.update_stable_version()
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
self.assertTrue(
Version.objects.filter(slug="0.8.3").exists(),
)
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
# There isn't a v0.8.3
self.assertFalse(
Version.objects.filter(slug="0.8.3").exists(),
)
# The inactive external version isn't deleted
self.assertTrue(
Version.objects.filter(slug="external").exists(),
)
def test_update_stable_version_type(self):
self.pip.update_stable_version()
stable_version = self.pip.get_stable_version()
self.assertEqual(stable_version.type, TAG)
branches_data = [
{
"identifier": "master",
"verbose_name": "master",
},
{
"identifier": "1.0",
"verbose_name": "1.0",
},
{
"identifier": "1.1",
"verbose_name": "1.1",
},
{
"identifier": "2.0",
"verbose_name": "2.0",
},
]
# Deactivate all other versions, so we only have branches for consideration
# for the new stable version.
self.pip.versions.exclude(slug__in=[LATEST, STABLE]).update(active=False)
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
self.pip.update_stable_version()
stable_version = self.pip.get_stable_version()
self.assertEqual(stable_version.type, BRANCH)
self.assertEqual(stable_version.identifier, "2.0")
self.assertEqual(stable_version.verbose_name, "stable")
original_stable = self.pip.get_original_stable_version()
self.assertEqual(original_stable.type, BRANCH)
self.assertEqual(original_stable.slug, "2.0")
self.assertEqual(original_stable.identifier, "2.0")
self.assertEqual(original_stable.verbose_name, "2.0")
def test_update_latest_version_type(self):
latest_version = self.pip.versions.get(slug=LATEST)
self.assertEqual(latest_version.type, BRANCH)
branches_data = [
{
"identifier": "master",
"verbose_name": "master",
},
]
tags_data = [
{
"identifier": "abc123",
"verbose_name": "latest",
}
]
# Latest is created as machine=False, and as a tag.
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
latest_version = self.pip.versions.get(slug=LATEST)
self.assertEqual(latest_version.type, TAG)
self.assertEqual(latest_version.identifier, "abc123")
self.assertEqual(latest_version.verbose_name, "latest")
self.assertEqual(latest_version.machine, False)
# Latest is back as machine created, and as a branch.
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
latest_version = self.pip.versions.get(slug=LATEST)
self.assertIsNone(self.pip.default_branch)
self.assertEqual(latest_version.type, BRANCH)
self.assertEqual(latest_version.identifier, "master")
self.assertEqual(latest_version.verbose_name, "latest")
self.assertEqual(latest_version.machine, True)
# Latest points to the default branch/tag.
self.pip.default_branch = "2.6"
self.pip.save()
sync_versions_task(
self.pip.pk,
branches_data=[
{
"identifier": "master",
"verbose_name": "master",
},
{
"identifier": "2.6",
"verbose_name": "2.6",
},
],
tags_data=[],
)
latest_version = self.pip.versions.get(slug=LATEST)
self.assertEqual(latest_version.type, BRANCH)
self.assertEqual(latest_version.identifier, "2.6")
self.assertEqual(latest_version.verbose_name, "latest")
self.assertEqual(latest_version.machine, True)
sync_versions_task(
self.pip.pk,
branches_data=[
{
"identifier": "master",
"verbose_name": "master",
},
],
tags_data=[
{
"identifier": "abc123",
"verbose_name": "2.6",
}
],
)
latest_version = self.pip.versions.get(slug=LATEST)
self.assertEqual(latest_version.type, TAG)
self.assertEqual(latest_version.identifier, "2.6")
self.assertEqual(latest_version.verbose_name, "latest")
self.assertEqual(latest_version.machine, True)
def test_machine_attr_when_user_define_stable_tag_and_delete_it(self):
"""
The user creates a tag named ``stable`` on an existing repo,
when syncing the versions, the RTD's ``stable`` is lost
(set to machine=False) and doesn't update automatically anymore,
when the tag is deleted on the user repository, the RTD's ``stable``
is back (set to machine=True).
"""
version8 = Version.objects.create(
project=self.pip,
identifier="0.8.3",
verbose_name="0.8.3",
type=TAG,
active=False,
machine=False,
)
self.pip.update_stable_version()
current_stable = self.pip.get_stable_version()
# 0.8.3 is the current stable
self.assertEqual(
version8.identifier,
current_stable.identifier,
)
self.assertTrue(current_stable.machine)
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
tags_data = [
# User new stable
{
"identifier": "1abc2def3",
"verbose_name": "stable",
},
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
current_stable = self.pip.get_stable_version()
self.assertEqual(
"1abc2def3",
current_stable.identifier,
)
# Deleting the tag should return the RTD's stable
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
tags_data = [
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
# The version 8 should be the new stable.
# The stable isn't stuck with the previous commit
current_stable = self.pip.get_stable_version()
self.assertEqual(
"0.8.3",
current_stable.identifier,
)
self.assertTrue(current_stable.machine)
def test_machine_attr_when_user_define_stable_tag_and_delete_it_new_project(self):
"""
The user imports a new project with a tag named ``stable``,
when syncing the versions, the RTD's ``stable`` is lost
(set to machine=False) and doesn't update automatically anymore,
when the tag is deleted on the user repository, the RTD's ``stable``
is back (set to machine=True).
"""
# There isn't a stable version yet
self.pip.versions.exclude(slug="master").delete()
current_stable = self.pip.get_stable_version()
self.assertIsNone(current_stable)
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
tags_data = [
# User stable
{
"identifier": "1abc2def3",
"verbose_name": "stable",
},
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
current_stable = self.pip.get_stable_version()
self.assertEqual(
"1abc2def3",
current_stable.identifier,
)
# User activates the stable version
current_stable.active = True
current_stable.save()
# Deleting the tag should return the RTD's stable
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
tags_data = [
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
# The version 8 should be the new stable.
# The stable isn't stuck with the previous commit
current_stable = self.pip.get_stable_version()
self.assertEqual(
"0.8.3",
current_stable.identifier,
)
self.assertTrue(current_stable.machine)
def test_machine_attr_when_user_define_stable_branch_and_delete_it(self):
"""
The user creates a branch named ``stable`` on an existing repo,
when syncing the versions, the RTD's ``stable`` is lost
(set to machine=False) and doesn't update automatically anymore,
when the branch is deleted on the user repository, the RTD's ``stable``
is back (set to machine=True).
"""
# Project with just branches
self.pip.versions.filter(type=TAG).delete()
Version.objects.create(
project=self.pip,
identifier="0.8.3",
verbose_name="0.8.3",
type=BRANCH,
active=False,
machine=False,
)
self.pip.update_stable_version()
current_stable = self.pip.get_stable_version()
# 0.8.3 is the current stable
self.assertEqual(
"0.8.3",
current_stable.identifier,
)
self.assertTrue(current_stable.machine)
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
# User new stable
{
"identifier": "origin/stable",
"verbose_name": "stable",
},
{
"identifier": "origin/0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
current_stable = self.pip.get_stable_version()
self.assertEqual(
"origin/stable",
current_stable.identifier,
)
# Deleting the branch should return the RTD's stable
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
{
"identifier": "origin/0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
# The version 8 should be the new stable.
# The stable isn't stuck with the previous branch
current_stable = self.pip.get_stable_version()
self.assertEqual(
"origin/0.8.3",
current_stable.identifier,
)
self.assertTrue(current_stable.machine)
def test_machine_attr_when_user_define_stable_branch_and_delete_it_new_project(
self,
):
"""The user imports a new project with a branch named ``stable``, when
syncing the versions, the RTD's ``stable`` is lost (set to
machine=False) and doesn't update automatically anymore, when the branch
is deleted on the user repository, the RTD's ``stable`` is back (set to
machine=True)."""
# There isn't a stable version yet
self.pip.versions.exclude(slug="master").delete()
current_stable = self.pip.get_stable_version()
self.assertIsNone(current_stable)
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
# User stable
{
"identifier": "origin/stable",
"verbose_name": "stable",
},
{
"identifier": "origin/0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
current_stable = self.pip.get_stable_version()
self.assertEqual(
"origin/stable",
current_stable.identifier,
)
# User activates the stable version
current_stable.active = True
current_stable.save()
# Deleting the branch should return the RTD's stable
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
{
"identifier": "origin/0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
# The version 8 should be the new stable.
# The stable isn't stuck with the previous commit
current_stable = self.pip.get_stable_version()
self.assertEqual(
"origin/0.8.3",
current_stable.identifier,
)
self.assertTrue(current_stable.machine)
def test_restore_machine_stable_verbose_name(self):
"""
The user imports a new project with a branch named ``Stable``, when
syncing the versions, the RTD's ``stable`` is lost (set to machine=False)
and doesn't update automatically anymore, when the branch
is deleted on the user repository, the RTD's ``stable`` is back
(set to machine=True, and with the correct name in lowercase).
"""
self.pip.versions.exclude(slug="master").delete()
current_stable = self.pip.get_stable_version()
assert current_stable is None
custom_stable = get(
Version,
project=self.pip,
identifier="Stable",
verbose_name="Stable",
slug="stable",
type=BRANCH,
machine=False,
active=True,
)
self.pip.update_stable_version()
assert self.pip.get_stable_version() == custom_stable
branches_data = [
{
"identifier": "master",
"verbose_name": "master",
},
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
# RTD stable is restored correctly.
current_stable = self.pip.get_stable_version()
assert current_stable.identifier == "0.8.3"
assert current_stable.verbose_name == "stable"
assert current_stable.machine
def test_machine_attr_when_user_define_latest_tag_and_delete_it(self):
"""The user creates a tag named ``latest`` on an existing repo, when
syncing the versions, the RTD's ``latest`` is lost (set to
machine=False) and doesn't update automatically anymore, when the tag is
deleted on the user repository, the RTD's ``latest`` is back (set to
machine=True)."""
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
tags_data = [
# User new stable
{
"identifier": "1abc2def3",
"verbose_name": "latest",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
# The tag is the new latest
version_latest = self.pip.versions.get(slug="latest")
self.assertEqual(
"1abc2def3",
version_latest.identifier,
)
# Deleting the tag should return the RTD's latest
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
# The latest isn't stuck with the previous commit
version_latest = self.pip.versions.get(slug="latest")
self.assertIsNone(self.pip.default_branch)
self.assertTrue(version_latest.machine)
self.assertEqual(
"master",
version_latest.identifier,
)
self.assertTrue(version_latest.machine)
# Test with an explicit default branch (tag).
self.pip.default_branch = "default-tag"
self.pip.save()
tags_data = [
{
"identifier": "1abc2def3",
"verbose_name": "default-tag",
}
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
version_latest = self.pip.versions.get(slug="latest")
self.assertTrue(version_latest.machine)
self.assertEqual(version_latest.identifier, "default-tag")
self.assertEqual(version_latest.type, TAG)
def test_machine_attr_when_user_define_latest_branch_and_delete_it(self):
"""The user creates a branch named ``latest`` on an existing repo, when
syncing the versions, the RTD's ``latest`` is lost (set to
machine=False) and doesn't update automatically anymore, when the branch
is deleted on the user repository, the RTD's ``latest`` is back (set to
machine=True).
"""
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
# User new latest
{
"identifier": "origin/latest",
"verbose_name": "latest",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
# The branch is the new latest
version_latest = self.pip.versions.get(slug="latest")
self.assertIsNone(self.pip.default_branch)
self.assertFalse(version_latest.machine)
self.assertEqual(
"origin/latest",
version_latest.identifier,
)
# Deleting the branch should return the RTD's latest
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
# The latest isn't stuck with the previous branch
version_latest = self.pip.versions.get(slug="latest")
self.assertIsNone(self.pip.default_branch)
self.assertTrue(version_latest.machine)
self.assertEqual(
"master",
version_latest.identifier,
)
self.assertTrue(version_latest.machine)
# Test with an explicit default branch.
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
{
"identifier": "origin/default-branch",
"verbose_name": "default-branch",
},
]
self.pip.default_branch = "default-branch"
self.pip.save()
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=[],
)
version_latest = self.pip.versions.get(slug="latest")
self.assertTrue(version_latest.machine)
self.assertEqual(version_latest.identifier, "default-branch")
self.assertEqual(version_latest.type, BRANCH)
def test_deletes_version_with_same_identifier(self):
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
tags_data = [
{
"identifier": "1234",
"verbose_name": "one",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
# We only have one version with an identifier `1234`
self.assertEqual(
self.pip.versions.filter(identifier="1234").count(),
1,
)
# We add a new tag with the same identifier
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
tags_data = [
{
"identifier": "1234",
"verbose_name": "two",
},
{
"identifier": "1234",
"verbose_name": "one",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
# We have two versions with an identifier `1234`
self.assertEqual(
self.pip.versions.filter(identifier="1234").count(),
2,
)
# We delete one version with identifier `1234`
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
]
tags_data = [
{
"identifier": "1234",
"verbose_name": "one",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
# We have only one version with an identifier `1234`
self.assertEqual(
self.pip.versions.filter(identifier="1234").count(),
1,
)
def test_versions_with_same_verbose_name(self):
get(
Version,
project=self.pip,
identifier="v2",
verbose_name="v2",
active=True,
type=BRANCH,
)
get(
Version,
project=self.pip,
identifier="1234abc",
verbose_name="v2",
active=True,
type=TAG,
)
branches_data = [
{
"identifier": "v2",
"verbose_name": "v2",
},
]
tags_data = [
{
# THe identifier has changed!
"identifier": "12345abc",
"verbose_name": "v2",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
self.assertEqual(
self.pip.versions.filter(
verbose_name="v2", identifier="v2", type=BRANCH
).count(),
1,
)
self.assertEqual(
self.pip.versions.filter(
verbose_name="v2", identifier="12345abc", type=TAG
).count(),
1,
)
@mock.patch("readthedocs.builds.tasks.run_automation_rules")
def test_automation_rules_are_triggered_for_new_versions(
self, run_automation_rules
):
Version.objects.create(
project=self.pip,
identifier="0.8.3",
verbose_name="0.8.3",
active=True,
type=TAG,
)
branches_data = [
{
"identifier": "origin/master",
"verbose_name": "master",
},
{
"identifier": "origin/new_branch",
"verbose_name": "new_branch",
},
]
tags_data = [
{
"identifier": "new_tag",
"verbose_name": "new_tag",
},
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
sync_versions_task(
self.pip.pk,
branches_data=branches_data,
tags_data=tags_data,
)
run_automation_rules.assert_called_with(
self.pip,
{"new_branch", "new_tag"},
{"0.8", "0.8.1"},
)
@mock.patch("readthedocs.builds.automation_actions.trigger_build", mock.MagicMock())
def test_automation_rule_activate_version(self):
tags_data = [
{
"identifier": "new_tag",
"verbose_name": "new_tag",
},
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
RegexAutomationRule.objects.create(
project=self.pip,
priority=0,
match_arg=r"^new_tag$",
action=VersionAutomationRule.ACTIVATE_VERSION_ACTION,
version_type=TAG,
)
self.assertFalse(self.pip.versions.filter(verbose_name="new_tag").exists())
sync_versions_task(
self.pip.pk,
branches_data=[],
tags_data=tags_data,
)
new_tag = self.pip.versions.get(verbose_name="new_tag")
self.assertTrue(new_tag.active)
@mock.patch("readthedocs.builds.automation_actions.trigger_build", mock.MagicMock())
def test_automation_rule_set_default_version(self):
tags_data = [
{
"identifier": "new_tag",
"verbose_name": "new_tag",
},
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
RegexAutomationRule.objects.create(
project=self.pip,
priority=0,
match_arg=r"^new_tag$",
action=VersionAutomationRule.SET_DEFAULT_VERSION_ACTION,
version_type=TAG,
)
self.assertEqual(self.pip.get_default_version(), LATEST)
sync_versions_task(
self.pip.pk,
branches_data=[],
tags_data=tags_data,
)
self.pip.refresh_from_db()
self.assertEqual(self.pip.get_default_version(), "new_tag")
def test_automation_rule_delete_version(self):
tags_data = [
{
"identifier": "new_tag",
"verbose_name": "new_tag",
},
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
version_slug = "0.8"
RegexAutomationRule.objects.create(
project=self.pip,
priority=0,
match_arg=r"^0\.8$",
action=VersionAutomationRule.DELETE_VERSION_ACTION,
version_type=TAG,
)
version = self.pip.versions.get(slug=version_slug)
self.assertTrue(version.active)
sync_versions_task(
self.pip.pk,
branches_data=[],
tags_data=tags_data,
)
self.assertFalse(self.pip.versions.filter(slug=version_slug).exists())
def test_automation_rule_dont_delete_default_version(self):
tags_data = [
{
"identifier": "new_tag",
"verbose_name": "new_tag",
},
{
"identifier": "0.8.3",
"verbose_name": "0.8.3",
},
]
version_slug = "0.8"
RegexAutomationRule.objects.create(
project=self.pip,
priority=0,
match_arg=r"^0\.8$",
action=VersionAutomationRule.DELETE_VERSION_ACTION,
version_type=TAG,
)
version = self.pip.versions.get(slug=version_slug)
self.assertTrue(version.active)
self.pip.default_version = version_slug
self.pip.save()
sync_versions_task(
self.pip.pk,
branches_data=[],
tags_data=tags_data,
)
self.assertTrue(self.pip.versions.filter(slug=version_slug).exists())
@mock.patch("readthedocs.core.utils.trigger_build", mock.MagicMock())
@mock.patch("readthedocs.builds.tasks.trigger_build", mock.MagicMock())
| TestSyncVersions |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 31403,
"end": 32202
} | class ____(PrefectBaseModel, OperatorMixin):
"""Filter artifacts. Only artifacts matching all criteria will be returned"""
id: Optional[ArtifactFilterId] = Field(
default=None, description="Filter criteria for `Artifact.id`"
)
key: Optional[ArtifactFilterKey] = Field(
default=None, description="Filter criteria for `Artifact.key`"
)
flow_run_id: Optional[ArtifactFilterFlowRunId] = Field(
default=None, description="Filter criteria for `Artifact.flow_run_id`"
)
task_run_id: Optional[ArtifactFilterTaskRunId] = Field(
default=None, description="Filter criteria for `Artifact.task_run_id`"
)
type: Optional[ArtifactFilterType] = Field(
default=None, description="Filter criteria for `Artifact.type`"
)
| ArtifactFilter |
python | justquick__django-activity-stream | actstream/admin.py | {
"start": 245,
"end": 559
} | class ____(ModelAdmin):
date_hierarchy = 'timestamp'
list_display = ('__str__', 'actor', 'verb', 'target', 'public')
list_editable = ('verb',)
list_filter = ('timestamp',)
raw_id_fields = ('actor_content_type', 'target_content_type',
'action_object_content_type')
| ActionAdmin |
python | ray-project__ray | python/ray/train/_internal/state/schema.py | {
"start": 2285,
"end": 2485
} | class ____(BaseModel):
uuid: str
index: int
name: str
utilizationGpu: Optional[float]
memoryUsed: float
memoryTotal: float
processInfo: ProcessGPUUsage
@DeveloperAPI
| GPUStats |
python | tensorflow__tensorflow | tensorflow/lite/python/lite.py | {
"start": 12753,
"end": 25895
} | class ____:
"""QuantizationMode determines the quantization type from user options."""
def __init__(
self,
optimizations,
target_spec,
representative_dataset,
graph_def,
disable_per_channel=False,
experimental_new_dynamic_range_quantizer=False,
experimental_low_bit_qat=False,
full_integer_quantization_bias_type=None,
experimental_mlir_variable_quantization=False,
experimental_qdq_annotation=False,
):
self._optimizations = optimizations
for deprecated_optimization in [
Optimize.OPTIMIZE_FOR_SIZE,
Optimize.OPTIMIZE_FOR_LATENCY,
]:
if deprecated_optimization in self._optimizations:
logging.warning(
(
"Optimization option %s is deprecated, please use"
" optimizations=[Optimize.DEFAULT] instead."
),
deprecated_optimization,
)
self._experimental_qdq_annotation = experimental_qdq_annotation
self._target_spec = target_spec
self._representative_dataset = representative_dataset
self._graph_def = graph_def
if self._is_int8_target_required():
self._validate_int8_required()
self.enable_mlir_variable_quantization = (
experimental_mlir_variable_quantization
)
if self._is_float16_target_required():
self._validate_float16_required()
self._disable_per_channel = disable_per_channel
self._enable_new_dynamic_range_quantizer = (
experimental_new_dynamic_range_quantizer
)
# Allow training with lower than 8 bit weights to be converted
# to constants with trained scale.
self._experimental_low_bit_qat = experimental_low_bit_qat
self._full_integer_quantization_bias_type = (
full_integer_quantization_bias_type
)
self._validate_full_integer_quantization_bias_type()
def is_post_training_int8_only_quantization(self):
return (
self.is_any_optimization_enabled()
and self._representative_dataset is not None
and not self._is_int16x8_target_required()
and not self.is_allow_float()
and self._is_int8_target_required()
)
def is_post_training_int8_quantization_with_float_fallback(self):
return (
self.is_any_optimization_enabled()
and self._representative_dataset is not None
and not self._is_int16x8_target_required()
and self.is_allow_float()
and self._smallest_supported_type() == _dtypes.int8
)
def is_post_training_int8_quantization(self):
return (
self.is_post_training_int8_only_quantization()
or self.is_post_training_int8_quantization_with_float_fallback()
)
def is_post_training_int16x8_only_quantization(self):
return (
self.is_any_optimization_enabled()
and self._representative_dataset is not None
and self._is_int16x8_target_required()
and not self.is_allow_float()
)
def is_post_training_int16x8_quantization_with_float_fallback(self):
return (
self.is_any_optimization_enabled()
and self._representative_dataset is not None
and self._is_int16x8_target_required()
and self.is_allow_float()
)
def is_post_training_int16x8_quantization(self):
return (
self.is_post_training_int16x8_only_quantization()
or self.is_post_training_int16x8_quantization_with_float_fallback()
)
def is_post_training_integer_quantization(self):
return (
self.is_post_training_int8_quantization()
or self.is_post_training_int16x8_quantization()
)
def is_low_bit_quantize_aware_training(self):
return (
self.is_any_optimization_enabled()
and self.is_quantization_aware_trained_model()
and self._experimental_low_bit_qat
)
def is_quantization_aware_training(self):
if self._experimental_qdq_annotation:
return True
return (
self.is_any_optimization_enabled()
and self.is_quantization_aware_trained_model()
and not self.is_low_bit_quantize_aware_training()
)
def is_integer_quantization(self):
return (
self.is_post_training_integer_quantization()
or self.is_quantization_aware_training()
or self.is_low_bit_quantize_aware_training()
)
def is_post_training_dynamic_range_quantization(self):
# Post-training dynamic range quantization is only enabled if post-training
# int8 quantization and training time quantization was not done.
return (
self.is_any_optimization_enabled()
and self._representative_dataset is None
and not self.is_quantization_aware_trained_model()
and self._smallest_supported_type() == _dtypes.int8
)
def is_post_training_float16_quantization(self):
return (
self.is_any_optimization_enabled()
and self._smallest_supported_type().size == 2
and _dtypes.float16 in self._target_spec.supported_types
)
def is_bfloat16_quantization(self):
return (
self.is_any_optimization_enabled()
and self._smallest_supported_type().size == 2
and _dtypes.bfloat16 in self._target_spec.supported_types
)
def activations_type(self):
if self.is_integer_quantization():
if self._is_int16x8_target_required():
return _dtypes.int16
else:
return _dtypes.int8
else:
return _dtypes.float32
def bias_type(self):
if self._full_integer_quantization_bias_type:
return self._full_integer_quantization_bias_type
if self.activations_type() == _dtypes.int16:
return _dtypes.int64
elif self.activations_type() == _dtypes.int8:
return _dtypes.int32
else:
return _dtypes.float32
def converter_flags(self, inference_ty=None, inference_input_ty=None):
"""Flags to the converter."""
if self.is_integer_quantization():
is_low_bit_qat = self.is_low_bit_quantize_aware_training()
return {
"inference_type": (
inference_ty
if inference_ty is not None
else self.activations_type()
),
"inference_input_type": _dtypes.float32,
"post_training_quantize": False, # disable dynamic range quantization
"quantize_to_float16": False, # disable float16 quantization
"disable_infer_tensor_range": is_low_bit_qat,
"use_fake_quant_num_bits": is_low_bit_qat,
"enable_mlir_variable_quantization": (
self.enable_mlir_variable_quantization
),
}
elif self.is_post_training_dynamic_range_quantization():
return {
"inference_type": _dtypes.float32,
"inference_input_type": _dtypes.float32,
"post_training_quantize": True, # enable dynamic range quantization
"quantize_to_float16": False, # disable float16 quantization
# experimental: disable per-channel (per-axis) quantization.
"disable_per_channel_quantization": self._disable_per_channel,
"enable_mlir_dynamic_range_quantizer": (
self._enable_new_dynamic_range_quantizer
),
"enable_mlir_variable_quantization": (
self.enable_mlir_variable_quantization
),
}
elif self.is_post_training_float16_quantization():
return {
"inference_type": _dtypes.float32,
"inference_input_type": _dtypes.float32,
"post_training_quantize": True,
"quantize_to_float16": True, # enable float16 quantization
# pylint: disable=protected-access
"accumulation_type": (
self._target_spec._experimental_supported_accumulation_type
),
# pylint: enable=protected-access
"allow_bfloat16": self.is_bfloat16_quantization(),
"enable_mlir_dynamic_range_quantizer": (
self._enable_new_dynamic_range_quantizer
),
"enable_mlir_variable_quantization": (
self.enable_mlir_variable_quantization
),
}
else:
# Note this might still trigger (uint8) quantization to be compatible with
# the old converter.
return {
"inference_type": (
inference_ty if inference_ty is not None else _dtypes.float32
),
"inference_input_type": inference_input_ty,
"post_training_quantize": False, # enable dynamic range quantization
"quantize_to_float16": False, # disable float16 quantization
"allow_bfloat16": self.is_bfloat16_quantization(),
}
# Below are helpers for the above functions.
def _validate_int8_required(self):
"""Int8 mode requires certain parameters to exist and be compatible."""
# Validate target_spec attibute.
if set(self._target_spec.supported_ops) == {
OpsSet.TFLITE_BUILTINS_INT8
} and not (
set(self._target_spec.supported_types) == set()
or set(self._target_spec.supported_types) == {_dtypes.int8}
):
raise ValueError(
"As full integer quantization has been enabled by setting "
"`target_spec.supported_ops`={tf.lite.OpsSet.TFLITE_BUILTINS_INT8}, "
"thus `target_spec.supported_types` should be left uninitizalized "
"or set to {tf.int8}."
)
if set(self._target_spec.supported_types) == {_dtypes.int8}:
self._target_spec.supported_ops = {OpsSet.TFLITE_BUILTINS_INT8}
# Check if representative_dataset is specified.
if (
not self._representative_dataset
and not self.is_quantization_aware_training()
):
raise ValueError(
"For full integer quantization, a "
"`representative_dataset` must be specified."
)
# Update represenative dataset to the expected format.
if self._representative_dataset:
if not isinstance(self._representative_dataset, RepresentativeDataset):
self._representative_dataset = RepresentativeDataset(
self._representative_dataset
)
def _validate_float16_required(self):
"""Float16 mode requires certain parameters to exist and be compatible."""
if self.enable_mlir_variable_quantization:
raise ValueError(
"`_experimental_variable_quantization` is only supported for full"
" integer quantization."
)
def _validate_full_integer_quantization_bias_type(self):
"""Validates bias type for full interger quantization."""
bias_type = self._full_integer_quantization_bias_type
if not bias_type:
return
if self.activations_type() == _dtypes.float32:
raise ValueError(
"`full_integer_quantization_bias_type` is only supported for full"
" integer quantization."
)
if self.activations_type() == _dtypes.int8 and bias_type != _dtypes.int32:
raise ValueError(
"Expected bias type to be `dtypes.int32` for Int8Quant. "
f"Current setting bias type: {bias_type}"
)
if (
self.activations_type() == _dtypes.int16
and bias_type != _dtypes.int32
and bias_type != _dtypes.int64
):
raise ValueError(
"Expected bias type to be `dtypes.int32` or `dtypes.int64` for "
f"Int16Quant. Current setting bias type: {bias_type}"
)
def _is_int8_target_required(self):
return (
OpsSet.TFLITE_BUILTINS_INT8 in set(self._target_spec.supported_ops)
) or (set(self._target_spec.supported_types) == set([_dtypes.int8]))
def _is_int16x8_target_required(self):
return (
OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
in set(self._target_spec.supported_ops)
)
def is_allow_float(self):
return (OpsSet.TFLITE_BUILTINS in set(self._target_spec.supported_ops)) or (
OpsSet.SELECT_TF_OPS in set(self._target_spec.supported_ops)
)
def _is_float16_target_required(self):
return _dtypes.float16 in self._target_spec.supported_types
def is_any_optimization_enabled(self):
return bool(
set(self._optimizations).intersection([
Optimize.OPTIMIZE_FOR_LATENCY,
Optimize.OPTIMIZE_FOR_SIZE,
Optimize.DEFAULT,
])
)
def _smallest_supported_type(self):
if self._target_spec.supported_types:
return min(self._target_spec.supported_types, key=lambda x: x.size)
else:
# The default smallest supported type is INT8.
return _dtypes.int8
def is_quantization_aware_trained_model(self):
"""Checks if the graph contains any training-time quantization ops."""
training_quant_ops = frozenset({
"FakeQuantWithMinMaxVars",
"FakeQuantWithMinMaxVarsPerChannel",
"FakeQuantWithMinMaxArgs",
"QuantizeAndDequantizeV2",
"QuantizeAndDequantizeV3",
})
if self._graph_def:
for node_def in self._graph_def.node:
if node_def.op in training_quant_ops:
return True
for function in self._graph_def.library.function:
for node_def in function.node_def:
if node_def.op in training_quant_ops:
return True
return False
| QuantizationMode |
python | doocs__leetcode | solution/3000-3099/3062.Winner of the Linked List Game/Solution.py | {
"start": 151,
"end": 527
} | class ____:
def gameResult(self, head: Optional[ListNode]) -> str:
odd = even = 0
while head:
a = head.val
b = head.next.val
odd += a < b
even += a > b
head = head.next.next
if odd > even:
return "Odd"
if odd < even:
return "Even"
return "Tie"
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 108154,
"end": 108761
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("organization_id", "actor", "actor_type", "client_mutation_id")
organization_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="organizationId"
)
actor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="actor")
actor_type = sgqlc.types.Field(
sgqlc.types.non_null(ActorType), graphql_name="actorType"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| GrantMigratorRoleInput |
python | pytorch__pytorch | test/profiler/test_profiler.py | {
"start": 95936,
"end": 96514
} | class ____:
_name: str
_start_us: int
_duration_us: int
_linked_correlation_id: int
_device_type: int
@property
def name(self) -> str:
return self._name
def start_ns(self) -> int:
return self._start_us * 1000
def duration_ns(self) -> int:
return self._duration_us * 1000
def linked_correlation_id(self) -> int:
return self._linked_correlation_id
def device_type(self) -> DeviceType:
return DeviceType.CUDA if self._device_type == 1 else DeviceType.CPU
@dataclass(frozen=True)
| MockKinetoEvent |
python | getsentry__sentry | tests/sentry/incidents/endpoints/serializers/test_alert_rule_trigger.py | {
"start": 925,
"end": 2340
} | class ____(BaseAlertRuleTriggerSerializerTest, TestCase):
def test_simple(self) -> None:
alert_rule = self.create_alert_rule(resolve_threshold=200)
trigger = create_alert_rule_trigger(alert_rule, "hi", 1000)
result = serialize(trigger)
self.assert_alert_rule_trigger_serialized(trigger, result)
def test_decimal(self) -> None:
alert_rule = self.create_alert_rule(resolve_threshold=200.70)
trigger = create_alert_rule_trigger(alert_rule, "hi", 1000.50)
result = serialize(trigger)
self.assert_alert_rule_trigger_serialized(trigger, result)
def test_comparison_above(self) -> None:
alert_rule = self.create_alert_rule(
comparison_delta=60, detection_type=AlertRuleDetectionType.PERCENT
)
trigger = create_alert_rule_trigger(alert_rule, "hi", 180)
result = serialize(trigger)
self.assert_alert_rule_trigger_serialized(trigger, result, 80)
def test_comparison_below(self) -> None:
alert_rule = self.create_alert_rule(
comparison_delta=60,
threshold_type=AlertRuleThresholdType.BELOW,
detection_type=AlertRuleDetectionType.PERCENT,
)
trigger = create_alert_rule_trigger(alert_rule, "hi", 80)
result = serialize(trigger)
self.assert_alert_rule_trigger_serialized(trigger, result, 20)
| AlertRuleTriggerSerializerTest |
python | tensorflow__tensorflow | tensorflow/python/tools/api/generator2/generator/generator.py | {
"start": 6121,
"end": 25293
} | class ____:
v1_entrypoints_by_module: Mapping[str, set[_Entrypoint]]
v2_entrypoints_by_module: Mapping[str, set[_Entrypoint]]
v1_generated_imports_by_module: Mapping[str, set[str]]
v2_generated_imports_by_module: Mapping[str, set[str]]
docs_by_module: Mapping[str, str]
def get_module(dir_path: str, relative_to_dir: str) -> str:
"""Get module that corresponds to path relative to relative_to_dir.
Args:
dir_path: Path to directory.
relative_to_dir: Get module relative to this directory.
Returns:
Name of module that corresponds to the given directory.
"""
dir_path = dir_path[len(relative_to_dir) :]
# Convert path separators to '/' for easier parsing below.
dir_path = dir_path.replace(os.sep, '/')
return dir_path.replace('/', '.').strip('.')
def generate_proxy_api_files(
output_files: list[str], proxy_module_root: str, output_dir: str
):
"""Creates __init__.py files in proxy format for the Python API.
Args:
output_files: List of __init__.py file paths to create.
proxy_module_root: Module root for proxy-import format. If specified, proxy
files with content like `from proxy_module_root.proxy_module import *`
will be created to enable import resolution under TensorFlow.
output_dir: output API root directory.
"""
for file in output_files:
file_dir = os.path.dirname(file)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
module = get_module(file_dir, output_dir)
content = f'from {proxy_module_root}.{module} import *'
with open(file, 'w') as f:
f.write(content)
def _should_skip_file(
file: str,
file_prefixes_to_strip: Sequence[str],
packages_to_ignore: Sequence[str],
module_prefix: str,
) -> bool:
import_path = _get_import_path(file, file_prefixes_to_strip, module_prefix)
return any(import_path.startswith(package) for package in packages_to_ignore)
def get_public_api(
api_mapping_files: Sequence[str],
file_prefixes_to_strip: Sequence[str],
packages_to_ignore: Sequence[str],
output_package: str,
module_prefix: str,
) -> PublicAPI:
"""Generates the structure of the public API from the given files.
Args:
api_mapping_files: List of files containing the exported API mappings and
docstrings.
file_prefixes_to_strip: A list of prefixes to strip from files when
determining the packages to ignore.
packages_to_ignore: A list of python packages that should be ignored when
searching for tf_exports.
output_package: The package to use for the imports.
module_prefix: A prefix to add to the non-generated imports.
Raises:
DocExportedTwiceError: Two docstrings are registered for the same module.
Returns:
The public API structure.
"""
ea = exported_api.ExportedApi()
for f in api_mapping_files:
ea.read(f)
v1_entrypoints_by_module = collections.defaultdict(set)
v2_entrypoints_by_module = collections.defaultdict(set)
def add_exported_symbols(
api_names: list[str],
s: exported_api.ExportedSymbol,
entrypoints_by_module: Mapping[str, set[_Entrypoint]],
):
for api_name in api_names:
index_of_last_dot = api_name.rfind('.')
index_of_first_dot = api_name.find('.')
module = output_package
if index_of_first_dot + 1 < index_of_last_dot:
module += f'.{api_name[index_of_first_dot + 1:index_of_last_dot]}'
name = api_name[index_of_last_dot + 1 :]
entrypoints_by_module[module].add(_Entrypoint(module, name, s))
for s in ea.symbols:
if _should_skip_file(
s.file_name, file_prefixes_to_strip, packages_to_ignore, module_prefix
):
continue
add_exported_symbols(s.v1_apis, s, v1_entrypoints_by_module)
add_exported_symbols(s.v2_apis, s, v2_entrypoints_by_module)
v1_generated_imports_by_module = collections.defaultdict(set)
v2_generated_imports_by_module = collections.defaultdict(set)
def add_generated_imports(
entrypoints_by_module: Mapping[str, set[_Entrypoint]],
generated_imports_by_module: Mapping[str, set[str]],
):
for module in entrypoints_by_module:
i = module.rfind('.')
if i == -1:
continue
while i != -1:
parent = module[:i]
generated_imports_by_module[parent].add(module)
module = parent
i = module.rfind('.')
add_generated_imports(
v1_entrypoints_by_module, v1_generated_imports_by_module
)
add_generated_imports(
v2_entrypoints_by_module, v2_generated_imports_by_module
)
docs_by_module = {}
for d in ea.docs:
for m in d.modules:
if m in docs_by_module:
raise DocExportedTwiceError(
f'Docstring at {d.file_name}:{d.line_no} is registered for {m},'
' which already has a registered docstring.'
)
docs_by_module[m] = d.docstring
return PublicAPI(
v1_entrypoints_by_module=v1_entrypoints_by_module,
v2_entrypoints_by_module=v2_entrypoints_by_module,
v1_generated_imports_by_module=v1_generated_imports_by_module,
v2_generated_imports_by_module=v2_generated_imports_by_module,
docs_by_module=docs_by_module,
)
def _get_module_docstring(
docs_by_module: Mapping[str, str], module: str
) -> str:
if module in docs_by_module:
return docs_by_module[module]
module = module.replace('tensorflow', 'tf')
return f'Public API for {module} namespace'
def _get_imports_for_module(
module: str,
output_package: str,
symbols_by_module: Mapping[str, set[_Entrypoint]],
generated_imports_by_module: Mapping[str, set[str]],
file_prefixes_to_strip: Sequence[str],
module_prefix: str,
use_lazy_loading: bool,
subpackage_rewrite: Optional[str],
) -> str:
"""Returns the imports for a module.
Args:
module: The module to get imports for.
output_package: The package to use for the imports.
symbols_by_module: The symbols that should be exposed by each module.
generated_imports_by_module: The sub-modules that should be exposed by each
module.
file_prefixes_to_strip: The prefixes to strip from the file names of the
imports.
module_prefix: A prefix to add to the non-generated imports.
use_lazy_loading: Whether to use lazy loading or not.
subpackage_rewrite: The subpackage to use for the imports.
"""
content = ''
symbol_imports = list(symbols_by_module[module])
symbol_imports = sorted(
symbol_imports, key=lambda s: f'{s.exported_symbol.file_name}:{s.name}'
)
generated_imports = sorted(generated_imports_by_module[module])
for imp in generated_imports:
if subpackage_rewrite:
imp = imp.replace(output_package, subpackage_rewrite)
last_dot = imp.rfind('.')
if use_lazy_loading:
content += f" '{imp[last_dot+1:]}': ('', '{imp}'),\n"
else:
content += f'from {imp[:last_dot]} import {imp[last_dot+1:]}\n'
for s in symbol_imports:
content += (
f'{s.get_import(file_prefixes_to_strip, module_prefix, use_lazy_loading=use_lazy_loading)}\n'
)
return content
def gen_public_api(
output_dir: str,
output_package: str,
root_init_template: str,
api_version: int,
compat_api_versions: Sequence[int],
compat_init_templates: Sequence[str],
use_lazy_loading: bool,
file_prefixes_to_strip: Sequence[str],
mapping_files: Sequence[str],
packages_to_ignore: Sequence[str],
module_prefix: str,
root_file_name: str,
output_files: Set[str],
):
"""Generates the public API for tensorflow.
Args:
output_dir: The directory to output the files to.
output_package: The package to use for the imports.
root_init_template: The template for the root init file.
api_version: The version of the API to generate.
compat_api_versions: The versions of the compat APIs to generate.
compat_init_templates: The templates for the compat init files.
use_lazy_loading: Whether to use lazy loading or not.
file_prefixes_to_strip: The prefixes to strip from the file names of the
imports.
mapping_files: The mapping files created by the API Extractor.
packages_to_ignore: A list of python packages that should be ignored when
searching for tf_exports.
module_prefix: A prefix to add to the non-generated imports.
root_file_name: The file name that should be generated for the top level
API.
output_files: List of files expected to generate.
"""
public_api = get_public_api(
mapping_files,
file_prefixes_to_strip,
packages_to_ignore,
output_package,
module_prefix,
)
root_entrypoints_by_module = public_api.v2_entrypoints_by_module
root_generated_imports_by_module = public_api.v2_generated_imports_by_module
if api_version == 1:
root_entrypoints_by_module = public_api.v1_entrypoints_by_module
root_generated_imports_by_module = public_api.v1_generated_imports_by_module
for compat_version in compat_api_versions:
compat_package = f'{output_package}.compat'
compat_version_package = f'{compat_package}.v{compat_version}'
public_api.v2_generated_imports_by_module[compat_package].add(
compat_version_package
)
public_api.v1_generated_imports_by_module[compat_package].add(
compat_version_package
)
_gen_init_files(
output_dir,
output_package,
api_version,
root_entrypoints_by_module,
root_generated_imports_by_module,
public_api.docs_by_module,
root_init_template,
file_prefixes_to_strip,
use_lazy_loading,
module_prefix,
output_files,
root_file_name=root_file_name,
)
for compat_index, compat_version in enumerate(compat_api_versions):
compat_output_dir = os.path.join(output_dir, 'compat', f'v{compat_version}')
os.makedirs(compat_output_dir, exist_ok=True)
compat_version = int(compat_version)
compat_entrypoints_by_module = public_api.v2_entrypoints_by_module
compat_generated_imports_by_module = (
public_api.v2_generated_imports_by_module
)
if compat_version == 1:
compat_entrypoints_by_module = public_api.v1_entrypoints_by_module
compat_generated_imports_by_module = (
public_api.v1_generated_imports_by_module
)
_gen_init_files(
compat_output_dir,
output_package,
compat_version,
compat_entrypoints_by_module,
compat_generated_imports_by_module,
public_api.docs_by_module,
compat_init_templates[compat_index] if compat_init_templates else '',
file_prefixes_to_strip,
use_lazy_loading,
module_prefix,
output_files,
subpackage_rewrite=f'{output_package}.compat.v{compat_version}',
)
for nested_compat_index, nested_compat_version in enumerate(
compat_api_versions
):
nested_compat_version = int(nested_compat_version)
nested_compat_output_dir = os.path.join(
compat_output_dir, 'compat', f'v{nested_compat_version}'
)
nested_compat_entrypoints_by_module = public_api.v2_entrypoints_by_module
nested_compat_generated_imports_by_module = (
public_api.v2_generated_imports_by_module
)
if nested_compat_version == 1:
nested_compat_entrypoints_by_module = (
public_api.v1_entrypoints_by_module
)
nested_compat_generated_imports_by_module = (
public_api.v1_generated_imports_by_module
)
os.makedirs(nested_compat_output_dir, exist_ok=True)
gen_nested_compat_files(
nested_compat_output_dir,
output_package,
nested_compat_version,
nested_compat_entrypoints_by_module,
nested_compat_generated_imports_by_module,
public_api.docs_by_module,
compat_init_templates[nested_compat_index]
if compat_init_templates
else '',
file_prefixes_to_strip,
use_lazy_loading,
compat_api_versions,
module_prefix,
output_files,
)
def _get_module_wrapper(
module: str,
output_dir: str,
output_package: str,
api_version: int,
symbols_by_module: Mapping[str, set[_Entrypoint]],
use_lazy_loading: bool,
) -> str:
"""Returns the module wrapper for the given module."""
if api_version != 1 and not use_lazy_loading:
return ''
deprecated = 'False'
has_lite = 'False'
public_apis_name = 'None'
if api_version == 1 and not output_dir.strip('/').endswith('compat/v1'):
deprecated = 'True'
if 'lite' in symbols_by_module and use_lazy_loading:
has_lite = 'True'
if use_lazy_loading:
public_apis_name = '_PUBLIC_APIS'
return _DEPRECATION_FOOTER % (
module.removeprefix(output_package).strip('.'),
public_apis_name,
deprecated,
has_lite,
)
def _gen_init_files(
output_dir: str,
output_package: str,
api_version: int,
symbols_by_module: Mapping[str, set[_Entrypoint]],
generated_imports_by_module: Mapping[str, set[str]],
docs_by_module: Mapping[str, str],
root_template_path: str,
file_prefixes_to_strip: Sequence[str],
use_lazy_loading: bool,
module_prefix: str,
output_files: Set[str],
subpackage_rewrite: Optional[str] = None,
root_file_name='__init__.py',
):
"""Generates the __init__.py files for the given API version."""
modules = set(symbols_by_module.keys())
modules.update(generated_imports_by_module.keys())
for module in modules:
if len(module) < len(output_package):
continue
module_relative_to_package = module[len(output_package) + 1 :]
module_path = os.path.join(
output_dir, module_relative_to_package.replace('.', '/')
)
os.makedirs(module_path, exist_ok=True)
module_file_path = os.path.join(
module_path,
root_file_name if not module_relative_to_package else '__init__.py',
)
module_file_path = os.path.normpath(module_file_path)
if module_file_path not in output_files:
raise AssertionError(
f'Exported api attempted to write to "{module_file_path}" but it is'
' not in output_files.'
)
with open(module_file_path, 'w') as f:
module_imports = _get_imports_for_module(
module,
output_package,
symbols_by_module,
generated_imports_by_module,
file_prefixes_to_strip,
module_prefix,
use_lazy_loading,
subpackage_rewrite,
)
if use_lazy_loading:
module_imports = _LAZY_LOADING_MODULE_TEXT_TEMPLATE % module_imports
# If this module is the root and there is a root template, use it
if module == output_package and root_template_path:
with open(root_template_path, 'r') as template:
content = template.read()
content = content.replace('# API IMPORTS PLACEHOLDER', module_imports)
underscore_elements = [
s.name
for s in symbols_by_module[module]
if s.name.startswith('_')
]
for i in generated_imports_by_module[module]:
module_name = i[i.rfind('.') + 1 :]
if module_name.startswith('_'):
underscore_elements.append(module_name)
root_module_footer = f"""
_names_with_underscore = [{', '.join(sorted([f"'{s}'" for s in underscore_elements]))}]
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend([_s for _s in _names_with_underscore])
"""
content = content.replace('# __all__ PLACEHOLDER', root_module_footer)
content = content.replace(
'# WRAPPER_PLACEHOLDER',
_get_module_wrapper(
module,
output_dir,
output_package,
api_version,
symbols_by_module,
use_lazy_loading,
),
)
f.write(content)
continue
f.write(
_GENERATED_FILE_HEADER % _get_module_docstring(docs_by_module, module)
)
f.write(module_imports)
f.write(
_get_module_wrapper(
module,
output_dir,
output_package,
api_version,
symbols_by_module,
use_lazy_loading,
)
)
def gen_nested_compat_files(
    output_dir: str,
    output_package: str,
    api_version: int,
    symbols_by_module: Mapping[str, set[_Entrypoint]],
    generated_imports_by_module: Mapping[str, set[str]],
    docs_by_module: Mapping[str, str],
    root_template_path: str,
    file_prefixes_to_strip: Sequence[str],
    use_lazy_loading: bool,
    compat_versions: Sequence[int],
    module_prefix: str,
    output_files: Set[str],
):
  """Generates the nested compat __init__.py files.

  Builds filtered views of `symbols_by_module` / `generated_imports_by_module`
  that contain only the nested root package (`output_package`) and its
  `<output_package>.compat` package, then delegates file generation to
  `_gen_init_files` with `<compat_module>.v<api_version>` as the subpackage
  rewrite target. Finally it registers one generated import per entry in
  `compat_versions` on the compat module's import set.

  Args:
    output_dir: Directory that generated files are written into.
    output_package: Fully qualified name of the nested root package.
    api_version: API version number used to name the versioned compat
      subpackage (`.compat.v<api_version>`).
    symbols_by_module: Exported symbols keyed by module name.
    generated_imports_by_module: Generated import lines keyed by module name.
      NOTE: although typed as Mapping, the sets it holds are shared (not
      copied) into the nested views below, so the final loop's `.add(...)`
      mutates the caller's sets as well — presumably intentional, so later
      generation passes see the compat version imports. Verify before
      restructuring.
    docs_by_module: Module docstrings keyed by module name.
    root_template_path: Template file used for the root `__init__.py`.
    file_prefixes_to_strip: Source-path prefixes stripped during generation.
    use_lazy_loading: Whether generated modules use lazy loading.
    compat_versions: Compat API version numbers to register as imports.
    module_prefix: Prefix applied to generated module names.
    output_files: Set of normalized output paths that may be written.
  """
  nested_compat_symbols_by_module: dict[str, set[_Entrypoint]] = {}
  nested_generated_imports_by_module: dict[str, set[str]] = {}
  compat_module = f'{output_package}.compat'
  # The nested compat files should only generate imports for the nested root
  # package, and its corresponding compat package.
  if output_package in symbols_by_module:
    nested_compat_symbols_by_module[output_package] = symbols_by_module[
        output_package
    ]
  if compat_module in symbols_by_module:
    nested_compat_symbols_by_module[compat_module] = symbols_by_module[
        compat_module
    ]
  if output_package in generated_imports_by_module:
    nested_generated_imports_by_module[output_package] = (
        generated_imports_by_module[output_package]
    )
  if compat_module in generated_imports_by_module:
    nested_generated_imports_by_module[compat_module] = (
        generated_imports_by_module[compat_module]
    )
  # Generate the filtered init files, rewriting them under the versioned
  # compat subpackage (e.g. `<pkg>.compat.v2`).
  _gen_init_files(
      output_dir,
      output_package,
      api_version,
      nested_compat_symbols_by_module,
      nested_generated_imports_by_module,
      docs_by_module,
      root_template_path,
      file_prefixes_to_strip,
      use_lazy_loading,
      module_prefix,
      output_files,
      f'{compat_module}.v{api_version}',
  )
  # NOTE(review): if `compat_module` was absent from
  # `generated_imports_by_module` but `compat_versions` is non-empty, this
  # raises KeyError — confirm callers always populate the compat module's
  # import set before relying on this loop.
  for compat_version in compat_versions:
    nested_generated_imports_by_module[compat_module].add(
        f'{output_package}.compat.v{compat_version}'
    )
def main(argv: Sequence[str]) -> None:
  """Entry point: validates flags, then generates proxy or public API files.

  Raises:
    app.UsageError: If either --output_dir or --output_files is missing.
  """
  # Both flags are mandatory regardless of mode.
  if not _OUTPUT_DIR.value or not _OUTPUT_FILES.value:
    raise app.UsageError('--output_dir and --output_files are required')

  # Proxy mode short-circuits the full public-API generation.
  if _PROXY_MODULE_ROOT.value:
    generate_proxy_api_files(
        _OUTPUT_FILES.value, _PROXY_MODULE_ROOT.value, _OUTPUT_DIR.value
    )
    return

  normalized_outputs = [os.path.normpath(p) for p in _OUTPUT_FILES.value]
  # Truncate every declared output file up front so files that end up not
  # being (re)written never carry stale content.
  for out_path in normalized_outputs:
    with open(out_path, 'w'):
      pass

  gen_public_api(
      _OUTPUT_DIR.value,
      _OUTPUT_PACKAGE.value,
      _ROOT_INIT_TEMPLATE.value,
      _API_VERSION.value,
      [int(v) for v in _COMPAT_API_VERSIONS.value],
      _COMPAT_INIT_TEMPLATES.value,
      _USE_LAZY_LOADING.value,
      _FILE_PREFIXES_TO_STRIP.value,
      argv[1:],
      _PACKAGES_TO_IGNORE.value,
      _MODULE_PREFIX.value,
      _ROOT_FILE_PATH.value,
      set(normalized_outputs),
  )
| PublicAPI |
python | tensorflow__tensorflow | tensorflow/python/ops/special_math_ops_test.py | {
"start": 1586,
"end": 7051
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_one_dimensional_arg(self):
# Should evaluate to 1 and 1/2.
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.session():
self.assertAllClose(
1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one))))
self.assertAllClose(
0.5, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual([], special_math_ops.lbeta(x_one).get_shape())
@test_util.run_deprecated_v1
def test_one_dimensional_arg_dynamic(self):
# Should evaluate to 1 and 1/2.
x_one = [1, 1.]
x_one_half = [2, 1.]
with self.session():
ph = array_ops.placeholder(dtypes.float32)
beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
self.assertAllClose(1, beta_ph.eval(feed_dict={ph: x_one}))
self.assertAllClose(0.5,
beta_ph.eval(feed_dict={ph: x_one_half}))
@test_util.run_deprecated_v1
def test_four_dimensional_arg_with_partial_shape_dynamic(self):
x_ = np.ones((3, 2, 3, 4))
# Gamma(1) = 0! = 1
# Gamma(1 + 1 + 1 + 1) = Gamma(4) = 3! = 6
# ==> Beta([1, 1, 1, 1])
# = Gamma(1) * Gamma(1) * Gamma(1) * Gamma(1) / Gamma(1 + 1 + 1 + 1)
# = 1 / 6
expected_beta_x = 1 / 6 * np.ones((3, 2, 3))
with self.session():
x_ph = array_ops.placeholder(dtypes.float32, [3, 2, 3, None])
beta_ph = math_ops.exp(special_math_ops.lbeta(x_ph))
self.assertAllClose(expected_beta_x,
beta_ph.eval(feed_dict={x_ph: x_}))
@test_util.run_in_graph_and_eager_modes
def test_two_dimensional_arg(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.session():
self.assertAllClose(
[0.5, 0.5],
self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual((2,), special_math_ops.lbeta(x_one_half).get_shape())
@test_util.run_deprecated_v1
def test_two_dimensional_arg_dynamic(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.session():
ph = array_ops.placeholder(dtypes.float32)
beta_ph = math_ops.exp(special_math_ops.lbeta(ph))
self.assertAllClose([0.5, 0.5],
beta_ph.eval(feed_dict={ph: x_one_half}))
@test_util.run_in_graph_and_eager_modes
def test_two_dimensional_proper_shape(self):
# Should evaluate to 1/2.
x_one_half = [[2, 1.], [2, 1.]]
with self.session():
self.assertAllClose(
[0.5, 0.5],
self.evaluate(math_ops.exp(special_math_ops.lbeta(x_one_half))))
self.assertEqual(
(2,),
self.evaluate(array_ops.shape(special_math_ops.lbeta(x_one_half))))
self.assertEqual(
tensor_shape.TensorShape([2]),
special_math_ops.lbeta(x_one_half).get_shape())
@test_util.run_in_graph_and_eager_modes
def test_complicated_shape(self):
with self.session():
x = ops.convert_to_tensor(np.random.rand(3, 2, 2))
self.assertAllEqual(
(3, 2), self.evaluate(array_ops.shape(special_math_ops.lbeta(x))))
self.assertEqual(
tensor_shape.TensorShape([3, 2]),
special_math_ops.lbeta(x).get_shape())
@test_util.run_in_graph_and_eager_modes
def test_length_1_last_dimension_results_in_one(self):
# If there is only one coefficient, the formula still works, and we get one
# as the answer, always.
x_a = [5.5]
x_b = [0.1]
with self.session():
self.assertAllClose(
1,
self.evaluate(math_ops.exp(special_math_ops.lbeta(x_a))),
rtol=3e-6)
self.assertAllClose(
1, self.evaluate(math_ops.exp(special_math_ops.lbeta(x_b))))
self.assertEqual((), special_math_ops.lbeta(x_a).get_shape())
@test_util.run_in_graph_and_eager_modes
def test_empty_rank1_returns_negative_infinity(self):
with self.session():
x = constant_op.constant([], shape=[0])
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant(-np.inf, shape=())
self.assertAllEqual(self.evaluate(expected_result),
self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
@test_util.run_in_graph_and_eager_modes
def test_empty_rank2_with_zero_last_dim_returns_negative_infinity(self):
with self.session():
event_size = 0
for batch_size in [0, 1, 2]:
x = constant_op.constant([], shape=[batch_size, event_size])
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant(-np.inf, shape=[batch_size])
self.assertAllEqual(self.evaluate(expected_result),
self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
@test_util.run_in_graph_and_eager_modes
def test_empty_rank2_with_zero_batch_dim_returns_empty(self):
with self.session():
batch_size = 0
for event_size in [0, 1, 2]:
x = constant_op.constant([], shape=[batch_size, event_size])
lbeta_x = special_math_ops.lbeta(x)
expected_result = constant_op.constant([], shape=[batch_size])
self.assertAllEqual(self.evaluate(expected_result),
self.evaluate(lbeta_x))
self.assertEqual(expected_result.get_shape(), lbeta_x.get_shape())
@test_util.run_all_in_graph_and_eager_modes
| LBetaTest |
python | huggingface__transformers | src/transformers/models/qwen3_vl/modeling_qwen3_vl.py | {
"start": 10690,
"end": 11723
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, attn_implementation: str = "sdpa") -> None:
super().__init__()
self.norm1 = nn.LayerNorm(config.hidden_size, eps=1e-6)
self.norm2 = nn.LayerNorm(config.hidden_size, eps=1e-6)
self.attn = Qwen3VLVisionAttention(config=config)
self.mlp = Qwen3VLVisionMLP(config=config)
def forward(
self,
hidden_states: torch.Tensor,
cu_seqlens: torch.Tensor,
rotary_pos_emb: Optional[torch.Tensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs,
) -> torch.Tensor:
hidden_states = hidden_states + self.attn(
self.norm1(hidden_states),
cu_seqlens=cu_seqlens,
rotary_pos_emb=rotary_pos_emb,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
return hidden_states
| Qwen3VLVisionBlock |
python | coleifer__peewee | tests/regressions.py | {
"start": 66021,
"end": 66726
} | class ____(ModelTestCase):
requires = [IMC]
def test_chunked_insert_many(self):
data = [(i, i if i % 2 == 0 else None) for i in range(100)]
for chunk in chunked(data, 10):
IMC.insert_many(chunk).execute()
q = IMC.select(IMC.a, IMC.b).order_by(IMC.id).tuples()
self.assertEqual(list(q), data)
IMC.delete().execute()
data = [{'a': i, 'b': i if i % 2 == 0 else None} for i in range(100)]
for chunk in chunked(data, 5):
IMC.insert_many(chunk).execute()
q = IMC.select(IMC.a, IMC.b).order_by(IMC.id).dicts()
self.assertEqual(list(q), data)
IMC.delete().execute()
@slow_test()
| TestChunkedInsertMany |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/model_service.py | {
"start": 29753,
"end": 33246
} | class ____(GoogleCloudBaseOperator):
"""
Deletes version aliases for the Model.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param model_id: Required. The ID of the model to delete version aliases from.
Should be in format `projects/{project}/locations/{location}/models/{model_id}@{version_id}` or
`projects/{project}/locations/{location}/models/{model_id}@{version_alias}`.
:param version_aliases: List of version aliases to be deleted from model version.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("model_id", "project_id", "impersonation_chain")
operator_extra_links = (VertexAIModelLink(),)
def __init__(
self,
*,
region: str,
project_id: str,
model_id: str,
version_aliases: Sequence[str],
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.region = region
self.project_id = project_id
self.model_id = model_id
self.version_aliases = version_aliases
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.region,
"project_id": self.project_id,
}
def execute(self, context: Context):
hook = ModelServiceHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info(
"Deleting aliases %s from model version %s",
self.version_aliases,
self.model_id.rpartition("@")[0],
)
updated_model = hook.delete_version_aliases(
region=self.region,
model_id=self.model_id,
version_aliases=self.version_aliases,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
VertexAIModelLink.persist(context=context, model_id=self.model_id)
return Model.to_dict(updated_model)
| DeleteVersionAliasesOnModelOperator |
python | encode__starlette | tests/test_exceptions.py | {
"start": 1004,
"end": 1374
} | class ____(HTTPException):
pass
async def read_body_and_raise_exc(request: Request) -> None:
await request.body()
raise BadBodyException(422)
async def handler_that_reads_body(request: Request, exc: BadBodyException) -> JSONResponse:
body = await request.body()
return JSONResponse(status_code=422, content={"body": body.decode()})
| BadBodyException |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/forLoop1.py | {
"start": 503,
"end": 855
} | class ____(object):
def __aiter__(self):
return self
async def __anext__(self):
return 1
iter1 = AsyncIterable1()
async def func2():
async for foo3 in iter1:
requires_int(foo3)
for d in [b for b in list1]:
requires_int(d)
for e in [b async for b in iter1]:
requires_int(e)
| AsyncIterable1 |
python | bokeh__bokeh | src/bokeh/core/serialization.py | {
"start": 3719,
"end": 3846
} | class ____(TypedDict):
type: Literal["typed_array"]
array: BytesRep
order: ByteOrder
dtype: DataType
| TypedArrayRep |
python | modin-project__modin | modin/config/envvars.py | {
"start": 26396,
"end": 26545
} | class ____(EnvironmentVariable, type=int):
"""How may GPU devices to utilize across the whole distribution."""
varname = "MODIN_GPUS"
| GpuCount |
python | pytorch__pytorch | test/inductor/test_max_autotune.py | {
"start": 95072,
"end": 99051
} | class ____(TestCase):
def test_precompilation_threads(self):
import threading
from typing import Any
from unittest.mock import Mock, patch
class FakeChoiceCaller(ChoiceCaller):
def __init__(self) -> None:
super().__init__("none", [], Mock(), description="")
self.thread_id = None
def precompile(self):
self.thread_id = threading.get_ident()
def call_name(self) -> str:
return None
def to_callable(self):
return None
def hash_key(self) -> str:
return str(hash(self))
def output_node(self) -> "TensorBox": # noqa: F821
return None
fake_choices = [FakeChoiceCaller() for i in range(10)]
fake_lookup_result = dict.fromkeys(fake_choices, 0.123)
def no_lookup(
choices: list[ChoiceCaller],
op: str,
inputs: str,
benchmark: Callable[[Any], dict[ChoiceCaller, float]],
hint_override: Optional[int] = None,
) -> Optional[dict[ChoiceCaller, float]]:
if benchmark is not None:
return benchmark(choices)
asc = AlgorithmSelectorCache()
def fake_benchmark_fn(*args, **kwargs):
return fake_lookup_result
main_thread_id = threading.get_ident()
mock_debug_handler = Mock()
old_debug_handler = V.debug
try:
V.set_debug_handler(mock_debug_handler)
with patch.object(asc, "lookup", new=no_lookup):
with patch.object(
asc, "make_benchmark_fn", return_value=fake_benchmark_fn
):
with config.patch(
{
"autotune_in_subproc": False,
"compile_threads": len(fake_choices),
}
):
asc("test_call", fake_choices, [], Mock())
for fake_choice in fake_choices:
assert fake_choice.thread_id is not None, (
"Expected all ChoiceCaller's precompile method to have been called"
)
assert fake_choice.thread_id != main_thread_id, (
"Expected all ChoiceCaller's precompile method to have been called on separate thread"
)
finally:
V.set_debug_handler(old_debug_handler)
def test_filled_cache_precompile(self):
def fn(a, b, c):
a = (a @ b) @ c
a, b, c = (t.to(torch.float16) for t in [a, b, c])
return (a @ b) @ c
fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn)
inputs = [torch.rand([256, 256], device=GPU_TYPE) for _ in range(3)]
from torch._dynamo.utils import counters
self.assertEqual(fn(*inputs), fn_c(*inputs), atol=1e-2, rtol=1e-2)
torch._dynamo.reset()
counters.clear()
fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn)
self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 0)
@config.patch(autotune_local_cache=False, autotune_remote_cache=False)
@runOnRocmArch(MI300_ARCH)
@unittest.skipIf(config.triton.native_matmul, "native matmul has counter 0")
def test_precompilations(self):
def fn(a, b, c):
a = (a @ b) @ c
a, b, c = (t.to(torch.float16) for t in [a, b, c])
return (a @ b) @ c
fn_c = torch.compile(mode="max-autotune-no-cudagraphs")(fn)
inputs = [torch.rand([256, 256], device=GPU_TYPE) for _ in range(3)]
torch.testing.assert_close(fn_c(*inputs), fn(*inputs), atol=1e-2, rtol=1e-2)
from torch._dynamo.utils import counters
self.assertEqual(counters["inductor"]["select_algorithm_precompile"], 2)
@instantiate_parametrized_tests
| TestMaxAutotunePrecompile |
python | PyCQA__pylint | tests/pyreverse/functional/class_diagrams/colorized_output/colorized.py | {
"start": 167,
"end": 355
} | class ____:
def __init__(self):
self.checker1 = ExceptionsChecker(None)
self.checker2 = ElseifUsedChecker(None)
self.checker3 = StdlibChecker(None)
| CheckerCollector |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/connectors/aioodbc.py | {
"start": 714,
"end": 1060
} | class ____(AsyncAdapt_dbapi_cursor):
__slots__ = ()
def setinputsizes(self, *inputsizes):
# see https://github.com/aio-libs/aioodbc/issues/451
return self._cursor._impl.setinputsizes(*inputsizes)
# how it's supposed to work
# return await_(self._cursor.setinputsizes(*inputsizes))
| AsyncAdapt_aioodbc_cursor |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 38629,
"end": 39172
} | class ____(TestCase):
def test_n_by_n(self):
actual = list(mi.matmul([(7, 5), (3, 5)], [[2, 5], [7, 9]]))
expected = [(49, 80), (41, 60)]
self.assertEqual(actual, expected)
def test_m_by_n(self):
m1 = [[2, 5], [7, 9], [3, 4]]
m2 = [[7, 11, 5, 4, 9], [3, 5, 2, 6, 3]]
actual = list(mi.matmul(m1, m2))
expected = [
(29, 47, 20, 38, 33),
(76, 122, 53, 82, 90),
(33, 53, 23, 36, 39),
]
self.assertEqual(actual, expected)
| MatMulTests |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_oklahoma_zip.py | {
"start": 1751,
"end": 4094
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Oklahoma zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_oklahoma_zip": ["73001", "73620", "74027", "74966"],
"invalid_oklahoma_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_oklahoma_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_oklahoma_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_oklahoma_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidOklahomaZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidOklahomaZip |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-code-hierarchy/tests/test_code_hierarchy_with_skeleton.py | {
"start": 533,
"end": 920
} | class ____:
def bar() -> None:
print("bar")
async def baz():
print("baz")"""
text_node = TextNode(
text=text,
metadata={
"module": "example.foo",
},
)
chunks: List[TextNode] = code_splitter.get_nodes_from_documents([text_node])
# This is the module scope
assert (
chunks[0].text
== f"""\
| Foo |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 276555,
"end": 276896
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field(DeploymentProtectionRule, graphql_name="node")
| DeploymentProtectionRuleEdge |
python | django__django | django/db/models/fields/__init__.py | {
"start": 99419,
"end": 99641
} | class ____(AutoFieldMixin, BigIntegerField):
def get_internal_type(self):
return "BigAutoField"
def rel_db_type(self, connection):
return BigIntegerField().db_type(connection=connection)
| BigAutoField |
python | PyCQA__pylint | tests/functional/e/enum_self_defined_member_6805.py | {
"start": 225,
"end": 317
} | class ____:
def __new__(cls, *_args, **_kwargs):
return object.__new__(cls)
| Parent |
python | ipython__ipython | IPython/core/formatters.py | {
"start": 29038,
"end": 30278
} | class ____(BaseFormatter):
"""A JSON string formatter.
To define the callables that compute the JSONable representation of
your objects, define a :meth:`_repr_json_` method or use the :meth:`for_type`
or :meth:`for_type_by_name` methods to register functions that handle
this.
The return value of this formatter should be a JSONable list or dict.
JSON scalars (None, number, string) are not allowed, only dict or list containers.
"""
format_type = Unicode('application/json')
_return_type = (list, dict)
print_method = ObjectName('_repr_json_')
def _check_return(self, r, obj):
"""Check that a return value is appropriate
Return the value if so, None otherwise, warning if invalid.
"""
if r is None:
return
md = None
if isinstance(r, tuple):
# unpack data, metadata tuple for type checking on first element
r, md = r
assert not isinstance(
r, str
), "JSON-as-string has been deprecated since IPython < 3"
if md is not None:
# put the tuple back together
r = (r, md)
return super(JSONFormatter, self)._check_return(r, obj)
| JSONFormatter |
python | huggingface__transformers | tests/quantization/bnb/test_mixed_int8.py | {
"start": 28448,
"end": 34252
} | class ____(BaseMixedInt8Test):
def setUp(self):
super().setUp()
def check_inference_correctness(self, model):
# Check that inference pass works on the model
encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
# Check the exactness of the results
output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10)
# Get the generation
output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True)
self.assertIn(output_text, self.EXPECTED_OUTPUTS)
def test_cpu_accelerator_loading_random_device_map(self):
r"""
A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`.
"""
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": 0,
"lm_head": 0,
"transformer.h.0": "cpu",
"transformer.h.1": "cpu",
"transformer.h.2": 0,
"transformer.h.3": 0,
"transformer.h.4": 0,
"transformer.h.5": 0,
"transformer.h.6": 0,
"transformer.h.7": 0,
"transformer.h.8": 0,
"transformer.h.9": 1,
"transformer.h.10": 0,
"transformer.h.11": 1,
"transformer.h.12": 0,
"transformer.h.13": 0,
"transformer.h.14": 1,
"transformer.h.15": 0,
"transformer.h.16": 0,
"transformer.h.17": 1,
"transformer.h.18": 1,
"transformer.h.19": 0,
"transformer.h.20": 1,
"transformer.h.21": 1,
"transformer.h.22": 0,
"transformer.h.23": 0,
"transformer.ln_f": 1,
}
bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True)
model_8bit = AutoModelForCausalLM.from_pretrained(
self.model_name,
device_map=device_map,
quantization_config=bnb_config,
)
# Check that the model has been correctly set on device 0, 1, and `cpu`.
self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"})
self.check_inference_correctness(model_8bit)
def test_cpu_accelerator_loading_custom_device_map(self):
r"""
A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`.
This time the device map is more organized than the test above and uses the abstraction
`transformer.h` to encapsulate all the decoder layers.
"""
device_map = {
"transformer.word_embeddings": "cpu",
"transformer.word_embeddings_layernorm": "cpu",
"lm_head": "cpu",
"transformer.h": 0,
"transformer.ln_f": 1,
}
bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True)
# Load model
model_8bit = AutoModelForCausalLM.from_pretrained(
self.model_name,
device_map=device_map,
quantization_config=bnb_config,
)
# Check that the model has been correctly set on device 0, 1, and `cpu`.
self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"})
self.check_inference_correctness(model_8bit)
def test_cpu_accelerator_disk_loading_custom_device_map(self):
r"""
A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`.
This time we also add `disk` on the device_map.
"""
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": "cpu",
"lm_head": 0,
"transformer.h": 1,
"transformer.ln_f": "disk",
}
bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True)
with tempfile.TemporaryDirectory() as tmpdirname:
# Load model
model_8bit = AutoModelForCausalLM.from_pretrained(
self.model_name,
device_map=device_map,
quantization_config=bnb_config,
offload_folder=tmpdirname,
)
# Check that the model has been correctly set on device 0, 1, and `cpu`.
self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"})
self.check_inference_correctness(model_8bit)
def test_cpu_accelerator_disk_loading_custom_device_map_kwargs(self):
r"""
A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`.
This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config
"""
device_map = {
"transformer.word_embeddings": 0,
"transformer.word_embeddings_layernorm": "cpu",
"lm_head": 0,
"transformer.h": 1,
"transformer.ln_f": "disk",
}
with tempfile.TemporaryDirectory() as tmpdirname:
# Load model
model_8bit = AutoModelForCausalLM.from_pretrained(
self.model_name,
device_map=device_map,
quantization_config=BitsAndBytesConfig(load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True),
offload_folder=tmpdirname,
)
# Check that the model has been correctly set on device 0, 1, and `cpu`.
self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"})
self.check_inference_correctness(model_8bit)
@apply_skip_if_not_implemented
| MixedInt8TestCpuGpu |
python | doocs__leetcode | solution/2300-2399/2328.Number of Increasing Paths in a Grid/Solution.py | {
"start": 0,
"end": 519
} | class ____:
def countPaths(self, grid: List[List[int]]) -> int:
@cache
def dfs(i: int, j: int) -> int:
ans = 1
for a, b in pairwise((-1, 0, 1, 0, -1)):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and grid[i][j] < grid[x][y]:
ans = (ans + dfs(x, y)) % mod
return ans
mod = 10**9 + 7
m, n = len(grid), len(grid[0])
return sum(dfs(i, j) for i in range(m) for j in range(n)) % mod
| Solution |
python | doocs__leetcode | solution/1400-1499/1499.Max Value of Equation/Solution.py | {
"start": 0,
"end": 352
} | class ____:
def findMaxValueOfEquation(self, points: List[List[int]], k: int) -> int:
ans = -inf
pq = []
for x, y in points:
while pq and x - pq[0][1] > k:
heappop(pq)
if pq:
ans = max(ans, x + y - pq[0][0])
heappush(pq, (x - y, x))
return ans
| Solution |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_with_poly.py | {
"start": 1235,
"end": 5132
} | class ____(_PolymorphicFixtureBase):
def test_join_base_to_sub(self):
sess = fixture_session()
pa = with_polymorphic(Person, [Engineer])
def go():
eq_(
sess.query(pa)
.filter(pa.Engineer.primary_language == "java")
.all(),
self._emps_wo_relationships_fixture()[0:1],
)
self.assert_sql_count(testing.db, go, 1)
@testing.combinations((True,), (False,), argnames="use_star")
def test_col_expression_base_plus_two_subs(self, use_star):
sess = fixture_session()
if use_star:
pa = with_polymorphic(Person, "*")
else:
pa = with_polymorphic(Person, [Engineer, Manager])
eq_(
sess.query(
pa.name, pa.Engineer.primary_language, pa.Manager.manager_name
)
.filter(
or_(
pa.Engineer.primary_language == "java",
pa.Manager.manager_name == "dogbert",
)
)
.order_by(pa.Engineer.type)
.all(),
[("dilbert", "java", None), ("dogbert", None, "dogbert")],
)
def test_orm_entity_w_gc(self):
"""test #6680"""
sess = fixture_session()
stmt = select(with_polymorphic(Person, "*"))
eq_(len(sess.execute(stmt).all()), 5)
def test_join_to_join_entities(self):
sess = fixture_session()
pa = with_polymorphic(Person, [Engineer])
pa_alias = with_polymorphic(Person, [Engineer], aliased=True)
eq_(
[
(p1.name, type(p1), p2.name, type(p2))
for (p1, p2) in sess.query(pa, pa_alias)
.join(
pa_alias,
or_(
pa.Engineer.primary_language
== pa_alias.Engineer.primary_language,
and_(
pa.Engineer.primary_language == None, # noqa
pa_alias.Engineer.primary_language == None,
pa.person_id > pa_alias.person_id,
),
),
)
.order_by(pa.name, pa_alias.name)
],
[
("dilbert", Engineer, "dilbert", Engineer),
("dogbert", Manager, "pointy haired boss", Boss),
("vlad", Engineer, "vlad", Engineer),
("wally", Engineer, "wally", Engineer),
],
)
def test_join_to_join_columns(self):
sess = fixture_session()
pa = with_polymorphic(Person, [Engineer])
pa_alias = with_polymorphic(Person, [Engineer], aliased=True)
eq_(
[
row
for row in sess.query(
pa.name,
pa.Engineer.primary_language,
pa_alias.name,
pa_alias.Engineer.primary_language,
)
.join(
pa_alias,
or_(
pa.Engineer.primary_language
== pa_alias.Engineer.primary_language,
and_(
pa.Engineer.primary_language == None, # noqa
pa_alias.Engineer.primary_language == None,
pa.person_id > pa_alias.person_id,
),
),
)
.order_by(pa.name, pa_alias.name)
],
[
("dilbert", "java", "dilbert", "java"),
("dogbert", None, "pointy haired boss", None),
("vlad", "cobol", "vlad", "cobol"),
("wally", "c++", "wally", "c++"),
],
)
| _WithPolymorphicBase |
python | pytorch__pytorch | torch/_inductor/kernel/flex/flex_attention.py | {
"start": 18104,
"end": 33390
} | class ____:
"""Results from processing joint outputs."""
grad_input: ComputedBuffer
captured_grads_compute: list[ComputedBuffer]
captured_grads: list[Optional[TensorBox]]
mutated_grads: list[TensorBox]
def process_joint_outputs(
all_joint_outputs: SubgraphResults, num_placeholders: int
) -> JointOutputResult:
"""Process joint outputs and extract various buffers needed for lowering
Args:
all_joint_outputs: List of all the outputs from build_subgraphs
num_placeholders: The number of placeholder inputs, used to skip over unused backward compute buffers
Returns:
JointOutputResult containing processed buffers and gradients
"""
assert isinstance(all_joint_outputs, list)
assert all_joint_outputs[0] is not None, (
"joint_subgraph_buffer is None - this is a bug!"
)
joint_buffer = all_joint_outputs[0]
other_grads = all_joint_outputs[num_placeholders - 1 :]
# outer_grads has the structure: Len(other_buffer_grads) if buffer doesn't require grad than it will be None
# We only grab the buffers that require grad for inlining into kernel
grads_compute = [buf for buf in other_grads if buf is not None]
def get_out(buf):
if buf is None:
return None
assert isinstance(buf, ComputedBuffer)
assert buf.name is not None
return TensorBox.create(V.graph.get_buffer(buf.name))
grads_out = [get_out(x) for x in other_grads]
mutated_grads = [buf for buf in grads_out if buf is not None]
return JointOutputResult(
grad_input=joint_buffer,
captured_grads_compute=grads_compute,
captured_grads=grads_out,
mutated_grads=mutated_grads,
)
# TODO: We probably also need a layout constraint?
@register_lowering(
torch.ops.higher_order.flex_attention_backward, type_promotion_kind=None
)
def flex_attention_backward(*args, **kwargs):
"""Lowering for the flex_attention_backward op in triton"""
(
query,
key,
value,
out,
logsumexp,
grad_out,
grad_logsumexp,
fw_graph,
joint_graph,
block_mask,
scale,
kernel_options,
score_mod_other_buffers,
mask_mod_other_buffers,
) = args
(
_, # q_length
_, # kv_length
kv_num_blocks,
kv_indices,
full_kv_num_blocks,
full_kv_indices,
q_num_blocks,
q_indices,
full_q_num_blocks,
full_q_indices,
SPARSE_Q_BLOCK_SIZE,
SPARSE_KV_BLOCK_SIZE,
mask_graph,
) = block_mask
(
query,
key,
value,
logsumexp,
grad_out,
kv_num_blocks,
kv_indices,
full_kv_num_blocks,
full_kv_indices,
q_num_blocks,
q_indices,
full_q_num_blocks,
full_q_indices,
) = maybe_realize(
[
query,
key,
value,
logsumexp,
grad_out,
kv_num_blocks,
kv_indices,
full_kv_num_blocks,
full_kv_indices,
q_num_blocks,
q_indices,
full_q_num_blocks,
full_q_indices,
]
)
device = query.get_device()
dtype = query.get_dtype()
Bq, Hq, seq_len_q, qk_head_dim = query.get_size()
Bkv, Hkv, seq_len_kv, v_head_dim = value.get_size()
assert V.graph.sizevars.evaluate_expr(sympy.Eq(Bq, Bkv) | sympy.Eq(Bkv, 1)), (
f"Bq and Bkv must broadcastable. Got Bq={Bq} and Bkv={Bkv}"
)
kernel_options, backend = _sanitize_kernel_options_for_triton(kernel_options)
# Mark symbols in custom kernel options as static shapes and add guards.
kernel_options = {
k: V.graph.sizevars.guard_int(v) if isinstance(v, sympy.Symbol) else v
for k, v in kernel_options.items()
}
kernel_options.setdefault("FLOAT32_PRECISION", get_float32_precision())
seq_q_divisible = V.graph.sizevars.statically_known_true(seq_len_q % 128 == 0)
seq_kv_divisible = V.graph.sizevars.statically_known_true(seq_len_kv % 128 == 0)
if seq_q_divisible and seq_kv_divisible:
kernel_options.setdefault("IS_DIVISIBLE", True)
else:
kernel_options.setdefault("IS_DIVISIBLE", False)
fwd_placeholder_inps = [
create_placeholder(name, dtype, device)
for name, dtype in [
("score", dtype),
("b", torch.int32),
("h", torch.int32),
("m", torch.int32),
("n", torch.int32),
]
]
fw_subgraph_buffer = build_subgraph_buffer(
fwd_placeholder_inps + list(score_mod_other_buffers), fw_graph
)
freeze_irnodes(fw_subgraph_buffer)
joint_placeholder_inps = fwd_placeholder_inps + [
create_placeholder("grad_score_mod", dtype, device)
]
# Sometimes we have weird unused nodes here
joint_graph.graph_module.graph.eliminate_dead_code()
# It is hard to raise nice errors for some joint graphs during subgraph lowering
# This lets us do some checks before attempting to lower
validate_joint_graph(joint_graph.graph_module.graph)
all_joint_outputs = build_subgraph_buffer(
joint_placeholder_inps + list(score_mod_other_buffers),
joint_graph,
)
freeze_irnodes(all_joint_outputs)
joint_outputs = process_joint_outputs(
all_joint_outputs, len(joint_placeholder_inps)
)
mask_graph_placeholder_inps = [
create_placeholder(name, dtype, query.get_device())
for name, dtype in [
("b", torch.int32),
("h", torch.int32),
("m", torch.int32),
("n", torch.int32),
]
]
mask_graph_buffer = build_subgraph_buffer(
mask_graph_placeholder_inps + list(mask_mod_other_buffers), mask_graph
)
freeze_irnodes(mask_graph_buffer)
if _use_flex_flash_attention_backward(
fw_graph,
mask_graph,
backend=backend,
):
return create_flex_flash_attention_backward_kernel(
query, key, value, out, logsumexp, grad_out, scale, kernel_options
)
# Construct layout with stride order matching K
key_size = [Bq, Hkv, seq_len_kv, qk_head_dim]
key_strides = infer_dense_strides(key_size, key.get_stride())
layout_broadcasted_k = FixedLayout(
key.get_device(),
key.get_dtype(),
key_size,
stride=[sympy.sympify(s) for s in key_strides],
)
# Create delta which will is needed for the bwd's kernel
grad_lse_exp2 = lowerings[aten.mul](grad_logsumexp, 1 / math.log(2))
mul_delta = lowerings[aten.mul](out, grad_out)
delta = lowerings[aten.sum](mul_delta, axis=-1)
delta = lowerings[aten.sub](delta, grad_lse_exp2)
delta = ExternKernel.require_contiguous(delta)
grad_lse_exp2, delta = maybe_realize([grad_lse_exp2, delta])
# # see NOTE:[TritonTemplates with multiple outputs]
query_size = [Bq, Hq, seq_len_q, qk_head_dim]
grad_query_strides = infer_dense_strides(query_size, query.get_stride())
grad_query = empty_strided(
query_size,
stride=[sympy.sympify(s) for s in grad_query_strides],
dtype=query.get_dtype(),
device=query.get_device(),
)
# Construct output layout with stride order matching value
value_size = [Bq, Hkv, seq_len_kv, v_head_dim]
value_strides = infer_dense_strides(value_size, value.get_stride())
broadcasted_grad_value = empty_strided(
value_size,
stride=[sympy.sympify(s) for s in value_strides],
dtype=value.get_dtype(),
device=value.get_device(),
)
kernel_options.setdefault("SM_SCALE", scale)
# Determine GQA factor
gqa_shared_heads = Hq // Hkv
kernel_options.setdefault("GQA_SHARED_HEADS", gqa_shared_heads)
# Inside of Triton kernel, only apply partial masking if partial blocks are computed.
# full_kv_num_blocks is torch.zeros([1, 1, 1]) if partial blocks are not computed.
has_full_blocks = full_kv_num_blocks is not None
kernel_options.setdefault("HAS_FULL_BLOCKS", has_full_blocks)
if not has_full_blocks:
full_kv_num_blocks, full_kv_indices, full_q_num_blocks, full_q_indices = (
empty(0, device=query.get_device()) for _ in range(4)
)
set_head_dim_values(kernel_options, qk_head_dim, v_head_dim, V.graph.sizevars)
SPARSE_Q_BLOCK_SIZE = V.graph.sizevars.guard_int(SPARSE_Q_BLOCK_SIZE)
SPARSE_KV_BLOCK_SIZE = V.graph.sizevars.guard_int(SPARSE_KV_BLOCK_SIZE)
choices: list[Any] = []
dtype = query.get_dtype()
head_dim = V.graph.sizevars.guard_int(query.get_size()[-1])
configs: list[FlexBwDConfig] = V.choices.get_flex_attention_bwd_configs(
head_dim, dtype, query.get_device().type
)
# Default config for warp specialization
num_consumer_groups, num_buffers_warp_spec = 0, 0
original_kernel_options = kernel_options.copy()
for conf in configs:
if (
SPARSE_KV_BLOCK_SIZE % conf.block_n1 != 0
or SPARSE_Q_BLOCK_SIZE % conf.block_m1 != 0
or SPARSE_KV_BLOCK_SIZE % conf.block_n2 != 0
or SPARSE_Q_BLOCK_SIZE % conf.block_m2 != 0
):
continue
# Performance tuning
# Triton heuristics
cur_kernel_options = original_kernel_options.copy()
# Remove prefix for backward kernels options and delete forward kernel options.
for k in list(cur_kernel_options.keys()):
if k.startswith("bwd_"):
v = cur_kernel_options.pop(k)
cur_kernel_options[k[4:]] = v
if k.startswith("fwd_"):
cur_kernel_options.pop(k)
cur_kernel_options.setdefault("num_warps", conf.num_warps)
cur_kernel_options.setdefault("num_stages", conf.num_stages)
if cur_kernel_options.get("num_consumer_groups", False):
cur_kernel_options.setdefault("num_consumer_groups", num_consumer_groups)
cur_kernel_options.setdefault(
"num_buffers_warp_spec", num_buffers_warp_spec
)
cur_kernel_options.setdefault("BLOCK_M1", conf.block_m1)
cur_kernel_options.setdefault("BLOCK_N1", conf.block_n1)
cur_kernel_options.setdefault("BLOCK_M2", conf.block_m2)
cur_kernel_options.setdefault("BLOCK_N2", conf.block_n2)
# Blocksparse options
cur_kernel_options.setdefault("SPARSE_Q_BLOCK_SIZE", SPARSE_Q_BLOCK_SIZE)
cur_kernel_options.setdefault("SPARSE_KV_BLOCK_SIZE", SPARSE_KV_BLOCK_SIZE)
# ROCm specific kernargs
for attrib in ["kpack", "matrix_instr_nonkdim", "waves_per_eu"]:
if hasattr(conf, attrib):
cur_kernel_options[attrib] = getattr(conf, attrib)
flex_attention_backward_template.maybe_append_choice(
choices=choices,
input_nodes=[
query,
key,
value,
logsumexp,
delta,
grad_out,
grad_query,
broadcasted_grad_value,
kv_num_blocks,
kv_indices,
q_num_blocks,
q_indices,
full_kv_num_blocks,
full_kv_indices,
full_q_num_blocks,
full_q_indices,
],
layout=layout_broadcasted_k, # We use store_output only for grad_key
subgraphs=[
fw_subgraph_buffer,
joint_outputs.grad_input,
mask_graph_buffer,
joint_outputs.captured_grads_compute,
],
mutated_inputs=[
grad_query,
broadcasted_grad_value,
*joint_outputs.mutated_grads,
],
call_sizes=query.get_size() + key.get_size()[1:3],
**cur_kernel_options,
)
inputs_for_autotuning = (
# pyrefly: ignore [unsupported-operation]
[
query,
key,
value,
logsumexp,
delta,
grad_out,
grad_query,
broadcasted_grad_value,
kv_num_blocks,
kv_indices,
q_num_blocks,
q_indices,
full_kv_num_blocks,
full_kv_indices,
full_q_num_blocks,
full_q_indices,
]
+ list(score_mod_other_buffers)
+ list(mask_mod_other_buffers)
+ joint_outputs.mutated_grads
)
input_gen_fns = {
8: create_num_blocks_fake_generator(kv_indices), # kv_num_blocks
9: create_indices_fake,
10: create_num_blocks_fake_generator(q_indices), # q_num_blocks
11: create_indices_fake,
12: create_num_blocks_fake_generator(full_kv_indices), # full_kv_num_blocks
13: create_indices_fake,
14: create_num_blocks_fake_generator(full_q_indices), # full_q_num_blocks
15: create_indices_fake,
}
broadcasted_grad_key = autotune_select_algorithm(
"flex_attention_backward",
choices,
[x for x in inputs_for_autotuning if isinstance(x, torch._inductor.ir.IRNode)],
layout_broadcasted_k,
input_gen_fns=input_gen_fns,
) # [Bq, Hkv, seq_len_kv, k_head_dim]
# need subgraph inputs and outputs to analyze all symints used in flex attention
broadcasted_grad_key.data.data.subgraph_inps = list(score_mod_other_buffers) + list(
mask_mod_other_buffers
)
broadcasted_grad_key.data.data.subgraph_outs = get_bwd_subgraph_outputs(
fw_subgraph_buffer, mask_graph_buffer, joint_outputs
)
if V.graph.sizevars.evaluate_expr(sympy.Eq(Bq, Bkv)):
grad_key = broadcasted_grad_key
grad_value = broadcasted_grad_value
else:
assert V.graph.sizevars.evaluate_expr(sympy.Gt(Bq, 1) & sympy.Eq(Bkv, 1)), (
f"Bq and Bkv must broadcastable. "
f"Got Bq={V.graph.sizevars.evaluate_expr(Bq)} "
f"and Bkv={V.graph.sizevars.evaluate_expr(Bkv)}"
)
grad_key = lowerings[aten.sum](broadcasted_grad_key, axis=0, keepdims=True)
grad_value = lowerings[aten.sum](broadcasted_grad_value, axis=0, keepdims=True)
return (grad_query, grad_key, grad_value, tuple(joint_outputs.captured_grads))
def get_bwd_subgraph_outputs(
subgraph_buffer: SubgraphResults,
mask_graph_buffer: SubgraphResults,
joint_outputs: JointOutputResult,
) -> list[Optional[Union[ComputedBuffer, TensorBox]]]:
subgraph_buffer = (
# pyrefly: ignore [bad-assignment]
subgraph_buffer if isinstance(subgraph_buffer, Sequence) else [subgraph_buffer]
)
mask_graph_buffer = (
# pyrefly: ignore [bad-assignment]
mask_graph_buffer
if isinstance(mask_graph_buffer, Sequence)
else [mask_graph_buffer]
)
joint_output_buffers = [
joint_outputs.grad_input,
*joint_outputs.captured_grads_compute,
*joint_outputs.captured_grads,
*joint_outputs.mutated_grads,
]
# pyrefly: ignore [not-iterable]
return [*subgraph_buffer, *mask_graph_buffer, *joint_output_buffers]
| JointOutputResult |
python | numpy__numpy | numpy/ma/tests/test_regression.py | {
"start": 75,
"end": 2719
} | class ____:
def test_masked_array_create(self):
# Ticket #17
x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
mask=[0, 0, 0, 1, 1, 1, 0, 0])
assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])
def test_masked_array(self):
# Ticket #61
np.ma.array(1, mask=[1])
def test_mem_masked_where(self):
# Ticket #62
from numpy.ma import MaskType, masked_where
a = np.zeros((1, 1))
b = np.zeros(a.shape, MaskType)
c = masked_where(b, a)
a - c
def test_masked_array_multiply(self):
# Ticket #254
a = np.ma.zeros((4, 1))
a[2, 0] = np.ma.masked
b = np.zeros((4, 2))
a * b
b * a
def test_masked_array_repeat(self):
# Ticket #271
np.ma.array([1], mask=False).repeat(10)
def test_masked_array_repr_unicode(self):
# Ticket #1256
repr(np.ma.array("Unicode"))
def test_atleast_2d(self):
# Ticket #1559
a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
b = np.atleast_2d(a)
assert_(a.mask.ndim == 1)
assert_(b.mask.ndim == 2)
def test_set_fill_value_unicode_py3(self):
# Ticket #2733
a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
a.fill_value = 'X'
assert_(a.fill_value == 'X')
def test_var_sets_maskedarray_scalar(self):
# Issue gh-2757
a = np.ma.array(np.arange(5), mask=True)
mout = np.ma.array(-1, dtype=float)
a.var(out=mout)
assert_(mout._data == 0)
def test_mask_not_backmangled(self):
# See gh-10314. Test case taken from gh-3140.
a = np.ma.MaskedArray([1., 2.], mask=[False, False])
assert_(a.mask.shape == (2,))
b = np.tile(a, (2, 1))
# Check that the above no longer changes a.shape to (1, 2)
assert_(a.mask.shape == (2,))
assert_(b.shape == (2, 2))
assert_(b.mask.shape == (2, 2))
def test_empty_list_on_structured(self):
# See gh-12464. Indexing with empty list should give empty result.
ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4')
assert_array_equal(ma[[]], ma[:0])
def test_masked_array_tobytes_fortran(self):
ma = np.ma.arange(4).reshape((2, 2))
assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes())
def test_structured_array(self):
# see gh-22041
np.ma.array((1, (b"", b"")),
dtype=[("x", np.int_),
("y", [("i", np.void), ("j", np.void)])])
| TestRegression |
python | sympy__sympy | sympy/sets/ordinals.py | {
"start": 1501,
"end": 7045
} | class ____(Basic):
"""
Represents ordinals in Cantor normal form.
Internally, this class is just a list of instances of OmegaPower.
Examples
========
>>> from sympy import Ordinal, OmegaPower
>>> from sympy.sets.ordinals import omega
>>> w = omega
>>> w.is_limit_ordinal
True
>>> Ordinal(OmegaPower(w + 1, 1), OmegaPower(3, 2))
w**(w + 1) + w**3*2
>>> 3 + w
w
>>> (w + 1) * w
w**2
References
==========
.. [1] https://en.wikipedia.org/wiki/Ordinal_arithmetic
"""
def __new__(cls, *terms):
obj = super().__new__(cls, *terms)
powers = [i.exp for i in obj.args]
if not all(powers[i] >= powers[i+1] for i in range(len(powers) - 1)):
raise ValueError("powers must be in decreasing order")
return obj
@property
def terms(self):
return self.args
@property
def leading_term(self):
if self == ord0:
raise ValueError("ordinal zero has no leading term")
return self.terms[0]
@property
def trailing_term(self):
if self == ord0:
raise ValueError("ordinal zero has no trailing term")
return self.terms[-1]
@property
def is_successor_ordinal(self):
try:
return self.trailing_term.exp == ord0
except ValueError:
return False
@property
def is_limit_ordinal(self):
try:
return not self.trailing_term.exp == ord0
except ValueError:
return False
@property
def degree(self):
return self.leading_term.exp
@classmethod
def convert(cls, integer_value):
if integer_value == 0:
return ord0
return Ordinal(OmegaPower(0, integer_value))
def __eq__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
return self.terms == other.terms
def __hash__(self):
return hash(self.args)
def __lt__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
for term_self, term_other in zip(self.terms, other.terms):
if term_self != term_other:
return term_self < term_other
return len(self.terms) < len(other.terms)
def __le__(self, other):
return (self == other or self < other)
def __gt__(self, other):
return not self <= other
def __ge__(self, other):
return not self < other
def __str__(self):
net_str = ""
if self == ord0:
return 'ord0'
for plus_count, i in enumerate(self.terms):
if plus_count:
net_str += " + "
if i.exp == ord0:
net_str += str(i.mult)
elif i.exp == 1:
net_str += 'w'
elif len(i.exp.terms) > 1 or i.exp.is_limit_ordinal:
net_str += 'w**(%s)'%i.exp
else:
net_str += 'w**%s'%i.exp
if not i.mult == 1 and not i.exp == ord0:
net_str += '*%s'%i.mult
return(net_str)
__repr__ = __str__
def __add__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
if other == ord0:
return self
a_terms = list(self.terms)
b_terms = list(other.terms)
r = len(a_terms) - 1
b_exp = other.degree
while r >= 0 and a_terms[r].exp < b_exp:
r -= 1
if r < 0:
terms = b_terms
elif a_terms[r].exp == b_exp:
sum_term = OmegaPower(b_exp, a_terms[r].mult + other.leading_term.mult)
terms = a_terms[:r] + [sum_term] + b_terms[1:]
else:
terms = a_terms[:r+1] + b_terms
return Ordinal(*terms)
def __radd__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
return other + self
def __mul__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
if ord0 in (self, other):
return ord0
a_exp = self.degree
a_mult = self.leading_term.mult
summation = []
if other.is_limit_ordinal:
for arg in other.terms:
summation.append(OmegaPower(a_exp + arg.exp, arg.mult))
else:
for arg in other.terms[:-1]:
summation.append(OmegaPower(a_exp + arg.exp, arg.mult))
b_mult = other.trailing_term.mult
summation.append(OmegaPower(a_exp, a_mult*b_mult))
summation += list(self.terms[1:])
return Ordinal(*summation)
def __rmul__(self, other):
if not isinstance(other, Ordinal):
try:
other = Ordinal.convert(other)
except TypeError:
return NotImplemented
return other * self
def __pow__(self, other):
if not self == omega:
return NotImplemented
return Ordinal(OmegaPower(other, 1))
| Ordinal |
python | ray-project__ray | doc/source/ray-core/examples/rdt/grpo_contextual_bandits.py | {
"start": 2887,
"end": 4012
} | class ____(torch.nn.Module): # Sized to ~50 MB of parameters.
"""Model used for Generator and Learner.
It takes a 2D state vector as input and produces logits for each action.
"""
def __init__(self, hidden_dim: int = 512, depth: int = 50):
super().__init__()
self.input = torch.nn.Linear(STATE_DIM, hidden_dim, bias=True)
self.backbone = torch.nn.ModuleList(
ResidualBlock(hidden_dim) for _ in range(depth - 1)
)
self.head = torch.nn.Linear(hidden_dim, ACTION_DIM, bias=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.input(x)
for block in self.backbone:
x = block(x)
x = self.head(x)
return x
# -- Utilities --
def sample_unit_vector(batch_size: int, dim: int = STATE_DIM) -> torch.Tensor:
"""Sample unit vectors of shape [batch_size, dim] by normalizing Gaussian draws."""
assert batch_size > 1, "Batch size must be greater than 1"
v = torch.randn(batch_size, dim)
norms = v.norm(dim=-1, keepdim=True) + 1e-8
return v / norms
# -- Actors --
@ray.remote
| ResidualMLP |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.