language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | GoogleCloudPlatform__python-docs-samples | healthcare/api-client/v1beta1/fhir/fhir_stores_test.py | {
"start": 1028,
"end": 2668
} | class ____(Exception):
"""Operation is not yet complete"""
@retry.Retry(predicate=retry.if_exception_type(OperationNotComplete))
def wait_for_operation(operation_name: str):
operation = (
client.projects()
.locations()
.datasets()
.operations()
.get(name=operation_name)
.execute()
)
if not operation.get("done", False):
raise OperationNotComplete(operation)
@pytest.fixture(scope="module")
def test_dataset():
operation = fhir_stores.create_dataset(
service_account_json, project_id, cloud_region, dataset_id
)
# Wait for the dataset to be created
wait_for_operation(operation["name"])
yield
# Clean up
fhir_stores.delete_dataset(
service_account_json, project_id, cloud_region, dataset_id
)
@pytest.fixture(scope="module")
def test_fhir_store():
resp = fhir_stores.create_fhir_store(
service_account_json, project_id, cloud_region, dataset_id, test_fhir_store_id
)
yield resp
fhir_stores.delete_fhir_store(
service_account_json, project_id, cloud_region, dataset_id, test_fhir_store_id
)
def test_create_delete_fhir_store(test_dataset, capsys):
fhir_stores.create_fhir_store(
service_account_json, project_id, cloud_region, dataset_id, fhir_store_id
)
fhir_stores.delete_fhir_store(
service_account_json, project_id, cloud_region, dataset_id, fhir_store_id
)
out, _ = capsys.readouterr()
# Check that create/get/list/delete worked
assert "Created FHIR store" in out
assert "Deleted FHIR store" in out
| OperationNotComplete |
python | huggingface__transformers | src/transformers/models/minimax/modular_minimax.py | {
"start": 21720,
"end": 21783
} | class ____(MixtralSparseMoeBlock):
pass
| MiniMaxSparseMoeBlock |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 119163,
"end": 123799
} | class ____(Request):
"""
Get all 'plot' events for this task
:param task: Task ID
:type task: str
:param iters: Max number of latest iterations for which to return debug images
:type iters: int
:param scroll_id: Scroll ID of previous call (used for getting more results)
:type scroll_id: str
:param metrics: List of metrics and variants
:type metrics: Sequence[MetricVariants]
:param no_scroll: If Truethen no scroll is created. Suitable for one time calls
:type no_scroll: bool
"""
_service = "events"
_action = "get_task_plots"
_version = "2.20"
_schema = {
"definitions": {
"metric_variants": {
"metric": {"description": "The metric name", "type": "string"},
"type": "object",
"variants": {
"description": "The names of the metric variants",
"items": {"type": "string"},
"type": "array",
},
}
},
"properties": {
"iters": {
"description": "Max number of latest iterations for which to return debug images",
"type": "integer",
},
"metrics": {
"description": "List of metrics and variants",
"items": {"$ref": "#/definitions/metric_variants"},
"type": "array",
},
"no_scroll": {
"default": False,
"description": "If Truethen no scroll is created. Suitable for one time calls",
"type": "boolean",
},
"scroll_id": {
"description": "Scroll ID of previous call (used for getting more results)",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
iters: Optional[int] = None,
scroll_id: Optional[str] = None,
metrics: Optional[List[Any]] = None,
no_scroll: Optional[bool] = False,
**kwargs: Any
) -> None:
super(GetTaskPlotsRequest, self).__init__(**kwargs)
self.task = task
self.iters = iters
self.scroll_id = scroll_id
self.metrics = metrics
self.no_scroll = no_scroll
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("iters")
def iters(self) -> Optional[int]:
return self._property_iters
@iters.setter
def iters(self, value: Optional[int]) -> None:
if value is None:
self._property_iters = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iters", six.integer_types)
self._property_iters = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("metrics")
def metrics(self) -> Optional[List[Any]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetricVariants.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metrics", MetricVariants, is_array=True)
self._property_metrics = value
@schema_property("no_scroll")
def no_scroll(self) -> Optional[bool]:
return self._property_no_scroll
@no_scroll.setter
def no_scroll(self, value: Optional[bool]) -> None:
if value is None:
self._property_no_scroll = None
return
self.assert_isinstance(value, "no_scroll", (bool,))
self._property_no_scroll = value
| GetTaskPlotsRequest |
python | spack__spack | lib/spack/spack/repo.py | {
"start": 17978,
"end": 21756
} | class ____:
"""Container class that manages a set of Indexers for a Repo.
This class is responsible for checking packages in a repository for
updates (using ``FastPackageChecker``) and for regenerating indexes
when they're needed.
``Indexers`` should be added to the ``RepoIndex`` using
``add_indexer(name, indexer)``, and they should support the interface
defined by ``Indexer``, so that the ``RepoIndex`` can read, generate,
and update stored indices.
Generated indexes are accessed by name via ``__getitem__()``."""
def __init__(
self,
package_checker: FastPackageChecker,
namespace: str,
cache: spack.util.file_cache.FileCache,
):
self.checker = package_checker
self.packages_path = self.checker.packages_path
if sys.platform == "win32":
self.packages_path = spack.llnl.path.convert_to_posix_path(self.packages_path)
self.namespace = namespace
self.indexers: Dict[str, Indexer] = {}
self.indexes: Dict[str, Any] = {}
self.cache = cache
def add_indexer(self, name: str, indexer: Indexer):
"""Add an indexer to the repo index.
Arguments:
name: name of this indexer
indexer: object implementing the ``Indexer`` interface"""
self.indexers[name] = indexer
def __getitem__(self, name):
"""Get the index with the specified name, reindexing if needed."""
indexer = self.indexers.get(name)
if not indexer:
raise KeyError("no such index: %s" % name)
if name not in self.indexes:
self._build_all_indexes()
return self.indexes[name]
def _build_all_indexes(self):
"""Build all the indexes at once.
We regenerate *all* indexes whenever *any* index needs an update,
because the main bottleneck here is loading all the packages. It
can take tens of seconds to regenerate sequentially, and we'd
rather only pay that cost once rather than on several
invocations."""
for name, indexer in self.indexers.items():
self.indexes[name] = self._build_index(name, indexer)
def _build_index(self, name: str, indexer: Indexer):
"""Determine which packages need an update, and update indexes."""
# Filename of the provider index cache (we assume they're all json)
from spack.spec import SPECFILE_FORMAT_VERSION
cache_filename = f"{name}/{self.namespace}-specfile_v{SPECFILE_FORMAT_VERSION}-index.json"
# Compute which packages needs to be updated in the cache
index_mtime = self.cache.mtime(cache_filename)
needs_update = self.checker.modified_since(index_mtime)
index_existed = self.cache.init_entry(cache_filename)
if index_existed and not needs_update:
# If the index exists and doesn't need an update, read it
with self.cache.read_transaction(cache_filename) as f:
indexer.read(f)
else:
# Otherwise update it and rewrite the cache file
with self.cache.write_transaction(cache_filename) as (old, new):
indexer.read(old) if old else indexer.create()
# Compute which packages needs to be updated **again** in case someone updated them
# while we waited for the lock
new_index_mtime = self.cache.mtime(cache_filename)
if new_index_mtime != index_mtime:
needs_update = self.checker.modified_since(new_index_mtime)
for pkg_name in needs_update:
indexer.update(f"{self.namespace}.{pkg_name}")
indexer.write(new)
return indexer.index
| RepoIndex |
python | scikit-learn__scikit-learn | sklearn/ensemble/tests/test_stacking.py | {
"start": 10023,
"end": 33530
} | class ____(ClassifierMixin, BaseEstimator):
def fit(self, X, y):
self.clf = DummyClassifier(strategy="stratified")
return self.clf.fit(X, y)
@pytest.mark.parametrize(
"y, params, type_err, msg_err",
[
(y_iris, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"),
(
y_iris,
{
"estimators": [
("lr", LogisticRegression()),
("svm", SVC(max_iter=50_000)),
],
"stack_method": "predict_proba",
},
ValueError,
"does not implement the method predict_proba",
),
(
y_iris,
{
"estimators": [
("lr", LogisticRegression()),
("cor", NoWeightClassifier()),
]
},
TypeError,
"does not support sample weight",
),
(
y_iris,
{
"estimators": [
("lr", LogisticRegression()),
("cor", LinearSVC(max_iter=50_000)),
],
"final_estimator": NoWeightClassifier(),
},
TypeError,
"does not support sample weight",
),
],
)
def test_stacking_classifier_error(y, params, type_err, msg_err):
with pytest.raises(type_err, match=msg_err):
clf = StackingClassifier(**params, cv=3)
clf.fit(scale(X_iris), y, sample_weight=np.ones(X_iris.shape[0]))
@pytest.mark.parametrize(
"y, params, type_err, msg_err",
[
(y_diabetes, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"),
(
y_diabetes,
{"estimators": [("lr", LinearRegression()), ("cor", NoWeightRegressor())]},
TypeError,
"does not support sample weight",
),
(
y_diabetes,
{
"estimators": [
("lr", LinearRegression()),
("cor", LinearSVR()),
],
"final_estimator": NoWeightRegressor(),
},
TypeError,
"does not support sample weight",
),
],
)
def test_stacking_regressor_error(y, params, type_err, msg_err):
with pytest.raises(type_err, match=msg_err):
reg = StackingRegressor(**params, cv=3)
reg.fit(scale(X_diabetes), y, sample_weight=np.ones(X_diabetes.shape[0]))
@pytest.mark.parametrize(
"estimator, X, y",
[
(
StackingClassifier(
estimators=[
("first", LogisticRegression(random_state=0)),
("second", LinearSVC(random_state=0)),
]
),
X_iris[:100],
y_iris[:100],
), # keep only classes 0 and 1
(
StackingRegressor(
estimators=[
("first", Ridge(alpha=1.0)),
("second", Ridge(alpha=1e-6)),
]
),
X_diabetes,
y_diabetes,
),
],
ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_randomness(estimator, X, y):
# checking that fixing the random state of the CV will lead to the same
# results
estimator_full = clone(estimator)
estimator_full.set_params(
cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
)
estimator_drop = clone(estimator)
estimator_drop.set_params(first="drop")
estimator_drop.set_params(
cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
)
assert_allclose(
estimator_full.fit(X, y).transform(X)[:, 1:],
estimator_drop.fit(X, y).transform(X),
)
def test_stacking_classifier_stratify_default():
# check that we stratify the classes for the default CV
clf = StackingClassifier(
estimators=[
("lr", LogisticRegression(max_iter=10_000)),
("svm", LinearSVC(max_iter=10_000)),
]
)
# since iris is not shuffled, a simple k-fold would not contain the
# 3 classes during training
clf.fit(X_iris, y_iris)
@pytest.mark.parametrize(
"stacker, X, y",
[
(
StackingClassifier(
estimators=[
("lr", LogisticRegression()),
("svm", LinearSVC(random_state=42)),
],
final_estimator=LogisticRegression(),
cv=KFold(shuffle=True, random_state=42),
),
*load_breast_cancer(return_X_y=True),
),
(
StackingRegressor(
estimators=[
("first", Ridge(alpha=1.0)),
("second", Ridge(alpha=1e-6)),
],
final_estimator=LinearRegression(),
cv=KFold(shuffle=True, random_state=42),
),
X_diabetes,
y_diabetes,
),
],
ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_with_sample_weight(stacker, X, y):
# check that sample weights has an influence on the fitting
# note: ConvergenceWarning are catch since we are not worrying about the
# convergence here
n_half_samples = len(y) // 2
total_sample_weight = np.array(
[0.1] * n_half_samples + [0.9] * (len(y) - n_half_samples)
)
X_train, X_test, y_train, _, sample_weight_train, _ = train_test_split(
X, y, total_sample_weight, random_state=42
)
stacker = clone(stacker)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train)
y_pred_no_weight = stacker.predict(X_test)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train, sample_weight=np.ones(y_train.shape))
y_pred_unit_weight = stacker.predict(X_test)
assert_allclose(y_pred_no_weight, y_pred_unit_weight)
with ignore_warnings(category=ConvergenceWarning):
stacker.fit(X_train, y_train, sample_weight=sample_weight_train)
y_pred_biased = stacker.predict(X_test)
assert np.abs(y_pred_no_weight - y_pred_biased).sum() > 0
def test_stacking_classifier_sample_weight_fit_param():
# check sample_weight is passed to all invocations of fit
stacker = StackingClassifier(
estimators=[("lr", CheckingClassifier(expected_sample_weight=True))],
final_estimator=CheckingClassifier(expected_sample_weight=True),
)
stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0]))
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
@pytest.mark.parametrize(
"stacker, X, y",
[
(
StackingClassifier(
estimators=[
("lr", LogisticRegression()),
("svm", LinearSVC(random_state=42)),
],
final_estimator=LogisticRegression(),
),
*load_breast_cancer(return_X_y=True),
),
(
StackingRegressor(
estimators=[
("ridge1", Ridge(alpha=1.0)),
("ridge2", Ridge(alpha=1e-6)),
],
final_estimator=LinearRegression(),
),
X_diabetes,
y_diabetes,
),
],
ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_cv_influence(stacker, X, y):
# check that the stacking affects the fit of the final estimator but not
# the fit of the base estimators
# note: ConvergenceWarning are caught since we are not worrying about the
# convergence here
stacker_cv_3 = clone(stacker)
stacker_cv_5 = clone(stacker)
stacker_cv_3.set_params(cv=3)
stacker_cv_5.set_params(cv=5)
stacker_cv_3.fit(X, y)
stacker_cv_5.fit(X, y)
# the base estimators should be identical
for est_cv_3, est_cv_5 in zip(stacker_cv_3.estimators_, stacker_cv_5.estimators_):
assert_allclose(est_cv_3.coef_, est_cv_5.coef_)
# the final estimator should be different
with pytest.raises(AssertionError, match="Not equal"):
assert_allclose(
stacker_cv_3.final_estimator_.coef_, stacker_cv_5.final_estimator_.coef_
)
@pytest.mark.parametrize(
"Stacker, Estimator, stack_method, final_estimator, X, y",
[
(
StackingClassifier,
DummyClassifier,
"predict_proba",
LogisticRegression(random_state=42),
X_iris,
y_iris,
),
(
StackingRegressor,
DummyRegressor,
"predict",
LinearRegression(),
X_diabetes,
y_diabetes,
),
],
)
def test_stacking_prefit(Stacker, Estimator, stack_method, final_estimator, X, y):
"""Check the behaviour of stacking when `cv='prefit'`"""
X_train1, X_train2, y_train1, y_train2 = train_test_split(
X, y, random_state=42, test_size=0.5
)
estimators = [
("d0", Estimator().fit(X_train1, y_train1)),
("d1", Estimator().fit(X_train1, y_train1)),
]
# mock out fit and stack_method to be asserted later
for _, estimator in estimators:
estimator.fit = Mock(name="fit")
stack_func = getattr(estimator, stack_method)
predict_method_mocked = Mock(side_effect=stack_func)
# Mocking a method will not provide a `__name__` while Python methods
# do and we are using it in `_get_response_method`.
predict_method_mocked.__name__ = stack_method
setattr(estimator, stack_method, predict_method_mocked)
stacker = Stacker(
estimators=estimators, cv="prefit", final_estimator=final_estimator
)
stacker.fit(X_train2, y_train2)
assert stacker.estimators_ == [estimator for _, estimator in estimators]
# fit was not called again
assert all(estimator.fit.call_count == 0 for estimator in stacker.estimators_)
# stack method is called with the proper inputs
for estimator in stacker.estimators_:
stack_func_mock = getattr(estimator, stack_method)
stack_func_mock.assert_called_with(X_train2)
@pytest.mark.parametrize(
"stacker, X, y",
[
(
StackingClassifier(
estimators=[("lr", LogisticRegression()), ("svm", SVC())],
cv="prefit",
),
X_iris,
y_iris,
),
(
StackingRegressor(
estimators=[
("lr", LinearRegression()),
("svm", LinearSVR()),
],
cv="prefit",
),
X_diabetes,
y_diabetes,
),
],
)
def test_stacking_prefit_error(stacker, X, y):
# check that NotFittedError is raised
# if base estimators are not fitted when cv="prefit"
with pytest.raises(NotFittedError):
stacker.fit(X, y)
@pytest.mark.parametrize(
"make_dataset, Stacking, Estimator",
[
(make_classification, StackingClassifier, LogisticRegression),
(make_regression, StackingRegressor, LinearRegression),
],
)
def test_stacking_without_n_features_in(make_dataset, Stacking, Estimator):
# Stacking supports estimators without `n_features_in_`. Regression test
# for #17353
class MyEstimator(Estimator):
"""Estimator without n_features_in_"""
def fit(self, X, y):
super().fit(X, y)
del self.n_features_in_
X, y = make_dataset(random_state=0, n_samples=100)
stacker = Stacking(estimators=[("lr", MyEstimator())])
msg = f"{Stacking.__name__} object has no attribute n_features_in_"
with pytest.raises(AttributeError, match=msg):
stacker.n_features_in_
# Does not raise
stacker.fit(X, y)
msg = "'MyEstimator' object has no attribute 'n_features_in_'"
with pytest.raises(AttributeError, match=msg):
stacker.n_features_in_
@pytest.mark.parametrize(
"estimator",
[
# output a 2D array of the probability of the positive class for each output
MLPClassifier(random_state=42),
# output a list of 2D array containing the probability of each class
# for each output
RandomForestClassifier(random_state=42),
],
ids=["MLPClassifier", "RandomForestClassifier"],
)
def test_stacking_classifier_multilabel_predict_proba(estimator):
"""Check the behaviour for the multilabel classification case and the
`predict_proba` stacking method.
Estimators are not consistent with the output arrays and we need to ensure that
we handle all cases.
"""
X_train, X_test, y_train, y_test = train_test_split(
X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
)
n_outputs = 3
estimators = [("est", estimator)]
stacker = StackingClassifier(
estimators=estimators,
final_estimator=KNeighborsClassifier(),
stack_method="predict_proba",
).fit(X_train, y_train)
X_trans = stacker.transform(X_test)
assert X_trans.shape == (X_test.shape[0], n_outputs)
# we should not have any collinear classes and thus nothing should sum to 1
assert not any(np.isclose(X_trans.sum(axis=1), 1.0))
y_pred = stacker.predict(X_test)
assert y_pred.shape == y_test.shape
def test_stacking_classifier_multilabel_decision_function():
"""Check the behaviour for the multilabel classification case and the
`decision_function` stacking method. Only `RidgeClassifier` supports this
case.
"""
X_train, X_test, y_train, y_test = train_test_split(
X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
)
n_outputs = 3
estimators = [("est", RidgeClassifier())]
stacker = StackingClassifier(
estimators=estimators,
final_estimator=KNeighborsClassifier(),
stack_method="decision_function",
).fit(X_train, y_train)
X_trans = stacker.transform(X_test)
assert X_trans.shape == (X_test.shape[0], n_outputs)
y_pred = stacker.predict(X_test)
assert y_pred.shape == y_test.shape
@pytest.mark.parametrize("stack_method", ["auto", "predict"])
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_classifier_multilabel_auto_predict(stack_method, passthrough):
"""Check the behaviour for the multilabel classification case for stack methods
supported for all estimators or automatically picked up.
"""
X_train, X_test, y_train, y_test = train_test_split(
X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
)
y_train_before_fit = y_train.copy()
n_outputs = 3
estimators = [
("mlp", MLPClassifier(random_state=42)),
("rf", RandomForestClassifier(random_state=42)),
("ridge", RidgeClassifier()),
]
final_estimator = KNeighborsClassifier()
clf = StackingClassifier(
estimators=estimators,
final_estimator=final_estimator,
passthrough=passthrough,
stack_method=stack_method,
).fit(X_train, y_train)
# make sure we don't change `y_train` inplace
assert_array_equal(y_train_before_fit, y_train)
y_pred = clf.predict(X_test)
assert y_pred.shape == y_test.shape
if stack_method == "auto":
expected_stack_methods = ["predict_proba", "predict_proba", "decision_function"]
else:
expected_stack_methods = ["predict"] * len(estimators)
assert clf.stack_method_ == expected_stack_methods
n_features_X_trans = n_outputs * len(estimators)
if passthrough:
n_features_X_trans += X_train.shape[1]
X_trans = clf.transform(X_test)
assert X_trans.shape == (X_test.shape[0], n_features_X_trans)
assert_array_equal(clf.classes_, [np.array([0, 1])] * n_outputs)
@pytest.mark.parametrize(
"stacker, feature_names, X, y, expected_names",
[
(
StackingClassifier(
estimators=[
("lr", LogisticRegression(random_state=0)),
("svm", LinearSVC(random_state=0)),
]
),
iris.feature_names,
X_iris,
y_iris,
[
"stackingclassifier_lr0",
"stackingclassifier_lr1",
"stackingclassifier_lr2",
"stackingclassifier_svm0",
"stackingclassifier_svm1",
"stackingclassifier_svm2",
],
),
(
StackingClassifier(
estimators=[
("lr", LogisticRegression(random_state=0)),
("other", "drop"),
("svm", LinearSVC(random_state=0)),
]
),
iris.feature_names,
X_iris[:100],
y_iris[:100], # keep only classes 0 and 1
[
"stackingclassifier_lr",
"stackingclassifier_svm",
],
),
(
StackingRegressor(
estimators=[
("lr", LinearRegression()),
("svm", LinearSVR(random_state=0)),
]
),
diabetes.feature_names,
X_diabetes,
y_diabetes,
[
"stackingregressor_lr",
"stackingregressor_svm",
],
),
],
ids=[
"StackingClassifier_multiclass",
"StackingClassifier_binary",
"StackingRegressor",
],
)
@pytest.mark.parametrize("passthrough", [True, False])
def test_get_feature_names_out(
stacker, feature_names, X, y, expected_names, passthrough
):
"""Check get_feature_names_out works for stacking."""
stacker = clone(stacker)
stacker.set_params(passthrough=passthrough)
stacker.fit(scale(X), y)
if passthrough:
expected_names = np.concatenate((expected_names, feature_names))
names_out = stacker.get_feature_names_out(feature_names)
assert_array_equal(names_out, expected_names)
def test_stacking_classifier_base_regressor():
"""Check that a regressor can be used as the first layer in `StackingClassifier`."""
X_train, X_test, y_train, y_test = train_test_split(
scale(X_iris), y_iris, stratify=y_iris, random_state=42
)
clf = StackingClassifier(estimators=[("ridge", Ridge())])
clf.fit(X_train, y_train)
clf.predict(X_test)
clf.predict_proba(X_test)
assert clf.score(X_test, y_test) > 0.8
def test_stacking_final_estimator_attribute_error():
"""Check that we raise the proper AttributeError when the final estimator
does not implement the `decision_function` method, which is decorated with
`available_if`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28108
"""
X, y = make_classification(random_state=42)
estimators = [
("lr", LogisticRegression()),
("rf", RandomForestClassifier(n_estimators=2, random_state=42)),
]
# RandomForestClassifier does not implement 'decision_function' and should raise
# an AttributeError
final_estimator = RandomForestClassifier(n_estimators=2, random_state=42)
clf = StackingClassifier(
estimators=estimators, final_estimator=final_estimator, cv=3
)
outer_msg = "This 'StackingClassifier' has no attribute 'decision_function'"
inner_msg = "'RandomForestClassifier' object has no attribute 'decision_function'"
with pytest.raises(AttributeError, match=outer_msg) as exec_info:
clf.fit(X, y).decision_function(X)
assert isinstance(exec_info.value.__cause__, AttributeError)
assert inner_msg in str(exec_info.value.__cause__)
# Metadata Routing Tests
# ======================
@pytest.mark.parametrize(
"Estimator, Child",
[
(StackingClassifier, ConsumingClassifier),
(StackingRegressor, ConsumingRegressor),
],
)
def test_routing_passed_metadata_not_supported(Estimator, Child):
"""Test that the right error message is raised when metadata is passed while
not supported when `enable_metadata_routing=False`."""
with pytest.raises(
ValueError, match="is only supported if enable_metadata_routing=True"
):
Estimator(["clf", Child()]).fit(
X_iris, y_iris, sample_weight=[1, 1, 1, 1, 1], metadata="a"
)
@pytest.mark.parametrize(
"Estimator, Child",
[
(StackingClassifier, ConsumingClassifier),
(StackingRegressor, ConsumingRegressor),
],
)
@config_context(enable_metadata_routing=True)
def test_get_metadata_routing_without_fit(Estimator, Child):
# Test that metadata_routing() doesn't raise when called before fit.
est = Estimator([("sub_est", Child())])
est.get_metadata_routing()
@pytest.mark.parametrize(
"Estimator, Child",
[
(StackingClassifier, ConsumingClassifier),
(StackingRegressor, ConsumingRegressor),
],
)
@pytest.mark.parametrize(
"prop, prop_value", [("sample_weight", np.ones(X_iris.shape[0])), ("metadata", "a")]
)
@config_context(enable_metadata_routing=True)
def test_metadata_routing_for_stacking_estimators(Estimator, Child, prop, prop_value):
"""Test that metadata is routed correctly for Stacking*."""
est = Estimator(
[
(
"sub_est1",
Child(registry=_Registry()).set_fit_request(**{prop: True}),
),
(
"sub_est2",
Child(registry=_Registry()).set_fit_request(**{prop: True}),
),
],
final_estimator=Child(registry=_Registry()).set_predict_request(**{prop: True}),
)
est.fit(X_iris, y_iris, **{prop: prop_value})
est.fit_transform(X_iris, y_iris, **{prop: prop_value})
est.predict(X_iris, **{prop: prop_value})
for estimator in est.estimators:
# access sub-estimator in (name, est) with estimator[1]:
registry = estimator[1].registry
assert len(registry)
for sub_est in registry:
check_recorded_metadata(
obj=sub_est,
method="fit",
parent="fit",
split_params=(prop),
**{prop: prop_value},
)
# access final_estimator:
registry = est.final_estimator_.registry
assert len(registry)
check_recorded_metadata(
obj=registry[-1],
method="predict",
parent="predict",
split_params=(prop),
**{prop: prop_value},
)
@pytest.mark.parametrize(
"Estimator, Child",
[
(StackingClassifier, ConsumingClassifier),
(StackingRegressor, ConsumingRegressor),
],
)
@config_context(enable_metadata_routing=True)
def test_metadata_routing_error_for_stacking_estimators(Estimator, Child):
"""Test that the right error is raised when metadata is not requested."""
sample_weight, metadata = np.ones(X_iris.shape[0]), "a"
est = Estimator([("sub_est", Child())])
error_message = (
"[sample_weight, metadata] are passed but are not explicitly set as requested"
f" or not requested for {Child.__name__}.fit"
)
with pytest.raises(ValueError, match=re.escape(error_message)):
est.fit(X_iris, y_iris, sample_weight=sample_weight, metadata=metadata)
# End of Metadata Routing Tests
# =============================
| NoWeightClassifier |
python | pandas-dev__pandas | pandas/core/dtypes/dtypes.py | {
"start": 54438,
"end": 68913
} | class ____(ExtensionDtype):
"""
Dtype for data stored in :class:`SparseArray`.
``SparseDtype`` is used as the data type for :class:`SparseArray`, enabling
more efficient storage of data that contains a significant number of
repetitive values typically represented by a fill value. It supports any
scalar dtype as the underlying data type of the non-fill values.
Parameters
----------
dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64
The dtype of the underlying array storing the non-fill value values.
fill_value : scalar, optional
The scalar value not stored in the SparseArray. By default, this
depends on ``dtype``.
=========== ==========
dtype na_value
=========== ==========
float ``np.nan``
complex ``np.nan``
int ``0``
bool ``False``
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The default value may be overridden by specifying a ``fill_value``.
Attributes
----------
None
Methods
-------
None
See Also
--------
arrays.SparseArray : The array structure that uses SparseDtype
for data representation.
Examples
--------
>>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0))
>>> ser
0 1
1 0
2 0
dtype: Sparse[int64, 0]
>>> ser.sparse.density
0.3333333333333333
"""
_is_immutable = True
# We include `_is_na_fill_value` in the metadata to avoid hash collisions
# between SparseDtype(float, 0.0) and SparseDtype(float, nan).
# Without is_na_fill_value in the comparison, those would be equal since
# hash(nan) is (sometimes?) 0.
_metadata = ("_dtype", "_fill_value", "_is_na_fill_value")
def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None:
if isinstance(dtype, type(self)):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
from pandas.core.dtypes.common import (
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.missing import na_value_for_dtype
dtype = pandas_dtype(dtype)
if is_string_dtype(dtype):
dtype = np.dtype("object")
if not isinstance(dtype, np.dtype):
# GH#53160
raise TypeError("SparseDtype subtype must be a numpy dtype")
if fill_value is None:
fill_value = na_value_for_dtype(dtype)
self._dtype = dtype
self._fill_value = fill_value
self._check_fill_value()
def __hash__(self) -> int:
# Python3 doesn't inherit __hash__ when a base class overrides
# __eq__, so we explicitly do it here.
return super().__hash__()
def __eq__(self, other: object) -> bool:
# We have to override __eq__ to handle NA values in _metadata.
# The base class does simple == checks, which fail for NA.
if isinstance(other, str):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
subtype = self.subtype == other.subtype
if self._is_na_fill_value or other._is_na_fill_value:
# this case is complicated by two things:
# SparseDtype(float, float(nan)) == SparseDtype(float, np.nan)
# SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT)
# i.e. we want to treat any floating-point NaN as equal, but
# not a floating-point NaN and a datetime NaT.
fill_value = isinstance(
self.fill_value, type(other.fill_value)
) or isinstance(other.fill_value, type(self.fill_value))
else:
with warnings.catch_warnings():
# Ignore spurious numpy warning
warnings.filterwarnings(
"ignore",
"elementwise comparison failed",
category=DeprecationWarning,
)
fill_value = self.fill_value == other.fill_value
return subtype and fill_value
return False
@property
def fill_value(self):
"""
The fill value of the array.
Converting the SparseArray to a dense ndarray will fill the
array with this value.
.. warning::
It's possible to end up with a SparseArray that has ``fill_value``
values in ``sp_values``. This can occur, for example, when setting
``SparseArray.fill_value`` directly.
"""
return self._fill_value
def _check_fill_value(self) -> None:
if not lib.is_scalar(self._fill_value):
raise ValueError(
f"fill_value must be a scalar. Got {self._fill_value} instead"
)
from pandas.core.dtypes.cast import can_hold_element
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
)
from pandas.core.construction import ensure_wrapped_if_datetimelike
# GH#23124 require fill_value and subtype to match
val = self._fill_value
if isna(val):
if not is_valid_na_for_dtype(val, self.subtype):
raise ValueError(
# GH#53043
"fill_value must be a valid value for the SparseDtype.subtype"
)
else:
dummy = np.empty(0, dtype=self.subtype)
dummy = ensure_wrapped_if_datetimelike(dummy)
if not can_hold_element(dummy, val):
raise ValueError(
# GH#53043
"fill_value must be a valid value for the SparseDtype.subtype"
)
@property
def _is_na_fill_value(self) -> bool:
from pandas import isna
return isna(self.fill_value)
@property
def _is_numeric(self) -> bool:
return not self.subtype == object
@property
def _is_boolean(self) -> bool:
return self.subtype.kind == "b"
@property
def kind(self) -> str:
"""
The sparse kind. Either 'integer', or 'block'.
"""
return self.subtype.kind
@property
def type(self):
return self.subtype.type
@property
def subtype(self):
return self._dtype
@property
def name(self) -> str:
return f"Sparse[{self.subtype.name}, {self.fill_value!r}]"
def __repr__(self) -> str:
return self.name
def construct_array_type(self) -> type_t[SparseArray]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
from pandas.core.arrays.sparse.array import SparseArray
return SparseArray
@classmethod
def construct_from_string(cls, string: str) -> SparseDtype:
"""
Construct a SparseDtype from a string form.
Parameters
----------
string : str
Can take the following forms.
string dtype
================ ============================
'int' SparseDtype[np.int64, 0]
'Sparse' SparseDtype[np.float64, nan]
'Sparse[int]' SparseDtype[np.int64, 0]
'Sparse[int, 0]' SparseDtype[np.int64, 0]
================ ============================
It is not possible to specify non-default fill values
with a string. An argument like ``'Sparse[int, 1]'``
will raise a ``TypeError`` because the default fill value
for integers is 0.
Returns
-------
SparseDtype
"""
if not isinstance(string, str):
raise TypeError(
f"'construct_from_string' expects a string, got {type(string)}"
)
msg = f"Cannot construct a 'SparseDtype' from '{string}'"
if string.startswith("Sparse"):
try:
sub_type, has_fill_value = cls._parse_subtype(string)
except ValueError as err:
raise TypeError(msg) from err
else:
result = SparseDtype(sub_type)
msg = (
f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt "
"looks like the fill_value in the string is not "
"the default for the dtype. Non-default fill_values "
"are not supported. Use the 'SparseDtype()' "
"constructor instead."
)
if has_fill_value and str(result) != string:
raise TypeError(msg)
return result
else:
raise TypeError(msg)
@staticmethod
def _parse_subtype(dtype: str) -> tuple[str, bool]:
"""
Parse a string to get the subtype
Parameters
----------
dtype : str
A string like
* Sparse[subtype]
* Sparse[subtype, fill_value]
Returns
-------
subtype : str
Raises
------
ValueError
When the subtype cannot be extracted.
"""
xpr = re.compile(r"Sparse\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\]$")
m = xpr.match(dtype)
has_fill_value = False
if m:
subtype = m.groupdict()["subtype"]
has_fill_value = bool(m.groupdict()["fill_value"])
elif dtype == "Sparse":
subtype = "float64"
else:
raise ValueError(f"Cannot parse {dtype}")
return subtype, has_fill_value
@classmethod
def is_dtype(cls, dtype: object) -> bool:
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, str) and dtype.startswith("Sparse"):
sub_type, _ = cls._parse_subtype(dtype)
dtype = np.dtype(sub_type)
elif isinstance(dtype, cls):
return True
return isinstance(dtype, np.dtype) or dtype == "Sparse"
def update_dtype(self, dtype) -> SparseDtype:
"""
Convert the SparseDtype to a new dtype.
This takes care of converting the ``fill_value``.
Parameters
----------
dtype : Union[str, numpy.dtype, SparseDtype]
The new dtype to use.
* For a SparseDtype, it is simply returned
* For a NumPy dtype (or str), the current fill value
is converted to the new dtype, and a SparseDtype
with `dtype` and the new fill value is returned.
Returns
-------
SparseDtype
A new SparseDtype with the correct `dtype` and fill value
for that `dtype`.
Raises
------
ValueError
When the current fill value cannot be converted to the
new `dtype` (e.g. trying to convert ``np.nan`` to an
integer dtype).
Examples
--------
>>> SparseDtype(int, 0).update_dtype(float)
Sparse[float64, 0.0]
>>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan))
Sparse[float64, nan]
"""
from pandas.core.dtypes.astype import astype_array
from pandas.core.dtypes.common import pandas_dtype
cls = type(self)
dtype = pandas_dtype(dtype)
if not isinstance(dtype, cls):
if not isinstance(dtype, np.dtype):
raise TypeError("sparse arrays of extension dtypes not supported")
fv_asarray = np.atleast_1d(np.array(self.fill_value))
fvarr = astype_array(fv_asarray, dtype)
# NB: not fv_0d.item(), as that casts dt64->int
fill_value = fvarr[0]
dtype = cls(dtype, fill_value=fill_value)
return dtype
@property
def _subtype_with_str(self):
"""
Whether the SparseDtype's subtype should be considered ``str``.
Typically, pandas will store string data in an object-dtype array.
When converting values to a dtype, e.g. in ``.astype``, we need to
be more specific, we need the actual underlying type.
Returns
-------
>>> SparseDtype(int, 1)._subtype_with_str
dtype('int64')
>>> SparseDtype(object, 1)._subtype_with_str
dtype('O')
>>> dtype = SparseDtype(str, "")
>>> dtype.subtype
dtype('O')
>>> dtype._subtype_with_str
<class 'str'>
"""
if isinstance(self.fill_value, str):
return type(self.fill_value)
return self.subtype
def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None:
# TODO for now only handle SparseDtypes and numpy dtypes => extend
# with other compatible extension dtypes
from pandas.core.dtypes.cast import np_find_common_type
if any(
isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype)
for x in dtypes
):
return None
fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)]
fill_value = fill_values[0]
from pandas import isna
# np.nan isn't a singleton, so we may end up with multiple
# NaNs here, so we ignore the all NA case too.
if get_option("performance_warnings") and (
not (len(set(fill_values)) == 1 or isna(fill_values).all())
):
warnings.warn(
"Concatenating sparse arrays with multiple fill "
f"values: '{fill_values}'. Picking the first and "
"converting the rest.",
PerformanceWarning,
stacklevel=find_stack_level(),
)
np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes)
# error: Argument 1 to "np_find_common_type" has incompatible type
# "*Generator[Any | dtype[Any] | ExtensionDtype, None, None]";
# expected "dtype[Any]" [arg-type]
return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value) # type: ignore [arg-type]
@register_extension_dtype
@set_module("pandas")
| SparseDtype |
python | RaRe-Technologies__gensim | gensim/interfaces.py | {
"start": 4896,
"end": 7428
} | class ____(CorpusABC):
"""Interface for corpora that are the result of an online (streamed) transformation."""
def __init__(self, obj, corpus, chunksize=None, **kwargs):
"""
Parameters
----------
obj : object
A transformation :class:`~gensim.interfaces.TransformationABC` object that will be applied
to each document from `corpus` during iteration.
corpus : iterable of list of (int, number)
Corpus in bag-of-words format.
chunksize : int, optional
If provided, a slightly more effective processing will be performed by grouping documents from `corpus`.
"""
self.obj, self.corpus, self.chunksize = obj, corpus, chunksize
# add the new parameters like per_word_topics to base class object of LdaModel
for key, value in kwargs.items():
setattr(self.obj, key, value)
self.metadata = False
def __len__(self):
"""Get corpus size."""
return len(self.corpus)
def __iter__(self):
"""Iterate over the corpus, applying the selected transformation.
If `chunksize` was set in the constructor, works in "batch-manner" (more efficient).
Yields
------
list of (int, number)
Documents in the sparse Gensim bag-of-words format.
"""
if self.chunksize:
for chunk in utils.grouper(self.corpus, self.chunksize):
for transformed in self.obj.__getitem__(chunk, chunksize=None):
yield transformed
else:
for doc in self.corpus:
yield self.obj[doc]
def __getitem__(self, docno):
"""Transform the document at position `docno` within `corpus` specified in the constructor.
Parameters
----------
docno : int
Position of the document to transform. Document offset inside `self.corpus`.
Notes
-----
`self.corpus` must support random indexing.
Returns
-------
list of (int, number)
Transformed document in the sparse Gensim bag-of-words format.
Raises
------
RuntimeError
If corpus doesn't support index slicing (`__getitem__` doesn't exists).
"""
if hasattr(self.corpus, '__getitem__'):
return self.obj[self.corpus[docno]]
else:
raise RuntimeError('Type {} does not support slicing.'.format(type(self.corpus)))
| TransformedCorpus |
python | viewflow__viewflow | tests/json/test_json__json.py | {
"start": 95,
"end": 240
} | class ____(models.Model):
data = models.JSONField(default=dict)
json_field = jsonstore.JSONField(max_length=250, blank=True)
| JsonFieldModel |
python | django__django | tests/backends/mysql/test_creation.py | {
"start": 424,
"end": 6423
} | class ____(SimpleTestCase):
def _execute_raise_database_exists(self, cursor, parameters, keepdb=False):
raise DatabaseError(
1007, "Can't create database '%s'; database exists" % parameters["dbname"]
)
def _execute_raise_access_denied(self, cursor, parameters, keepdb=False):
raise DatabaseError(1044, "Access denied for user")
def patch_test_db_creation(self, execute_create_test_db):
return mock.patch.object(
BaseDatabaseCreation, "_execute_create_test_db", execute_create_test_db
)
@mock.patch("sys.stdout", new_callable=StringIO)
@mock.patch("sys.stderr", new_callable=StringIO)
def test_create_test_db_database_exists(self, *mocked_objects):
# Simulate test database creation raising "database exists"
creation = DatabaseCreation(connection)
with self.patch_test_db_creation(self._execute_raise_database_exists):
with mock.patch("builtins.input", return_value="no"):
with self.assertRaises(SystemExit):
# SystemExit is raised if the user answers "no" to the
# prompt asking if it's okay to delete the test database.
creation._create_test_db(
verbosity=0, autoclobber=False, keepdb=False
)
# "Database exists" shouldn't appear when keepdb is on
creation._create_test_db(verbosity=0, autoclobber=False, keepdb=True)
@mock.patch("sys.stdout", new_callable=StringIO)
@mock.patch("sys.stderr", new_callable=StringIO)
def test_create_test_db_unexpected_error(self, *mocked_objects):
# Simulate test database creation raising unexpected error
creation = DatabaseCreation(connection)
with self.patch_test_db_creation(self._execute_raise_access_denied):
with self.assertRaises(SystemExit):
creation._create_test_db(verbosity=0, autoclobber=False, keepdb=False)
def test_clone_test_db_database_exists(self):
creation = DatabaseCreation(connection)
with self.patch_test_db_creation(self._execute_raise_database_exists):
with mock.patch.object(DatabaseCreation, "_clone_db") as _clone_db:
creation._clone_test_db("suffix", verbosity=0, keepdb=True)
_clone_db.assert_not_called()
def test_clone_test_db_options_ordering(self):
creation = DatabaseCreation(connection)
mock_subprocess_call = mock.MagicMock()
mock_subprocess_call.returncode = 0
try:
saved_settings = connection.settings_dict
connection.settings_dict = {
"NAME": "source_db",
"USER": "",
"PASSWORD": "",
"PORT": "",
"HOST": "",
"ENGINE": "django.db.backends.mysql",
"OPTIONS": {
"read_default_file": "my.cnf",
},
}
with mock.patch.object(subprocess, "Popen") as mocked_popen:
mocked_popen.return_value.__enter__.return_value = mock_subprocess_call
creation._clone_db("source_db", "target_db")
mocked_popen.assert_has_calls(
[
mock.call(
[
"mysqldump",
"--defaults-file=my.cnf",
"--routines",
"--events",
"source_db",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=None,
),
]
)
finally:
connection.settings_dict = saved_settings
def test_clone_test_db_subprocess_mysqldump_error(self):
creation = DatabaseCreation(connection)
mock_subprocess_call = mock.MagicMock()
mock_subprocess_call.returncode = 0
# Simulate mysqldump in test database cloning raises an error.
msg = "Couldn't execute 'SELECT ...'"
mock_subprocess_call_error = mock.MagicMock()
mock_subprocess_call_error.returncode = 2
mock_subprocess_call_error.stderr = BytesIO(msg.encode())
with mock.patch.object(subprocess, "Popen") as mocked_popen:
mocked_popen.return_value.__enter__.side_effect = [
mock_subprocess_call_error, # mysqldump mock
mock_subprocess_call, # load mock
]
with captured_stderr() as err, self.assertRaises(SystemExit) as cm:
creation._clone_db("source_db", "target_db")
self.assertEqual(cm.exception.code, 2)
self.assertIn(
f"Got an error on mysqldump when cloning the test database: {msg}",
err.getvalue(),
)
def test_clone_test_db_subprocess_mysql_error(self):
creation = DatabaseCreation(connection)
mock_subprocess_call = mock.MagicMock()
mock_subprocess_call.returncode = 0
# Simulate load in test database cloning raises an error.
msg = "Some error"
mock_subprocess_call_error = mock.MagicMock()
mock_subprocess_call_error.returncode = 3
mock_subprocess_call_error.stderr = BytesIO(msg.encode())
with mock.patch.object(subprocess, "Popen") as mocked_popen:
mocked_popen.return_value.__enter__.side_effect = [
mock_subprocess_call, # mysqldump mock
mock_subprocess_call_error, # load mock
]
with captured_stderr() as err, self.assertRaises(SystemExit) as cm:
creation._clone_db("source_db", "target_db")
self.assertEqual(cm.exception.code, 3)
self.assertIn(f"Got an error cloning the test database: {msg}", err.getvalue())
| DatabaseCreationTests |
python | doocs__leetcode | solution/2600-2699/2673.Make Costs of Paths Equal in a Binary Tree/Solution.py | {
"start": 0,
"end": 296
} | class ____:
def minIncrements(self, n: int, cost: List[int]) -> int:
ans = 0
for i in range(n >> 1, 0, -1):
l, r = i << 1, i << 1 | 1
ans += abs(cost[l - 1] - cost[r - 1])
cost[i - 1] += max(cost[l - 1], cost[r - 1])
return ans
| Solution |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 20015,
"end": 20124
} | class ____(CythonParameter):
"""
Tell cygdb whether to colorize source code.
"""
| ColorizeSourceCode |
python | django__django | tests/aggregation_regress/models.py | {
"start": 335,
"end": 451
} | class ____(models.Model):
name = models.CharField(max_length=255)
num_awards = models.IntegerField()
| Publisher |
python | has2k1__plotnine | plotnine/themes/elements/element_blank.py | {
"start": 41,
"end": 187
} | class ____(element_base):
"""
Theme element: Blank
"""
def __init__(self):
self.properties = {"visible": False}
| element_blank |
python | tiangolo__fastapi | docs_src/sql_databases/tutorial002.py | {
"start": 284,
"end": 406
} | class ____(HeroBase, table=True):
id: Union[int, None] = Field(default=None, primary_key=True)
secret_name: str
| Hero |
python | scipy__scipy | scipy/sparse/_dia.py | {
"start": 21466,
"end": 23643
} | class ____(spmatrix, _dia_base):
"""
Sparse matrix with DIAgonal storage.
This can be instantiated in several ways:
dia_matrix(D)
where D is a 2-D ndarray
dia_matrix(S)
with another sparse array or matrix S (equivalent to S.todia())
dia_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N),
dtype is optional, defaulting to dtype='d'.
dia_matrix((data, offsets), shape=(M, N))
where the ``data[k,:]`` stores the diagonal entries for
diagonal ``offsets[k]`` (See example below)
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
size
data
DIA format data array of the matrix
offsets
DIA format offset array of the matrix
T
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Sparse matrices with DIAgonal storage do not support slicing.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import dia_matrix
>>> dia_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
>>> offsets = np.array([0, -1, 2])
>>> dia_matrix((data, offsets), shape=(4, 4)).toarray()
array([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
>>> from scipy.sparse import dia_matrix
>>> n = 10
>>> ex = np.ones(n)
>>> data = np.array([ex, 2 * ex, ex])
>>> offsets = np.array([-1, 0, 1])
>>> dia_matrix((data, offsets), shape=(n, n)).toarray()
array([[2., 1., 0., ..., 0., 0., 0.],
[1., 2., 1., ..., 0., 0., 0.],
[0., 1., 2., ..., 0., 0., 0.],
...,
[0., 0., 0., ..., 2., 1., 0.],
[0., 0., 0., ..., 1., 2., 1.],
[0., 0., 0., ..., 0., 1., 2.]])
"""
| dia_matrix |
python | lxml__lxml | doc/s5/ep2008/atom.py | {
"start": 18037,
"end": 18124
} | class ____(APPElement):
workspaces = _findall_property('workspace', ns=app_ns)
| Service |
python | instagram__MonkeyType | monkeytype/stubs.py | {
"start": 1944,
"end": 2560
} | class ____(enum.Enum):
"""Strategies for handling existing annotations in the source."""
# Attempt to replicate existing source annotations in the stub. Useful for
# generating complete-looking stubs for inspection.
REPLICATE = 0
# Ignore existing annotations entirely and generate a stub purely from trace
# data. Probably won't apply cleanly, but useful for comparison purposes.
IGNORE = 1
# Generate a stub that omits annotations anywhere the existing source has
# them. Maximizes likelihood that the stub will cleanly apply using retype.
OMIT = 2
| ExistingAnnotationStrategy |
python | scipy__scipy | scipy/optimize/tests/test_trustregion_exact.py | {
"start": 1221,
"end": 1968
} | class ____:
def test_for_ill_condiotioned_matrix(self):
# Ill-conditioned triangular matrix
C = np.array([[1, 2, 3, 4],
[0, 0.05, 60, 7],
[0, 0, 0.8, 9],
[0, 0, 0, 10]])
# Get svd decomposition
U, s, Vt = svd(C)
# Get smallest singular value and correspondent right singular vector.
smin_svd = s[-1]
zmin_svd = Vt[-1, :]
# Estimate smallest singular value
smin, zmin = estimate_smallest_singular_value(C)
# Check the estimation
assert_array_almost_equal(smin, smin_svd, decimal=8)
assert_array_almost_equal(abs(zmin), abs(zmin_svd), decimal=8)
| TestEstimateSmallestSingularValue |
python | PyCQA__pylint | examples/custom.py | {
"start": 249,
"end": 2307
} | class ____(BaseChecker):
"""Add class member attributes to the class local's dictionary."""
# The name defines a custom section of the config for this checker.
name = "custom"
# This class variable declares the messages (i.e. the warnings and errors)
# that the checker can emit.
msgs = {
# Each message has a code, a message that the user will see,
# a unique symbol that identifies the message,
# and a detailed help message
# that will be included in the documentation.
"W0001": ("Message that will be emitted", "message-symbol", "Message help")
}
# This class variable declares the options
# that are configurable by the user.
options = (
# Each option definition has a name which is used on the command line
# and in config files, and a dictionary of arguments
# (similar to argparse.ArgumentParser.add_argument).
(
"store-locals-indicator",
{
"default": "properties",
"help": (
"The expression name that indicates that the locals should "
"be stored"
),
},
),
)
def visit_call(self, node: nodes.Call) -> None:
"""Called when a :class:`.nodes.Call` node is visited.
See :mod:`astroid` for the description of available nodes.
"""
if not (
isinstance(node.func, nodes.Attribute)
and isinstance(node.func.expr, nodes.Name)
and node.func.expr.name == self.linter.config.store_locals_indicator
and node.func.attrname == "create"
):
return
in_class = node.frame()
for param in node.args:
in_class.locals[param.name] = node
def register(linter: PyLinter) -> None:
"""This required method auto registers the checker during initialization.
:param linter: The linter to register the checker to.
"""
linter.register_checker(MyAstroidChecker(linter))
| MyAstroidChecker |
python | catalyst-team__catalyst | catalyst/callbacks/metric.py | {
"start": 6998,
"end": 9129
} | class ____(BatchMetricCallback):
"""FunctionalBatchMetricCallback implements batch-based metrics update
and computation over loader for ``FunctionalBatchMetric`` metrics.
Args:
metric: metric to calculate in callback
input_key: keys of tensors that should be used as inputs in metric calculation
target_key: keys of tensors that should be used as targets in metric calculation
log_on_batch: boolean flag to log computed metrics every batch
.. note::
The main difference from BatchMetricCallback:
FunctionalBatchMetricCallback also propagates current ``batch_size``
to the FunctionalBatchMetric for correct metric computation.
"""
def __init__(
self,
metric: FunctionalBatchMetric,
input_key: Union[str, Iterable[str], Dict[str, str]],
target_key: Union[str, Iterable[str], Dict[str, str]],
log_on_batch: bool = True,
) -> None:
"""Init."""
assert isinstance(metric, FunctionalBatchMetric)
super().__init__(
metric=metric,
input_key=input_key,
target_key=target_key,
log_on_batch=log_on_batch,
)
def _get_value_inputs(
self, runner: "IRunner"
) -> Tuple[float, torch.Tensor, torch.Tensor]:
"""Get data from batch in value input case
Args:
runner: current runner
Returns:
tuple of tensor of inputs and tensor of targets
"""
return (
runner.batch_size,
runner.batch[self.input_key],
runner.batch[self.target_key],
)
def _get_key_value_inputs(self, runner: "IRunner") -> Dict[str, torch.Tensor]:
"""Get data from batch in key-value input case
Args:
runner: current runner
Returns:
dict of inputs and targets tensors
"""
kv_inputs = {}
for key in self._keys:
kv_inputs[self._keys[key]] = runner.batch[key]
kv_inputs["batch_size"] = runner.batch_size
return kv_inputs
| FunctionalBatchMetricCallback |
python | getsentry__sentry | src/sentry/notifications/notification_action/action_handler_registry/base.py | {
"start": 626,
"end": 2353
} | class ____(IntegrationActionHandler, ABC):
config_schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"description": "The configuration schema for a Ticketing Action",
"type": "object",
"properties": {
"target_identifier": {
"type": ["null"],
},
"target_display": {
"type": ["null"],
},
"target_type": {
"type": ["integer"],
"enum": [ActionTarget.SPECIFIC.value],
},
},
}
data_schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"type": "object",
"description": "Schema for ticket creation action data blob",
"properties": {
"dynamic_form_fields": {
"type": "array",
"description": "Dynamic form fields from customer configuration",
"items": {"type": "object"},
"default": [],
},
"additional_fields": {
"type": "object",
"description": "Additional fields that aren't part of standard fields",
"additionalProperties": True,
"default": {},
},
},
"additionalProperties": False,
}
@staticmethod
def get_config_transformer() -> ConfigTransformer | None:
return TargetTypeConfigTransformer.from_config_schema(TicketingActionHandler.config_schema)
@staticmethod
def execute(
job: WorkflowEventData,
action: Action,
detector: Detector,
) -> None:
execute_via_issue_alert_handler(job, action, detector)
| TicketingActionHandler |
python | gevent__gevent | src/greentest/3.10/test_selectors.py | {
"start": 18421,
"end": 18707
} | class ____(BaseSelectorTestCase, ScalableSelectorMixIn,
unittest.TestCase):
SELECTOR = getattr(selectors, 'DevpollSelector', None)
def tearDownModule():
support.reap_children()
if __name__ == "__main__":
unittest.main()
| DevpollSelectorTestCase |
python | rapidsai__cudf | python/cudf/cudf/core/tools/datetimes.py | {
"start": 14759,
"end": 14961
} | class ____:
def _maybe_as_fast_pandas_offset(self):
return pd._libs.tslibs.offsets.MonthEnd()
def __eq__(self, other):
return self._maybe_as_fast_pandas_offset() == other
| MonthEnd |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_web_fetch_tool_result_block.py | {
"start": 469,
"end": 602
} | class ____(BaseModel):
content: Content
tool_use_id: str
type: Literal["web_fetch_tool_result"]
| BetaWebFetchToolResultBlock |
python | pytest-dev__pytest | testing/test_config.py | {
"start": 96076,
"end": 102462
} | class ____:
"""Test native TOML configuration parsing."""
def test_values(self, pytester: Pytester) -> None:
"""Test that values are parsed as expected in TOML mode."""
pytester.makepyprojecttoml(
"""
[tool.pytest]
test_bool = true
test_int = 5
test_float = 30.5
test_args = ["tests", "integration"]
test_paths = ["src", "lib"]
"""
)
pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("test_bool", "Test boolean config", type="bool", default=False)
parser.addini("test_int", "Test integer config", type="int", default=0)
parser.addini("test_float", "Test float config", type="float", default=0.0)
parser.addini("test_args", "Test args config", type="args")
parser.addini("test_paths", "Test paths config", type="paths")
"""
)
config = pytester.parseconfig()
assert config.getini("test_bool") is True
assert config.getini("test_int") == 5
assert config.getini("test_float") == 30.5
assert config.getini("test_args") == ["tests", "integration"]
paths = config.getini("test_paths")
assert len(paths) == 2
# Paths should be resolved relative to pyproject.toml location.
assert all(isinstance(p, Path) for p in paths)
def test_override_with_list(self, pytester: Pytester) -> None:
"""Test that -o overrides work with INI-style list syntax even when
config uses TOML mode."""
pytester.makepyprojecttoml(
"""
[tool.pytest]
test_override_list = ["tests"]
"""
)
pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("test_override_list", "Test override list", type="args")
"""
)
# -o uses INI mode, so uses space-separated syntax.
config = pytester.parseconfig("-o", "test_override_list=tests integration")
assert config.getini("test_override_list") == ["tests", "integration"]
def test_conflict_between_native_and_ini_options(self, pytester: Pytester) -> None:
"""Test that using both [tool.pytest] and [tool.pytest.ini_options] fails."""
pytester.makepyprojecttoml(
"""
[tool.pytest]
test_conflict_1 = true
[tool.pytest.ini_options]
test_conflict_2 = true
""",
)
pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("test_conflict_1", "Test conflict config 1", type="bool")
parser.addini("test_conflict_2", "Test conflict config 2", type="bool")
"""
)
with pytest.raises(UsageError, match="Cannot use both"):
pytester.parseconfig()
def test_type_errors(self, pytester: Pytester) -> None:
"""Test all possible TypeError cases in getini."""
pytester.maketoml(
"""
[pytest]
paths_not_list = "should_be_list"
paths_list_with_int = [1, 2]
args_not_list = 123
args_list_with_int = ["valid", 456]
linelist_not_list = true
linelist_list_with_bool = ["valid", false]
bool_not_bool = "true"
int_not_int = "123"
int_is_bool = true
float_not_float = "3.14"
float_is_bool = false
string_not_string = 123
"""
)
pytester.makeconftest(
"""
def pytest_addoption(parser):
parser.addini("paths_not_list", "test", type="paths")
parser.addini("paths_list_with_int", "test", type="paths")
parser.addini("args_not_list", "test", type="args")
parser.addini("args_list_with_int", "test", type="args")
parser.addini("linelist_not_list", "test", type="linelist")
parser.addini("linelist_list_with_bool", "test", type="linelist")
parser.addini("bool_not_bool", "test", type="bool")
parser.addini("int_not_int", "test", type="int")
parser.addini("int_is_bool", "test", type="int")
parser.addini("float_not_float", "test", type="float")
parser.addini("float_is_bool", "test", type="float")
parser.addini("string_not_string", "test", type="string")
"""
)
config = pytester.parseconfig()
with pytest.raises(
TypeError, match=r"expects a list for type 'paths'.*got str"
):
config.getini("paths_not_list")
with pytest.raises(
TypeError, match=r"expects a list of strings.*item at index 0 is int"
):
config.getini("paths_list_with_int")
with pytest.raises(TypeError, match=r"expects a list for type 'args'.*got int"):
config.getini("args_not_list")
with pytest.raises(
TypeError, match=r"expects a list of strings.*item at index 1 is int"
):
config.getini("args_list_with_int")
with pytest.raises(
TypeError, match=r"expects a list for type 'linelist'.*got bool"
):
config.getini("linelist_not_list")
with pytest.raises(
TypeError, match=r"expects a list of strings.*item at index 1 is bool"
):
config.getini("linelist_list_with_bool")
with pytest.raises(TypeError, match=r"expects a bool.*got str"):
config.getini("bool_not_bool")
with pytest.raises(TypeError, match=r"expects an int.*got str"):
config.getini("int_not_int")
with pytest.raises(TypeError, match=r"expects an int.*got bool"):
config.getini("int_is_bool")
with pytest.raises(TypeError, match=r"expects a float.*got str"):
config.getini("float_not_float")
with pytest.raises(TypeError, match=r"expects a float.*got bool"):
config.getini("float_is_bool")
with pytest.raises(TypeError, match=r"expects a string.*got int"):
config.getini("string_not_string")
| TestNativeTomlConfig |
python | doocs__leetcode | solution/3400-3499/3443.Maximum Manhattan Distance After K Changes/Solution.py | {
"start": 0,
"end": 562
} | class ____:
def maxDistance(self, s: str, k: int) -> int:
def calc(a: str, b: str) -> int:
ans = mx = cnt = 0
for c in s:
if c == a or c == b:
mx += 1
elif cnt < k:
cnt += 1
mx += 1
else:
mx -= 1
ans = max(ans, mx)
return ans
a = calc("S", "E")
b = calc("S", "W")
c = calc("N", "E")
d = calc("N", "W")
return max(a, b, c, d)
| Solution |
python | ray-project__ray | doc/source/rllib/doc_code/advanced_api.py | {
"start": 75,
"end": 4124
} | class ____:
def __init__(self):
self.count = 0
def inc(self, n):
self.count += n
def get(self):
return self.count
# on the driver
counter = Counter.options(name="global_counter").remote()
print(ray.get(counter.get.remote())) # get the latest count
# in your envs
counter = ray.get_actor("global_counter")
counter.inc.remote(1) # async call to increment the global count
# __rllib-adv_api_counter_end__
# __rllib-adv_api_explore_begin__
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
config = AlgorithmConfig().env_runners(
exploration_config={
# Special `type` key provides class information
"type": "StochasticSampling",
# Add any needed constructor args here.
"constructor_arg": "value",
}
)
# __rllib-adv_api_explore_end__
# __rllib-adv_api_evaluation_1_begin__
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
# Run one evaluation step on every 3rd `Algorithm.train()` call.
config = AlgorithmConfig().evaluation(
evaluation_interval=3,
)
# __rllib-adv_api_evaluation_1_end__
# __rllib-adv_api_evaluation_2_begin__
# Every time we run an evaluation step, run it for exactly 10 episodes.
config = AlgorithmConfig().evaluation(
evaluation_duration=10,
evaluation_duration_unit="episodes",
)
# Every time we run an evaluation step, run it for (close to) 200 timesteps.
config = AlgorithmConfig().evaluation(
evaluation_duration=200,
evaluation_duration_unit="timesteps",
)
# __rllib-adv_api_evaluation_2_end__
# __rllib-adv_api_evaluation_3_begin__
# Every time we run an evaluation step, run it for exactly 10 episodes, no matter,
# how many eval workers we have.
config = AlgorithmConfig().evaluation(
evaluation_duration=10,
evaluation_duration_unit="episodes",
# What if number of eval workers is non-dividable by 10?
# -> Run 7 episodes (1 per eval worker), then run 3 more episodes only using
# evaluation workers 1-3 (evaluation workers 4-7 remain idle during that time).
evaluation_num_env_runners=7,
)
# __rllib-adv_api_evaluation_3_end__
# __rllib-adv_api_evaluation_4_begin__
# Run evaluation and training at the same time via threading and make sure they roughly
# take the same time, such that the next `Algorithm.train()` call can execute
# immediately and not have to wait for a still ongoing (e.g. b/c of very long episodes)
# evaluation step:
config = AlgorithmConfig().evaluation(
evaluation_interval=2,
# run evaluation and training in parallel
evaluation_parallel_to_training=True,
# automatically end evaluation when train step has finished
evaluation_duration="auto",
evaluation_duration_unit="timesteps", # <- this setting is ignored; RLlib
# will always run by timesteps (not by complete
# episodes) in this duration=auto mode
)
# __rllib-adv_api_evaluation_4_end__
# __rllib-adv_api_evaluation_5_begin__
# Switching off exploration behavior for evaluation workers
# (see rllib/algorithms/algorithm.py). Use any keys in this sub-dict that are
# also supported in the main Algorithm config.
config = AlgorithmConfig().evaluation(
evaluation_config=AlgorithmConfig.overrides(explore=False),
)
# ... which is a more type-checked version of the old-style:
# config = AlgorithmConfig().evaluation(
# evaluation_config={"explore": False},
# )
# __rllib-adv_api_evaluation_5_end__
# __rllib-adv_api_evaluation_6_begin__
# Having an environment that occasionally blocks completely for e.g. 10min would
# also affect (and block) training. Here is how you can defend your evaluation setup
# against oft-crashing or -stalling envs (or other unstable components on your evaluation
# workers).
config = AlgorithmConfig().evaluation(
evaluation_interval=1,
evaluation_parallel_to_training=True,
evaluation_duration="auto",
evaluation_duration_unit="timesteps", # <- default anyway
evaluation_force_reset_envs_before_iteration=True, # <- default anyway
)
# __rllib-adv_api_evaluation_6_end__
| Counter |
python | huggingface__transformers | src/transformers/modeling_utils.py | {
"start": 33467,
"end": 41299
} | class ____:
"""
A few utilities for `torch.nn.Modules`, to be used as a mixin.
"""
@property
def device(self) -> torch.device:
"""
`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
device).
"""
return next(param.device for param in self.parameters())
@property
def dtype(self) -> torch.dtype:
"""
`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
"""
return next(param.dtype for param in self.parameters() if param.is_floating_point())
def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (`torch.Tensor`): An attention mask.
Returns:
`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min
return encoder_extended_attention_mask
@staticmethod
def create_extended_attention_mask_for_decoder(input_shape, attention_mask, device=None):
if device is not None:
warnings.warn(
"The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning
)
else:
device = attention_mask.device
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
causal_mask,
],
axis=-1,
)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
return extended_attention_mask
def get_extended_attention_mask(
self,
attention_mask: Tensor,
input_shape: tuple[int, ...],
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (`tuple[int]`):
The shape of the input to the model.
Returns:
`torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.
"""
if dtype is None:
dtype = self.dtype
if not (attention_mask.dim() == 2 and self.config.is_decoder):
# show warning only if it won't be shown in `create_extended_attention_mask_for_decoder`
if device is not None:
warnings.warn(
"The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder(
input_shape, attention_mask, device
)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and the dtype's smallest value for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min
return extended_attention_mask
def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
"""
Get number of (optionally, trainable or non-embeddings) parameters in the module.
Args:
only_trainable (`bool`, *optional*, defaults to `False`):
Whether or not to return only the number of trainable parameters
exclude_embeddings (`bool`, *optional*, defaults to `False`):
Whether or not to return only the number of non-embeddings parameters
Returns:
`int`: The number of parameters.
"""
if exclude_embeddings:
embedding_param_names = [
f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding)
]
is_loaded_in_4bit = getattr(self, "is_loaded_in_4bit", False)
if is_loaded_in_4bit:
import bitsandbytes as bnb
total_params = 0
for name, param in self.named_parameters():
if exclude_embeddings and name in embedding_param_names:
continue
if param.requires_grad or not only_trainable:
# For 4bit models, we need to multiply the number of parameters by 2 as half of the parameters are
# used for the 4bit quantization (uint8 tensors are stored)
if is_loaded_in_4bit and isinstance(param, bnb.nn.Params4bit):
if hasattr(param, "element_size"):
num_bytes = param.element_size()
elif hasattr(param, "quant_storage"):
num_bytes = param.quant_storage.itemsize
else:
num_bytes = 1
total_params += param.numel() * 2 * num_bytes
else:
total_params += param.numel()
return total_params
| ModuleUtilsMixin |
python | ray-project__ray | python/ray/air/tests/_test_experiment_restore_run.py | {
"start": 1753,
"end": 5667
} | class ____(tune.search.Searcher):
def __init__(
self,
metric: Optional[str] = None,
mode: Optional[str] = None,
):
super().__init__(metric=metric, mode=mode)
self._trial_count = 0
def suggest(self, trial_id: str) -> Optional[Dict]:
self._trial_count += 1
return {"id": self._trial_count}
def on_trial_complete(
self, trial_id: str, result: Optional[Dict] = None, error: bool = False
) -> None:
pass
def save(self, checkpoint_path: str):
with open(checkpoint_path, "w") as f:
json.dump({"trial_count": self._trial_count}, f)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "r") as f:
state = json.load(f)
self._trial_count = state["trial_count"]
def train_fn(config: dict, data: Optional[dict] = None):
checkpoint = train.get_checkpoint()
start = load_dict_checkpoint(checkpoint)["iteration"] + 1 if checkpoint else 1
training_started_marker = Path(
os.environ.get("RUN_STARTED_MARKER", "/tmp/does-not-exist")
)
if training_started_marker.exists():
# Multiple workers may be trying to delete the same marker
try:
training_started_marker.unlink()
except FileNotFoundError:
pass
for iteration in range(start, ITERATIONS_PER_TRIAL + 1):
time.sleep(TIME_PER_ITER_S)
with create_dict_checkpoint({"iteration": iteration}) as checkpoint:
train.report({"score": random.random()}, checkpoint=checkpoint)
def tuner(experiment_path: str, run_config: tune.RunConfig) -> tune.ResultGrid:
trainable = tune.with_resources(train_fn, resources={"CPU": 1})
trainable = tune.with_parameters(trainable, data={"dummy_data": [1, 2, 3]})
if tune.Tuner.can_restore(experiment_path):
tuner = tune.Tuner.restore(
experiment_path, trainable=trainable, resume_errored=True
)
else:
tuner = tune.Tuner(
trainable,
run_config=run_config,
tune_config=tune.TuneConfig(
num_samples=8,
max_concurrent_trials=2,
search_alg=StatefulSearcher(),
),
)
result_grid = tuner.fit()
return result_grid
def trainer(experiment_path: str, run_config: train.RunConfig) -> train.Result:
dataset_size = 128
num_workers = 4
def train_loop_per_worker(config):
# Wrap the other train_fn with a check for the dataset.
assert train.get_dataset_shard("train")
train_fn(config)
datasets = {
"train": ray.data.range(dataset_size),
"valid": ray.data.read_csv(CSV_DATA_FILE),
}
if DataParallelTrainer.can_restore(experiment_path):
trainer = DataParallelTrainer.restore(
experiment_path,
datasets=datasets,
train_loop_per_worker=train_loop_per_worker,
)
else:
trainer = DataParallelTrainer(
train_loop_per_worker,
datasets=datasets,
scaling_config=train.ScalingConfig(
num_workers=num_workers, trainer_resources={"CPU": 0}
),
run_config=run_config,
)
result = trainer.fit()
return result
if __name__ == "__main__":
experiment_path = os.path.join(STORAGE_PATH, EXP_NAME)
ray.init()
run_config = train.RunConfig(
storage_path=STORAGE_PATH,
name=EXP_NAME,
checkpoint_config=train.CheckpointConfig(num_to_keep=1),
callbacks=[StatefulCallback()],
)
if RUNNER_TYPE == "tuner":
tuner(experiment_path, run_config)
elif RUNNER_TYPE == "trainer":
trainer(experiment_path, run_config)
else:
raise NotImplementedError(
"`RUNNER_TYPE` environment var must be one of ['tuner', 'trainer']"
)
| StatefulSearcher |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/data_asset/path/spark/text_asset.py | {
"start": 1416,
"end": 2011
} | class ____(DirectoryDataAsset, TextAssetBase):
type: Literal["directory_text"] = "directory_text"
@classmethod
@override
def _get_reader_method(cls) -> str:
return "text"
@override
def _get_reader_options_include(self) -> set[str]:
"""These options are available as of spark v3.4.0
See https://spark.apache.org/docs/latest/sql-data-sources-text.html for more info.
"""
return (
super()._get_reader_options_include()
| super(DirectoryDataAsset, self)._get_reader_options_include()
)
| DirectoryTextAsset |
python | keras-team__keras | keras/src/layers/attention/attention.py | {
"start": 227,
"end": 13604
} | class ____(Layer):
"""Dot-product attention layer, a.k.a. Luong-style attention.
Inputs are a list with 2 or 3 elements:
1. A `query` tensor of shape `(batch_size, Tq, dim)`.
2. A `value` tensor of shape `(batch_size, Tv, dim)`.
3. A optional `key` tensor of shape `(batch_size, Tv, dim)`. If none
supplied, `value` will be used as a `key`.
The calculation follows the steps:
1. Calculate attention scores using `query` and `key` with shape
`(batch_size, Tq, Tv)`.
2. Use scores to calculate a softmax distribution with shape
`(batch_size, Tq, Tv)`.
3. Use the softmax distribution to create a linear combination of `value`
with shape `(batch_size, Tq, dim)`.
Args:
use_scale: If `True`, will create a scalar variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
seed: A Python integer to use as random seed in case of `dropout`.
score_mode: Function to use to compute attention scores, one of
`{"dot", "concat"}`. `"dot"` refers to the dot product between the
query and key vectors. `"concat"` refers to the hyperbolic tangent
of the concatenation of the `query` and `key` vectors.
Call arguments:
inputs: List of the following tensors:
- `query`: Query tensor of shape `(batch_size, Tq, dim)`.
- `value`: Value tensor of shape `(batch_size, Tv, dim)`.
- `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
- `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.
If given, the output will be zero at the positions where
`mask==False`.
- `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
return_attention_scores: bool, it `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `(batch_size, Tq, dim)`.
(Optional) Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
def __init__(
self,
use_scale=False,
score_mode="dot",
dropout=0.0,
seed=None,
**kwargs,
):
super().__init__(**kwargs)
self.use_scale = use_scale
self.score_mode = score_mode
self.dropout = dropout
if self.dropout > 0:
self.seed_generator = backend.random.SeedGenerator(seed=seed)
if self.score_mode not in ["dot", "concat"]:
raise ValueError(
"Invalid value for argument score_mode. "
"Expected one of {'dot', 'concat'}. "
f"Received: score_mode={score_mode}"
)
self._return_attention_scores = False
def build(self, input_shape):
self._validate_inputs(input_shape)
self.scale = None
self.concat_score_weight = None
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=(),
initializer="ones",
dtype=self.dtype,
trainable=True,
)
if self.score_mode == "concat":
self.concat_score_weight = self.add_weight(
name="concat_score_weight",
shape=(),
initializer="ones",
dtype=self.dtype,
trainable=True,
)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a query-key dot product.
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`.
"""
if self.score_mode == "dot":
scores = ops.matmul(query, ops.transpose(key, axes=[0, 2, 1]))
if self.scale is not None:
scores = ops.multiply(scores, self.scale)
elif self.score_mode == "concat":
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = ops.expand_dims(key, axis=-3)
if self.scale is not None:
scores = self.concat_score_weight * ops.sum(
ops.tanh(self.scale * (q_reshaped + k_reshaped)), axis=-1
)
else:
scores = self.concat_score_weight * ops.sum(
ops.tanh(q_reshaped + k_reshaped), axis=-1
)
else:
raise ValueError("scores not computed")
return scores
def _apply_scores(self, scores, value, scores_mask=None, training=False):
"""Applies attention scores to the given value tensor.
To use this method in your attention layer, follow the steps:
* Use `query` tensor of shape `(batch_size, Tq)` and `key` tensor of
shape `(batch_size, Tv)` to calculate the attention `scores`.
* Pass `scores` and `value` tensors to this method. The method applies
`scores_mask`, calculates
`attention_distribution = softmax(scores)`, then returns
`matmul(attention_distribution, value).
* Apply `query_mask` and return the result.
Args:
scores: Scores float tensor of shape `(batch_size, Tq, Tv)`.
value: Value tensor of shape `(batch_size, Tv, dim)`.
scores_mask: A boolean mask tensor of shape `(batch_size, 1, Tv)`
or `(batch_size, Tq, Tv)`. If given, scores at positions where
`scores_mask==False` do not contribute to the result. It must
contain at least one `True` value in each line along the last
dimension.
training: Python boolean indicating whether the layer should behave
in training mode (adding dropout) or in inference mode
(no dropout).
Returns:
Tensor of shape `(batch_size, Tq, dim)`.
Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
if scores_mask is not None:
padding_mask = ops.logical_not(scores_mask)
# Bias so padding positions do not contribute to attention
# distribution. Note 65504. is the max float16 value.
max_value = 65504.0 if scores.dtype == "float16" else 1.0e9
if len(padding_mask.shape) == 2:
padding_mask = ops.expand_dims(padding_mask, axis=-2)
scores -= max_value * ops.cast(padding_mask, dtype=scores.dtype)
weights = ops.softmax(scores, axis=-1)
if training and self.dropout > 0:
weights = backend.random.dropout(
weights,
self.dropout,
seed=self.seed_generator,
)
return ops.matmul(weights, value), weights
def _calculate_score_mask(self, scores, v_mask, use_causal_mask):
if use_causal_mask:
# Creates a lower triangular mask, so position i cannot attend to
# positions j > i. This prevents the flow of information from the
# future into the past.
score_shape = ops.shape(scores)
# causal_mask_shape = [1, Tq, Tv].
mask_shape = (1, score_shape[-2], score_shape[-1])
ones_mask = ops.ones(shape=mask_shape, dtype="int32")
row_index = ops.cumsum(ones_mask, axis=-2)
col_index = ops.cumsum(ones_mask, axis=-1)
causal_mask = ops.greater_equal(row_index, col_index)
if v_mask is not None:
# Mask of shape [batch_size, 1, Tv].
v_mask = ops.expand_dims(v_mask, axis=-2)
return ops.logical_and(v_mask, causal_mask)
return causal_mask
else:
# If not using causal mask, return the value mask as is,
# or None if the value mask is not provided.
return v_mask
def call(
self,
inputs,
mask=None,
training=False,
return_attention_scores=False,
use_causal_mask=False,
):
self._validate_inputs(inputs=inputs, mask=mask)
self._return_attention_scores = return_attention_scores
q = inputs[0]
v = inputs[1]
k = inputs[2] if len(inputs) > 2 else v
q_mask = mask[0] if mask else None
v_mask = mask[1] if mask else None
scores = self._calculate_scores(query=q, key=k)
scores_mask = self._calculate_score_mask(
scores, v_mask, use_causal_mask
)
attention_output, attention_scores = self._apply_scores(
scores=scores, value=v, scores_mask=scores_mask, training=training
)
if q_mask is not None:
# Mask of shape [batch_size, Tq, 1].
q_mask = ops.expand_dims(q_mask, axis=-1)
attention_output *= ops.cast(q_mask, dtype=attention_output.dtype)
if return_attention_scores:
return (attention_output, attention_scores)
else:
return attention_output
def compute_mask(self, inputs, mask=None):
self._validate_inputs(inputs=inputs, mask=mask)
if mask is None or mask[0] is None:
return None
return ops.convert_to_tensor(mask[0])
def compute_output_shape(self, input_shape):
query_shape, value_shape, key_shape = input_shape
if key_shape is None:
key_shape = value_shape
output_shape = (*query_shape[:-1], value_shape[-1])
if self._return_attention_scores:
scores_shape = (query_shape[0], query_shape[1], key_shape[1])
return output_shape, scores_shape
return output_shape
def compute_output_spec(
self,
inputs,
mask=None,
return_attention_scores=False,
training=None,
use_causal_mask=False,
):
# Validate and unpack inputs
self._validate_inputs(inputs, mask)
query = inputs[0]
value = inputs[1]
key = inputs[2] if len(inputs) > 2 else value
# Compute primary output shape
output_shape = self.compute_output_shape(
[query.shape, value.shape, key.shape]
)
output_spec = KerasTensor(output_shape, dtype=self.compute_dtype)
# Handle attention scores if requested
if self._return_attention_scores or return_attention_scores:
scores_shape = (
query.shape[0],
query.shape[1],
key.shape[1],
) # (batch_size, Tq, Tv)
attention_scores_spec = KerasTensor(
scores_shape, dtype=self.compute_dtype
)
return (output_spec, attention_scores_spec)
return output_spec
def _validate_inputs(self, inputs, mask=None):
"""Validates arguments of the call method."""
class_name = self.__class__.__name__
if not isinstance(inputs, list):
raise ValueError(
f"{class_name} layer must be called on a list of inputs, "
"namely [query, value] or [query, value, key]. "
f"Received: inputs={inputs}."
)
if len(inputs) < 2 or len(inputs) > 3:
raise ValueError(
f"{class_name} layer accepts inputs list of length 2 or 3, "
"namely [query, value] or [query, value, key]. "
f"Received length: {len(inputs)}."
)
if mask is not None:
if not isinstance(mask, list):
raise ValueError(
f"{class_name} layer mask must be a list, "
f"namely [query_mask, value_mask]. Received: mask={mask}."
)
if len(mask) < 2 or len(mask) > 3:
raise ValueError(
f"{class_name} layer accepts mask list of length 2 or 3. "
f"Received: inputs={inputs}, mask={mask}."
)
def get_config(self):
base_config = super().get_config()
config = {
"use_scale": self.use_scale,
"score_mode": self.score_mode,
"dropout": self.dropout,
}
return {**base_config, **config}
| Attention |
python | google__jax | jax/_src/interpreters/pxla.py | {
"start": 27427,
"end": 39830
} | class ____(NamedTuple):
jaxpr_replicas: int
num_local_replicas: int
num_global_replicas: int
_initial_style_primitives: set[core.Primitive] = set()
def register_initial_style_primitive(prim: core.Primitive):
_initial_style_primitives.add(prim)
def _jaxpr_replicas(jaxpr: core.Jaxpr) -> int:
"""The number of replicas needed for a jaxpr.
For a eqn, multiply the `axis_size` with the `jaxpr_replicas` of the
subjaxprs. For a list of eqns, take the maximum number of replicas.
"""
return max(unsafe_map(_eqn_replicas, jaxpr.eqns), default=1)
# TODO(mattjj): this function assumes that only pmap has a parameter named
# axis_size, and that it corresponds to cross-replica mapping
def _eqn_replicas(eqn: core.JaxprEqn) -> int:
call_jaxpr = eqn.params.get("call_jaxpr")
if call_jaxpr:
return eqn.params.get('axis_size', 1) * _jaxpr_replicas(call_jaxpr)
elif eqn.primitive in _initial_style_primitives:
return _initial_style_primitive_replicas(eqn.params)
else:
return 1
def _initial_style_primitive_replicas(params: dict[str, Any]) -> int:
return max(core.traverse_jaxpr_params(_jaxpr_replicas, params).values(),
default=1)
def find_replicas(
jaxpr: core.Jaxpr, axis_size: int, global_axis_size: int
) -> ReplicaInfo:
# TODO(skyewm): replace this with a chain of pmaps and/or sharded_jits
jaxpr_replicas = _jaxpr_replicas(jaxpr)
num_local_replicas = axis_size * jaxpr_replicas
num_global_replicas = global_axis_size * jaxpr_replicas
return ReplicaInfo(jaxpr_replicas, num_local_replicas, num_global_replicas)
@lu.transformation2
def _change_argument_ranks(f, in_axes, out_axes_thunk, *args):
from jax._src.lax import lax # pytype: disable=import-error
args = tuple(
arg if in_axis is None else lax.squeeze(arg, dimensions=(in_axis,))
for in_axis, arg in zip(in_axes, args)
)
results = f(*args)
out_axes = out_axes_thunk()
return tuple(
x if axis is None else lax.expand_dims(x, dimensions=(axis,))
for x, axis in zip(results, out_axes)
)
def stage_parallel_callable(
pci: ParallelCallableInfo, fun: lu.WrappedFun
) -> tuple[core.Jaxpr, list[Any], ReplicaInfo, ShardInfo]:
sharded_avals = tuple(
_shard_aval(pci.axis_size, axis, aval) if axis is not None else aval
for axis, aval in safe_zip(pci.in_axes, pci.avals))
orig_fun = fun
if config.pmap_no_rank_reduction.value:
fun = _change_argument_ranks(fun, pci.in_axes, pci.out_axes_thunk)
else:
fun = orig_fun
with core.extend_axis_env_nd([(pci.axis_name, pci.global_axis_size)]):
with dispatch.log_elapsed_time(
"Finished tracing + transforming {fun_name} for pmap in {elapsed_time} sec",
fun_name=fun.__name__, event=dispatch.JAXPR_TRACE_EVENT):
jaxpr, out_sharded_avals, consts = pe.trace_to_jaxpr_dynamic(
fun.with_unknown_names(), sharded_avals)
assert len(out_sharded_avals) == len(pci.out_axes), (
len(out_sharded_avals), len(pci.out_axes))
replicas = find_replicas(jaxpr, pci.axis_size, pci.global_axis_size)
num_local_shards = replicas.num_local_replicas
num_global_shards = replicas.num_global_replicas
shards = ShardInfo(
sharded_avals, out_sharded_avals, sharded_avals,
num_local_shards, num_global_shards)
return jaxpr, consts, replicas, shards
def get_pmap_jaxpr(
fun: lu.WrappedFun,
backend_name: str | None,
axis_name: core.AxisName,
axis_size: int,
global_axis_size: int,
devices: Sequence[xc.Device] | None,
name: str,
in_axes: Iterable[int | None],
out_axes_thunk: Callable[[], Sequence[int | None]],
avals: Sequence[core.AbstractValue]):
if devices is not None and backend_name is None:
backend = xb.get_device_backend(devices[0])
else:
backend = xb.get_backend(backend_name)
pci = ParallelCallableInfo(
name, backend, axis_name, axis_size, global_axis_size, devices,
in_axes, out_axes_thunk, avals)
with core.extend_axis_env_nd([(axis_name, axis_size)]):
jaxpr, consts, replicas, shards = stage_parallel_callable(pci, fun)
jaxpr = core.remove_named_axis_effects(jaxpr, {axis_name})
closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
return closed_jaxpr, backend, replicas, shards, pci
@profiler.annotate_function
def lower_parallel_callable(
fun: lu.WrappedFun,
axis_name: core.AxisName,
axis_size: int,
global_axis_size: int,
devices: Sequence[xc.Device] | None,
name: str,
in_axes: Iterable[int | None],
donated_invars: Sequence[bool],
is_explicit_global_axis_size: bool,
avals: Sequence[core.AbstractValue],
*,
lowering_platforms: tuple[str, ...] | None,
lowering_parameters: mlir.LoweringParameters,
closed_jaxpr: core.ClosedJaxpr,
backend: xc.Client,
replicas: ReplicaInfo,
shards: ShardInfo,
pci: ParallelCallableInfo) -> PmapComputation:
# Determine global_axis_size for use in AxisEnv.
# TODO(mattjj,skyewm): revive this check (inner_pmap always False now)
# if xb.process_count() > 1 and global_axis_size is None and inner_pmap:
# raise ValueError("'axis_size' must be specified for nested multi-host pmaps")
if (xb.process_count() == 1 and is_explicit_global_axis_size
and global_axis_size != axis_size):
raise ValueError(
f"Specified axis_size {global_axis_size} doesn't match received "
f"axis_size {axis_size}.")
jaxpr = closed_jaxpr.jaxpr
arg_names = jaxpr._debug_info.safe_arg_names(len(closed_jaxpr.in_avals))
const_args: Sequence[ArrayLike]
if lowering_parameters.hoist_constants_as_args:
const_args_and_avals = core.jaxpr_const_args(jaxpr)
const_args, const_arg_avals = unzip2(const_args_and_avals)
num_const_args = len(const_arg_avals)
in_axes = (None,) * num_const_args + in_axes # type: ignore
donated_invars = (False,) * num_const_args + donated_invars # type: ignore
jaxpr_avals = list(const_arg_avals) + closed_jaxpr.in_avals # type: ignore
shards = ShardInfo(
tuple(const_arg_avals) + shards.sharded_avals, # type: ignore
shards.out_sharded_avals,
tuple(const_arg_avals) + shards.global_sharded_avals, # type: ignore
shards.num_local_shards, shards.num_global_shards)
pci = dataclasses.replace(pci, in_axes=in_axes,
avals=tuple(const_arg_avals) + tuple(pci.avals))
arg_names = ("",) * num_const_args + arg_names
else:
jaxpr_avals = closed_jaxpr.in_avals
const_args = []
num_const_args = 0
no_nested_sharding = False
must_run_on_all_devices = False
if not is_explicit_global_axis_size:
if xb.process_count(backend) > 1:
if devices:
# This allows each host in a multi-host pmap to run on a different number
# of devices, but precludes nested sharding (i.e. inner pmaps).
no_nested_sharding = True
else:
# This assumes all hosts run on the same number of devices. We make sure
# this assumption is true by requiring that the pmap is run on all devices
# (and making the further assumption that each host has the same number of
# devices). Nested sharding is ok in this case.
must_run_on_all_devices = True
if logger.isEnabledFor(logging.DEBUG):
logger.debug("sharded_avals: %s", shards.sharded_avals)
logger.debug("global_sharded_avals: %s", shards.global_sharded_avals)
logger.debug("num_replicas: %d num_local_replicas: %d",
replicas.num_global_replicas, replicas.num_local_replicas)
logger.debug("devices: %s", devices)
logger.debug("local_devices: %s", pci.local_devices)
if (xb.process_count(backend) > 1 and must_run_on_all_devices and
shards.num_local_shards != xb.local_device_count(backend)):
if shards.num_local_shards == axis_size:
raise ValueError(
f"On multi-host platforms, the input to pmapped functions must have "
f"leading axis size equal to the number of local devices if no "
f"`devices` argument is specified. Got {axis_size=}, "
f"num_local_devices={xb.local_device_count(backend)}")
else:
raise ValueError(
f"On multi-host platforms, pmapped functions must run across all "
f"devices, i.e. num_replicas * num_partitions should equal the "
f"number of local devices. Got "
f"num_replicas={replicas.num_local_replicas}, and "
f"num_local_devices={xb.local_device_count(backend)}")
if no_nested_sharding and replicas.jaxpr_replicas > 1:
raise ValueError(
f"On multi-host platforms, pmapped functions that both have `devices` "
f"specified and contain an inner_pmap must specify an "
f"`axis_size` (or remove the `devices` argument). Got nested_replicas="
f"{replicas.jaxpr_replicas}")
log_priority = logging.WARNING if config.log_compiles.value else logging.DEBUG
if logger.isEnabledFor(log_priority):
logger.log(log_priority,
"Compiling %s (%d) for %d devices with args %s. (num_replicas=%d)",
fun.__name__, id(fun),
shards.num_global_shards, avals, replicas.num_global_replicas)
axis_env = sharding_impls.AxisEnv(
replicas.num_global_replicas, (axis_name,), (global_axis_size,))
replicated_args = [axis is None for axis in in_axes]
tuple_args = dispatch.should_tuple_args(len(shards.global_sharded_avals),
backend.platform)
module_name = wrap_name('pmap', name)
platforms = lowering_platforms or (backend.platform,)
with core.extend_axis_env_nd([(axis_name, global_axis_size)]):
ordered_effects = list(
effects.ordered_effects.filter_in(closed_jaxpr.effects))
if ordered_effects:
raise ValueError("Ordered effects not supported in `pmap`.")
unordered_effects = list(
effects.ordered_effects.filter_not_in(closed_jaxpr.effects))
with dispatch.log_elapsed_time(
"Finished jaxpr to MLIR module conversion {fun_name} in {elapsed_time:.9f} sec",
fun_name=module_name, event=dispatch.JAXPR_TO_MLIR_MODULE_EVENT):
lowering_result = mlir.lower_jaxpr_to_module(
module_name,
closed_jaxpr,
num_const_args=num_const_args,
in_avals=jaxpr_avals,
ordered_effects=ordered_effects,
backend=backend,
platforms=platforms,
axis_context=sharding_impls.ReplicaAxisContext(axis_env),
donated_args=donated_invars,
replicated_args=replicated_args,
arg_shardings=None,
result_shardings=None,
arg_names=arg_names,
result_names=jaxpr._debug_info.safe_result_paths(len(jaxpr.outvars)),
num_replicas=replicas.num_global_replicas,
lowering_parameters=lowering_parameters)
return PmapComputation(lowering_result.module,
list(const_args),
platforms=platforms,
pci=pci, replicas=replicas,
shards=shards, tuple_args=tuple_args,
unordered_effects=unordered_effects,
ordered_effects=ordered_effects,
keepalive=lowering_result.keepalive,
host_callbacks=lowering_result.host_callbacks,
jaxpr_debug_info=closed_jaxpr.jaxpr._debug_info,
shape_poly_state=lowering_result.shape_poly_state)
def _pmap_unmap_shaped_array(size: int, axis: int | None, aval: ShapedArray
) -> ShapedArray:
if axis is None: return aval
elif type(axis) is int:
return ShapedArray(tuple_update(aval.shape, axis, size), aval.dtype,
weak_type=aval.weak_type)
else: raise TypeError(axis)
AvalMapHandlerPair = tuple[Any, Callable]
_pmap_aval_mapping_handlers: dict[type, AvalMapHandlerPair] = {
ShapedArray: (Any, _pmap_unmap_shaped_array),
}
def _pmap_unmapped_aval(size: core.AxisSize, axis: int | None,
aval: core.AbstractValue) -> core.AbstractValue:
if not config.pmap_no_rank_reduction.value:
return core.unmapped_aval(size, axis, aval)
_, handler = _pmap_aval_mapping_handlers.get(type(aval), (None, None))
if handler is not None:
return handler(size, axis, aval)
else:
raise TypeError(f"no unmapping handler for {aval} of type {type(aval)}")
| ReplicaInfo |
python | PrefectHQ__prefect | src/prefect/infrastructure/provisioners/ecs.py | {
"start": 1460,
"end": 4959
} | class ____:
"""
Represents an IAM policy resource for managing ECS tasks.
Args:
policy_name: The name of the IAM policy. Defaults to "prefect-ecs-policy".
"""
def __init__(
self,
policy_name: str,
):
self._iam_client = boto3.client("iam")
self._policy_name = policy_name
self._requires_provisioning = None
async def get_task_count(self) -> int:
"""
Returns the number of tasks that will be executed to provision this resource.
Returns:
int: The number of tasks to be provisioned.
"""
return 1 if await self.requires_provisioning() else 0
def _get_policy_by_name(self, name: str) -> dict[str, Any] | None:
paginator = self._iam_client.get_paginator("list_policies")
page_iterator = paginator.paginate(Scope="Local")
for page in page_iterator:
for policy in page["Policies"]:
if policy["PolicyName"] == name:
return policy
return None
async def requires_provisioning(self) -> bool:
"""
Check if this resource requires provisioning.
Returns:
bool: True if provisioning is required, False otherwise.
"""
if self._requires_provisioning is not None:
return self._requires_provisioning
policy = await anyio.to_thread.run_sync(
partial(self._get_policy_by_name, self._policy_name)
)
if policy is not None:
self._requires_provisioning = False
return False
self._requires_provisioning = True
return True
async def get_planned_actions(self) -> List[str]:
"""
Returns a description of the planned actions for provisioning this resource.
Returns:
Optional[str]: A description of the planned actions for provisioning the resource,
or None if provisioning is not required.
"""
if await self.requires_provisioning():
return [
"Creating and attaching an IAM policy for managing ECS tasks:"
f" [blue]{self._policy_name}[/]"
]
return []
async def provision(
self,
policy_document: dict[str, Any],
advance: Callable[[], None],
) -> str:
"""
Provisions an IAM policy.
Args:
advance: A callback function to indicate progress.
Returns:
str: The ARN (Amazon Resource Name) of the created IAM policy.
"""
if await self.requires_provisioning():
console = current_console.get()
console.print("Creating IAM policy")
policy = await anyio.to_thread.run_sync(
partial(
self._iam_client.create_policy,
PolicyName=self._policy_name,
PolicyDocument=json.dumps(policy_document),
)
)
policy_arn = policy["Policy"]["Arn"]
advance()
return policy_arn
else:
policy = await anyio.to_thread.run_sync(
partial(self._get_policy_by_name, self._policy_name)
)
# This should never happen, but just in case
assert policy is not None, "Could not find expected policy"
return policy["Arn"]
@property
def next_steps(self) -> list[str]:
return []
| IamPolicyResource |
python | ray-project__ray | rllib/examples/connectors/single_agent_observation_preprocessor.py | {
"start": 4414,
"end": 6480
} | class ____(SingleAgentObservationPreprocessor):
def recompute_output_observation_space(
self,
input_observation_space: gym.Space,
input_action_space: gym.Space,
) -> gym.Space:
# The new observation space only has a shape of (2,), not (4,).
return gym.spaces.Box(
-5.0,
5.0,
(input_observation_space.shape[0] - 2,),
np.float32,
)
def preprocess(self, observation, episode: SingleAgentEpisode):
# Extract only the positions (x-position and angular-position).
return np.array([observation[0], observation[2]], np.float32)
if __name__ == "__main__":
args = parser.parse_args()
# Define the AlgorithmConfig used.
base_config = (
get_trainable_cls(args.algo)
.get_default_config()
# You use the normal CartPole-v1 env here and your env-to-module preprocessor
# converts this into a non-Markovian version of CartPole.
.environment("CartPole-v1")
.env_runners(
env_to_module_connector=(
lambda env, spaces, device: ReduceCartPoleObservationsToNonMarkovian()
),
)
.training(
gamma=0.99,
lr=0.0003,
)
.rl_module(
model_config=DefaultModelConfig(
# Solve the non-Markovian env through using an LSTM-enhanced model.
use_lstm=True,
vf_share_layers=True,
),
)
)
# PPO-specific settings (for better learning behavior only).
if args.algo == "PPO":
base_config.training(
num_epochs=6,
vf_loss_coeff=0.01,
)
# IMPALA-specific settings (for better learning behavior only).
elif args.algo == "IMPALA":
base_config.training(
lr=0.0005,
vf_loss_coeff=0.05,
entropy_coeff=0.0,
)
# Run everything as configured.
run_rllib_example_script_experiment(base_config, args)
| ReduceCartPoleObservationsToNonMarkovian |
python | tensorflow__tensorflow | tensorflow/compiler/tests/scan_ops_test.py | {
"start": 2018,
"end": 5129
} | class ____(xla_test.XLATestCase):
valid_dtypes = [np.float32, np.int32, np.int64]
def axis_dtypes(self):
return set(self.int_types).intersection([np.int32, np.int64])
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, np.zeros_like, x, axis, exclusive,
reverse)
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
tf_out = math_ops.cumsum(p, axis, exclusive, reverse).eval(
feed_dict={p: x})
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in self.axis_dtypes():
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
axis = constant_op.constant(0, axis_dtype)
math_ops.cumsum(p, axis).eval(feed_dict={p: x})
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testMixedPrecision(self):
with self.session(), self.test_scope():
y = math_ops.cumsum(
constant_op.constant([1., 2., 3., 4.], dtypes.bfloat16),
-1,
exclusive=True).eval()
self.assertAllEqual(y, [0., 1., 3., 6.])
@test_util.disable_mlir_bridge("Error handling")
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
with self.session(), self.test_scope():
input_tensor = ops.convert_to_tensor(x)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumsum(input_tensor, [0]).eval()
| CumsumTest |
python | run-llama__llama_index | llama-index-integrations/program/llama-index-program-evaporate/llama_index/program/evaporate/df.py | {
"start": 2182,
"end": 2876
} | class ____(BaseModel):
"""
Data-frame as a list of column objects.
Each column object contains a list of values. Note that they can be
of variable length, and so may not be able to be converted to a dataframe.
"""
columns: List[DataFrameRow] = Field(..., description="""List of column objects.""")
DEFAULT_FULL_DF_PARSER_TMPL = """
Please extract the following query into a structured data.
Query: {input_str}.
Please extract both the set of column names and row names.
"""
DEFAULT_ROWS_DF_PARSER_TMPL = """
Please extract the following query into structured data.
Query: {input_str}.
The column schema is the following: {column_schema}.
"""
| DataFrameValuesPerColumn |
python | fluentpython__example-code | 11-iface-abc/drum.py | {
"start": 58,
"end": 326
} | class ____(Tombola):
def __init__(self, iterable):
self._balls = []
self.load(iterable)
def load(self, iterable):
self._balls.extend(iterable)
shuffle(self._balls)
def pick(self):
return self._balls.pop()
| TumblingDrum |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeParams3.py | {
"start": 117,
"end": 1443
} | class ____[S]:
s: S
class ClassB[T](dict[S, T]):
s: S
t: T
def method1[U](self):
s: S
t: T
u: U
lambda: (S, T, U)
# This should generate an error because T is out of scope.
t: T
# This should generate an error because S is out of scope.
s: S
# This should generate an error because T is out of scope.
t: T
def func1[A]():
def func2[B]():
a: A
b: B
class ClassC[C](dict[B, C]):
a: A
b: B
c: C
def method1[D](self):
a: A
b: B
c: C
d: D
e = lambda: (A, B, C, D)
a: A
# This should generate an error because B is out of scope.
b: B
# This should generate an error because A is out of scope.
a: A
# This should generate an error because B is out of scope.
b: B
type TA1[A] = list[A]
# This should generate an error because B is out of scope.
type TA2[A] = list[B]
S = 0
def outer1[S]():
S = ""
T = 1
def outer2[T]():
def inner1():
nonlocal S # OK
reveal_type(S, expected_text="Literal['']")
def inner2():
global S # OK
reveal_type(S, expected_text="Literal[0]")
T = 0
| ClassA |
python | gevent__gevent | src/greentest/3.14/test_socket.py | {
"start": 282693,
"end": 283371
} | class ____(unittest.TestCase):
def test_close_detach_race(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def close():
for _ in range(1000):
s.close()
def detach():
for _ in range(1000):
s.detach()
t1 = threading.Thread(target=close)
t2 = threading.Thread(target=detach)
with threading_helper.start_threads([t1, t2]):
pass
def setUpModule():
thread_info = threading_helper.threading_setup()
unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
| FreeThreadingTests |
python | doocs__leetcode | solution/1600-1699/1640.Check Array Formation Through Concatenation/Solution.py | {
"start": 0,
"end": 439
} | class ____:
def canFormArray(self, arr: List[int], pieces: List[List[int]]) -> bool:
i = 0
while i < len(arr):
k = 0
while k < len(pieces) and pieces[k][0] != arr[i]:
k += 1
if k == len(pieces):
return False
j = 0
while j < len(pieces[k]) and arr[i] == pieces[k][j]:
i, j = i + 1, j + 1
return True
| Solution |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-longrag/llama_index/packs/longrag/base.py | {
"start": 9943,
"end": 12767
} | class ____(BaseLlamaPack):
"""
Implements Long RAG.
This implementation is based on the following paper: https://arxiv.org/pdf/2406.15319
"""
def __init__(
self,
data_dir: str,
llm: t.Optional[LLM] = None,
chunk_size: t.Optional[int] = DEFAULT_CHUNK_SIZE,
similarity_top_k: int = DEFAULT_TOP_K,
small_chunk_size: int = DEFAULT_SMALL_CHUNK_SIZE,
index: t.Optional[VectorStoreIndex] = None,
index_kwargs: t.Optional[t.Dict[str, t.Any]] = None,
verbose: bool = False,
):
"""
Constructor.
Args:
data_dir (str): Data directory
llm (t.Optional[LLM]): LLM
chunk_size (Optional[int], optional): Splits each doc to chunk_size to demonstrate grouping. Set to None to disable splitting then grouping. Defaults to DEFAULT_CHUNK_SIZE.
similarity_top_k (int, optional): Top k. Defaults to DEFAULT_TOP_K.
small_chunk_size (int, optional): Small chunk size to split large documents into smaller embeddings of small_chunk_size. Defaults to DEFAULT_SMALL_CHUNK_SIZE.
index (Optional[VectorStoreIndex], optional): Vector index to use (from persist dir). If None, creates a new vector index. Defaults to None
index_kwargs (Optional[Dict[str, Any]], optional): Kwargs to use when constructing VectorStoreIndex. Defaults to None.
verbose (bool, Optional): Verbose mode. Defaults to False
"""
# initialize workflow
self._wf = LongRAGWorkflow(verbose=verbose)
# initialize vars
self._data_dir = data_dir
self._llm = llm or Settings.llm
self._chunk_size = chunk_size
self._similarity_top_k = similarity_top_k
self._small_chunk_size = small_chunk_size
# run wf initialization
result = asyncio_run(
self._wf.run(
data_dir=self._data_dir,
llm=self._llm,
chunk_size=self._chunk_size,
similarity_top_k=self._similarity_top_k,
small_chunk_size=self._small_chunk_size,
index=index,
index_kwargs=index_kwargs,
)
)
self._retriever = result["retriever"]
self._query_eng = result["query_engine"]
self._index = result["index"]
def get_modules(self) -> t.Dict[str, t.Any]:
"""Get Modules."""
return {
"query_engine": self._query_eng,
"llm": self._llm,
"retriever": self._retriever,
"index": self._index,
"workflow": self._wf,
}
def run(self, query: str, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Runs pipeline."""
return asyncio_run(self._wf.run(query_str=query))
| LongRAGPack |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 31319,
"end": 31522
} | class ____(Interface):
"""Objects that have a structural location"""
__parent__ = Attribute("The parent in the location hierarchy")
__name__ = Attribute("The name within the parent")
| ILocation |
python | Netflix__metaflow | metaflow/metadata_provider/metadata.py | {
"start": 530,
"end": 1448
} | class ____(type):
def __new__(metaname, classname, bases, attrs):
return type.__new__(metaname, classname, bases, attrs)
def _get_info(classobject):
if not classobject._INFO:
classobject._INFO = classobject.default_info()
return classobject._INFO
def _set_info(classobject, val):
v = classobject.compute_info(val)
classobject._INFO = v
def __init__(classobject, classname, bases, attrs):
classobject._INFO = None
INFO = property(_get_info, _set_info)
# From https://stackoverflow.com/questions/22409430/portable-meta-class-between-python2-and-python3
def with_metaclass(mcls):
def decorator(cls):
body = vars(cls).copy()
# clean out class body
body.pop("__dict__", None)
body.pop("__weakref__", None)
return mcls(cls.__name__, cls.__bases__, body)
return decorator
| MetadataProviderMeta |
python | lepture__authlib | authlib/oauth2/rfc9101/authorization_server.py | {
"start": 528,
"end": 10624
} | class ____:
"""Authorization server extension implementing the support
for JWT secured authentication request, as defined in :rfc:`RFC9101 <9101>`.
:param support_request: Whether to enable support for the ``request`` parameter.
:param support_request_uri: Whether to enable support for the ``request_uri`` parameter.
This extension is intended to be inherited and registered into the authorization server::
class JWTAuthenticationRequest(rfc9101.JWTAuthenticationRequest):
def resolve_client_public_key(self, client: ClientMixin):
return get_jwks_for_client(client)
def get_request_object(self, request_uri: str):
try:
return requests.get(request_uri).text
except requests.Exception:
return None
def get_server_metadata(self):
return {
"issuer": ...,
"authorization_endpoint": ...,
"require_signed_request_object": ...,
}
def get_client_require_signed_request_object(self, client: ClientMixin):
return client.require_signed_request_object
authorization_server.register_extension(JWTAuthenticationRequest())
"""
def __init__(self, support_request: bool = True, support_request_uri: bool = True):
self.support_request = support_request
self.support_request_uri = support_request_uri
def __call__(self, authorization_server: AuthorizationServer):
authorization_server.register_hook(
"before_get_authorization_grant", self.parse_authorization_request
)
def parse_authorization_request(
self, authorization_server: AuthorizationServer, request: OAuth2Request
):
client = _validate_client(
authorization_server.query_client, request.payload.client_id
)
if not self._shoud_proceed_with_request_object(
authorization_server, request, client
):
return
raw_request_object = self._get_raw_request_object(authorization_server, request)
request_object = self._decode_request_object(
request, client, raw_request_object
)
payload = BasicOAuth2Payload(request_object)
request.payload = payload
def _shoud_proceed_with_request_object(
self,
authorization_server: AuthorizationServer,
request: OAuth2Request,
client: ClientMixin,
) -> bool:
if "request" in request.payload.data and "request_uri" in request.payload.data:
raise InvalidRequestError(
"The 'request' and 'request_uri' parameters are mutually exclusive.",
state=request.payload.state,
)
if "request" in request.payload.data:
if not self.support_request:
raise RequestNotSupportedError(state=request.payload.state)
return True
if "request_uri" in request.payload.data:
if not self.support_request_uri:
raise RequestUriNotSupportedError(state=request.payload.state)
return True
# When the value of it [require_signed_request_object] as client metadata is true,
# then the server MUST reject the authorization request
# from the client that does not conform to this specification.
if self.get_client_require_signed_request_object(client):
raise InvalidRequestError(
"Authorization requests for this client must use signed request objects.",
state=request.payload.state,
)
# When the value of it [require_signed_request_object] as server metadata is true,
# then the server MUST reject the authorization request
# from any client that does not conform to this specification.
metadata = self.get_server_metadata()
if metadata and metadata.get("require_signed_request_object", False):
raise InvalidRequestError(
"Authorization requests for this server must use signed request objects.",
state=request.payload.state,
)
return False
def _get_raw_request_object(
self, authorization_server: AuthorizationServer, request: OAuth2Request
) -> str:
if "request_uri" in request.payload.data:
raw_request_object = self.get_request_object(
request.payload.data["request_uri"]
)
if not raw_request_object:
raise InvalidRequestUriError(state=request.payload.state)
else:
raw_request_object = request.payload.data["request"]
return raw_request_object
def _decode_request_object(
self, request, client: ClientMixin, raw_request_object: str
):
jwks = self.resolve_client_public_key(client)
try:
request_object = jwt.decode(raw_request_object, jwks)
request_object.validate()
except JoseError as error:
raise InvalidRequestObjectError(
description=error.description or InvalidRequestObjectError.description,
state=request.payload.state,
) from error
# It MUST also reject the request if the Request Object uses an
# alg value of none when this server metadata value is true.
# If omitted, the default value is false.
if (
self.get_client_require_signed_request_object(client)
and request_object.header["alg"] == "none"
):
raise InvalidRequestError(
"Authorization requests for this client must use signed request objects.",
state=request.payload.state,
)
# It MUST also reject the request if the Request Object uses an
# alg value of none. If omitted, the default value is false.
metadata = self.get_server_metadata()
if (
metadata
and metadata.get("require_signed_request_object", False)
and request_object.header["alg"] == "none"
):
raise InvalidRequestError(
"Authorization requests for this server must use signed request objects.",
state=request.payload.state,
)
# The client ID values in the client_id request parameter and in
# the Request Object client_id claim MUST be identical.
if request_object["client_id"] != request.payload.client_id:
raise InvalidRequestError(
"The 'client_id' claim from the request parameters "
"and the request object claims don't match.",
state=request.payload.state,
)
# The Request Object MAY be sent by value, as described in Section 5.1,
# or by reference, as described in Section 5.2. request and
# request_uri parameters MUST NOT be included in Request Objects.
if "request" in request_object or "request_uri" in request_object:
raise InvalidRequestError(
"The 'request' and 'request_uri' parameters must not be included in the request object.",
state=request.payload.state,
)
return request_object
def get_request_object(self, request_uri: str):
"""Download the request object at ``request_uri``.
This method must be implemented if the ``request_uri`` parameter is supported::
class JWTAuthenticationRequest(rfc9101.JWTAuthenticationRequest):
def get_request_object(self, request_uri: str):
try:
return requests.get(request_uri).text
except requests.Exception:
return None
"""
raise NotImplementedError()
def resolve_client_public_keys(self, client: ClientMixin):
"""Resolve the client public key for verifying the JWT signature.
A client may have many public keys, in this case, we can retrieve it
via ``kid`` value in headers. Developers MUST implement this method::
class JWTAuthenticationRequest(rfc9101.JWTAuthenticationRequest):
def resolve_client_public_key(self, client):
if client.jwks_uri:
return requests.get(client.jwks_uri).json
return client.jwks
"""
raise NotImplementedError()
def get_server_metadata(self) -> dict:
"""Return server metadata which includes supported grant types,
response types and etc.
When the ``require_signed_request_object`` claim is :data:`True`,
all clients require that authorization requests
use request objects, and an error will be returned when the authorization
request payload is passed in the request body or query string::
class JWTAuthenticationRequest(rfc9101.JWTAuthenticationRequest):
def get_server_metadata(self):
return {
"issuer": ...,
"authorization_endpoint": ...,
"require_signed_request_object": ...,
}
"""
return {} # pragma: no cover
def get_client_require_signed_request_object(self, client: ClientMixin) -> bool:
"""Return the 'require_signed_request_object' client metadata.
When :data:`True`, the client requires that authorization requests
use request objects, and an error will be returned when the authorization
request payload is passed in the request body or query string::
class JWTAuthenticationRequest(rfc9101.JWTAuthenticationRequest):
def get_client_require_signed_request_object(self, client):
return client.require_signed_request_object
If not implemented, the value is considered as :data:`False`.
"""
return False # pragma: no cover
| JWTAuthenticationRequest |
python | catalyst-team__catalyst | catalyst/metrics/_classification.py | {
"start": 11405,
"end": 16879
} | class ____(ICallbackBatchMetric):
"""
This metric accumulates true positive, false positive, true negative,
false negative, support statistics from multilabel data.
Args:
compute_on_call: if True, computes and returns metric value during metric call
prefix: metric prefix
suffix: metric suffix
num_classes: number of classes
Raises:
ValueError: if mode is incorrect
Examples:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# sample data
num_samples, num_features, num_classes = int(1e4), int(1e1), 4
X = torch.rand(num_samples, num_features)
y = (torch.rand(num_samples,) * num_classes).to(torch.int64)
# pytorch loaders
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, num_classes)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])
# model training
runner = dl.SupervisedRunner(
input_key="features",
output_key="logits",
target_key="targets",
loss_key="loss"
)
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logdir",
num_epochs=3,
valid_loader="valid",
valid_metric="accuracy03",
minimize_valid_metric=False,
verbose=True,
callbacks=[
dl.AccuracyCallback(
input_key="logits", target_key="targets", num_classes=num_classes
),
dl.PrecisionRecallF1SupportCallback(
input_key="logits", target_key="targets", num_classes=num_classes
),
dl.AUCCallback(input_key="logits", target_key="targets"),
],
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
compute_on_call: bool = True,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
num_classes: Optional[int] = None,
):
"""Init params"""
super().__init__(compute_on_call=compute_on_call, prefix=prefix, suffix=suffix)
self.statistics = None
self.num_classes = num_classes
self._ddp_backend = None
self.reset()
# multiprocessing could not handle lamdas, so..
def _mp_hack(self):
return np.zeros(shape=(self.num_classes,))
def reset(self) -> None:
"""Reset all the statistics."""
self.statistics = defaultdict(self._mp_hack)
self._ddp_backend = get_backend()
def update(
self, outputs: torch.Tensor, targets: torch.Tensor
) -> Union[Tuple[int, int, int, int, int, int], Tuple[Any, Any, Any, Any, Any, int]]:
"""
Compute statistics from outputs and targets,
update accumulated statistics with new values.
Args:
outputs: prediction values
targets: true answers
Returns:
Tuple of int or array: true negative, false positive, false
negative, true positive, support statistics and num_classes
"""
tn, fp, fn, tp, support, num_classes = get_multilabel_statistics(
outputs=outputs.cpu().detach(), targets=targets.cpu().detach()
)
tn = tn.numpy()
fp = fp.numpy()
fn = fn.numpy()
tp = tp.numpy()
support = support.numpy()
if self.num_classes is None:
self.num_classes = num_classes
self.statistics["tn"] += tn
self.statistics["fp"] += fp
self.statistics["fn"] += fn
self.statistics["tp"] += tp
self.statistics["support"] += support
return tn, fp, fn, tp, support, self.num_classes
def update_key_value(
self, outputs: torch.Tensor, targets: torch.Tensor
) -> Dict[str, float]:
"""
Update statistics and return statistics intermediate result
Args:
outputs: prediction values
targets: true answers
Returns:
dict of statistics for current input
"""
tn, fp, fn, tp, support, _ = self.update(outputs=outputs, targets=targets)
return {"fn": fn, "fp": fp, "support": support, "tn": tn, "tp": tp}
def compute(self) -> Dict[str, Union[int, np.array]]:
"""
Return accumulated statistics
Returns:
dict of statistics
"""
return self.statistics
def compute_key_value(self) -> Dict[str, float]:
"""
Return accumulated statistics
Returns:
dict of statistics
Examples:
>>> {"tp": np.array([1, 2, 1]), "fp": np.array([2, 1, 0]), ...}
"""
result = self.compute()
return {k: result[k] for k in sorted(result.keys())}
| MultilabelStatisticsMetric |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 92661,
"end": 92893
} | class ____(
AddMMConfigMixin, XPUPersistentTMATemplateConfigHeuristic
):
"""Addmm specific mixin for XPU"""
@register_template_heuristic(mm_template.uid, "xpu", op_name="scaled_mm")
| XPUAddmmPersistentTMATemplateConfigHeuristic |
python | ray-project__ray | python/ray/_private/accelerators/neuron.py | {
"start": 882,
"end": 4503
} | class ____(AcceleratorManager):
"""AWS Inferentia and Trainium accelerators."""
@staticmethod
def get_resource_name() -> str:
return "neuron_cores"
@staticmethod
def get_visible_accelerator_ids_env_var() -> str:
return NEURON_RT_VISIBLE_CORES_ENV_VAR
@staticmethod
def get_current_process_visible_accelerator_ids() -> Optional[List[str]]:
neuron_visible_cores = os.environ.get(
NeuronAcceleratorManager.get_visible_accelerator_ids_env_var(), None
)
if neuron_visible_cores is None:
return None
if neuron_visible_cores == "":
return []
return list(neuron_visible_cores.split(","))
@staticmethod
def get_current_node_num_accelerators() -> int:
"""
Attempt to detect the number of Neuron cores on this machine.
Returns:
The number of Neuron cores if any were detected, otherwise 0.
"""
nc_count: int = 0
neuron_path = "/opt/aws/neuron/bin/"
if sys.platform.startswith("linux") and os.path.isdir(neuron_path):
result = subprocess.run(
[os.path.join(neuron_path, "neuron-ls"), "--json-output"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if result.returncode == 0 and result.stdout:
neuron_devices = json.loads(result.stdout)
for neuron_device in neuron_devices:
nc_count += neuron_device.get("nc_count", 0)
return nc_count
@staticmethod
def get_current_node_accelerator_type() -> Optional[str]:
from ray.util.accelerators import AWS_NEURON_CORE
return AWS_NEURON_CORE
@staticmethod
def validate_resource_request_quantity(
quantity: float,
) -> Tuple[bool, Optional[str]]:
if isinstance(quantity, float) and not quantity.is_integer():
return (
False,
f"{NeuronAcceleratorManager.get_resource_name()} resource quantity"
" must be whole numbers. "
f"The specified quantity {quantity} is invalid.",
)
else:
return (True, None)
@staticmethod
def set_current_process_visible_accelerator_ids(
visible_neuron_core_ids: List[str],
) -> None:
"""Set the NEURON_RT_VISIBLE_CORES environment variable based on
given visible_neuron_core_ids.
Args:
visible_neuron_core_ids (List[str]): List of int representing core IDs.
"""
if os.environ.get(NOSET_AWS_NEURON_RT_VISIBLE_CORES_ENV_VAR):
return
os.environ[
NeuronAcceleratorManager.get_visible_accelerator_ids_env_var()
] = ",".join([str(i) for i in visible_neuron_core_ids])
@staticmethod
def get_ec2_instance_num_accelerators(
instance_type: str, instances: dict
) -> Optional[int]:
# TODO: AWS SDK (public API) doesn't yet expose the NeuronCore
# information. It will be available (work-in-progress)
# as xxAcceleratorInfo in InstanceTypeInfo.
# https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceTypeInfo.html
# See https://github.com/ray-project/ray/issues/38473
return AWS_NEURON_INSTANCE_MAP.get(instance_type.lower(), None)
@staticmethod
def get_ec2_instance_accelerator_type(
instance_type: str, instances: dict
) -> Optional[str]:
from ray.util.accelerators import AWS_NEURON_CORE
return AWS_NEURON_CORE
| NeuronAcceleratorManager |
python | getsentry__sentry | src/sentry/api/endpoints/release_thresholds/release_threshold_details.py | {
"start": 912,
"end": 1102
} | class ____(TypedDict):
threshold_type: int
trigger_type: int
value: int
window_in_seconds: int
logger = logging.getLogger("sentry.release_thresholds")
| ReleaseThresholdPUTData |
python | automl__auto-sklearn | test/test_pipeline/components/regression/test_gradient_boosting.py | {
"start": 178,
"end": 726
} | class ____(BaseRegressionComponentTest):
__test__ = True
res = dict()
res["default_boston"] = 0.7491382574462079
res["default_boston_iterative"] = 0.7491382574462079
res["default_boston_sparse"] = None
res["boston_n_calls"] = 9
res["default_diabetes"] = 0.2872735632261877
res["default_diabetes_iterative"] = 0.2872735632261877
res["default_diabetes_sparse"] = None
res["diabetes_n_call"] = 11
sk_mod = sklearn.ensemble.GradientBoostingRegressor
module = GradientBoosting
| GradientBoostingComponentTest |
python | pytorch__pytorch | test/inductor/test_decompose_mem_bound_mm.py | {
"start": 984,
"end": 1184
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, input1, input2):
output = torch.bmm(input1, input2)
return output
| MyModule2 |
python | docker__docker-py | tests/integration/context_api_test.py | {
"start": 183,
"end": 2086
} | class ____(BaseAPIIntegrationTest):
def test_lifecycle(self):
assert ContextAPI.get_context().Name == "default"
assert not ContextAPI.get_context("test")
assert ContextAPI.get_current_context().Name == "default"
dirpath = tempfile.mkdtemp()
ca = tempfile.NamedTemporaryFile(
prefix=os.path.join(dirpath, "ca.pem"), mode="r")
cert = tempfile.NamedTemporaryFile(
prefix=os.path.join(dirpath, "cert.pem"), mode="r")
key = tempfile.NamedTemporaryFile(
prefix=os.path.join(dirpath, "key.pem"), mode="r")
# create context 'test
docker_tls = TLSConfig(
client_cert=(cert.name, key.name),
ca_cert=ca.name)
ContextAPI.create_context(
"test", tls_cfg=docker_tls)
# check for a context 'test' in the context store
assert any(ctx.Name == "test" for ctx in ContextAPI.contexts())
# retrieve a context object for 'test'
assert ContextAPI.get_context("test")
# remove context
ContextAPI.remove_context("test")
with pytest.raises(errors.ContextNotFound):
ContextAPI.inspect_context("test")
# check there is no 'test' context in store
assert not ContextAPI.get_context("test")
ca.close()
key.close()
cert.close()
def test_context_remove(self):
ContextAPI.create_context("test")
assert ContextAPI.inspect_context("test")["Name"] == "test"
ContextAPI.remove_context("test")
with pytest.raises(errors.ContextNotFound):
ContextAPI.inspect_context("test")
def test_load_context_without_orchestrator(self):
ContextAPI.create_context("test")
ctx = ContextAPI.get_context("test")
assert ctx
assert ctx.Name == "test"
assert ctx.Orchestrator is None
| ContextLifecycleTest |
python | realpython__materials | python-selenium/src/bandcamp/app/player.py | {
"start": 198,
"end": 1340
} | class ____:
"""Play tracks from Bandcamp's Discover page."""
def __init__(self) -> None:
self._driver = self._set_up_driver()
self.page = DiscoverPage(self._driver)
self.tracklist = self.page.discover_tracklist
self._current_track = self.tracklist.available_tracks[0]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
"""Close the headless browser."""
self._driver.quit()
def play(self, track_number=None):
"""Play the first track, or one of the available numbered tracks."""
if track_number:
self._current_track = self.tracklist.available_tracks[
track_number - 1
]
self._current_track.play()
def pause(self):
"""Pause the current track."""
self._current_track.pause()
def _set_up_driver(self):
"""Create a headless browser pointing to Bandcamp."""
options = Options()
options.add_argument("--headless")
browser = Firefox(options=options)
browser.get(BANDCAMP_DISCOVER_URL)
return browser
| Player |
python | dask__dask | dask/layers.py | {
"start": 3262,
"end": 10690
} | class ____(Layer):
"""Simple HighLevelGraph array overlap layer.
Lazily computed High-level graph layer for a array overlap operations.
Parameters
----------
name : str
Name of new output overlap array.
array : Dask array
axes: Mapping
Axes dictionary indicating overlap in each dimension,
e.g. ``{'0': 1, '1': 1}``
"""
def __init__(
self,
name,
axes,
chunks,
numblocks,
token,
):
super().__init__()
self.name = name
self.axes = axes
self.chunks = chunks
self.numblocks = numblocks
self.token = token
self._cached_keys = None
def __repr__(self):
return f"ArrayOverlapLayer<name='{self.name}'"
@property
def _dict(self):
"""Materialize full dict representation"""
if hasattr(self, "_cached_dict"):
return self._cached_dict
else:
dsk = self._construct_graph()
self._cached_dict = dsk
return self._cached_dict
def __getitem__(self, key):
return self._dict[key]
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def is_materialized(self):
return hasattr(self, "_cached_dict")
def get_output_keys(self):
return self.keys() # FIXME! this implementation materializes the graph
def _dask_keys(self):
if self._cached_keys is not None:
return self._cached_keys
name, chunks, numblocks = self.name, self.chunks, self.numblocks
def keys(*args):
if not chunks:
return [(name,)]
ind = len(args)
if ind + 1 == len(numblocks):
result = [(name,) + args + (i,) for i in range(numblocks[ind])]
else:
result = [keys(*(args + (i,))) for i in range(numblocks[ind])]
return result
self._cached_keys = result = keys()
return result
def _construct_graph(self, deserializing=False):
"""Construct graph for a simple overlap operation."""
axes = self.axes
chunks = self.chunks
name = self.name
dask_keys = self._dask_keys()
getitem_name = "getitem-" + self.token
overlap_name = "overlap-" + self.token
if deserializing:
# Use CallableLazyImport objects to avoid importing dataframe
# module on the scheduler
concatenate_shaped = CallableLazyImport(
"dask.array.core.concatenate_shaped"
)
else:
# Not running on distributed scheduler - Use explicit functions
from dask.array.core import concatenate_shaped
dims = list(map(len, chunks))
expand_key2 = functools.partial(
_expand_keys_around_center, dims=dims, axes=axes
)
# Make keys for each of the surrounding sub-arrays
interior_keys = toolz.pipe(
dask_keys,
flatten,
map(expand_key2),
map(lambda a: a[0]),
map(flatten),
toolz.concat,
list,
)
interior_slices = {}
overlap_blocks = {}
for k in interior_keys:
frac_slice = fractional_slice((name,) + k, axes)
if frac_slice is False:
continue
if (name,) + k != frac_slice:
interior_slices[(getitem_name,) + k] = frac_slice
else:
interior_slices[(getitem_name,) + k] = (name,) + k
overlap_blocks[(overlap_name,) + k] = (
concatenate_shaped,
*(expand_key2((None,) + k, name=getitem_name)),
)
dsk = toolz.merge(interior_slices, overlap_blocks)
return dsk
def _expand_keys_around_center(k, dims, name=None, axes=None):
"""Get all neighboring keys around center
Parameters
----------
k: Key
The key around which to generate new keys
dims: Sequence[int]
The number of chunks in each dimension
name: Option[str]
The name to include in the output keys, or none to include no name
axes: Dict[int, int]
The axes active in the expansion. We don't expand on non-active axes
Examples
--------
>>> _expand_keys_around_center(('x', 2, 3), dims=[5, 5], name='y', axes={0: 1, 1: 1}) # noqa: E501 # doctest: +NORMALIZE_WHITESPACE
([('y', 1.1, 2.1), ('y', 1.1, 3), ('y', 1.1, 3.9), ('y', 2, 2.1), ('y', 2, 3), ('y', 2, 3.9), ('y', 2.9, 2.1), ('y', 2.9, 3), ('y', 2.9, 3.9)], (3, 3))
>>> _expand_keys_around_center(('x', 0, 4), dims=[5, 5], name='y', axes={0: 1, 1: 1}) # noqa: E501 # doctest: +NORMALIZE_WHITESPACE
([('y', 0, 3.1), ('y', 0, 4), ('y', 0.9, 3.1), ('y', 0.9, 4)], (2, 2))
"""
def convert_depth(depth):
if not isinstance(depth, tuple):
depth = (depth, depth)
return depth
def inds(i, ind, depth):
depth = convert_depth(depth)
rv = []
if ind - 0.9 > 0 and depth[0] != 0:
rv.append(ind - 0.9)
rv.append(ind)
if ind + 0.9 < dims[i] - 1 and depth[1] != 0:
rv.append(ind + 0.9)
return rv
shape = []
for i, ind in enumerate(k[1:]):
depth = convert_depth(axes.get(i, 0))
num = 1
if ind > 0 and depth[0] != 0:
num += 1
if ind < dims[i] - 1 and depth[1] != 0:
num += 1
shape.append(num)
def _valid_depth(depth):
if isinstance(depth, tuple):
return any(x != 0 for x in depth)
else:
return depth != 0
args = [
inds(i, ind, axes.get(i, 0)) if _valid_depth(axes.get(i, 0)) else [ind]
for i, ind in enumerate(k[1:])
]
if name is not None:
args = [[name]] + args
seq = list(product(*args))
shape2 = tuple(
d if _valid_depth(axes.get(i, 0)) else 1 for i, d in enumerate(shape)
)
return seq, shape2
def fractional_slice(task, axes):
"""
>>> fractional_slice(('x', 5.1), {0: 2})
(<built-in function getitem>, ('x', 5), (slice(-2, None, None),))
>>> fractional_slice(('x', 3, 5.1), {0: 2, 1: 3})
(<built-in function getitem>, ('x', 3, 5), (slice(None, None, None), slice(-3, None, None)))
>>> fractional_slice(('x', 2.9, 5.1), {0: 2, 1: 3})
(<built-in function getitem>, ('x', 3, 5), (slice(0, 2, None), slice(-3, None, None)))
"""
rounded = (task[0],) + tuple(int(round(i)) for i in task[1:])
index = []
for i, (t, r) in enumerate(zip(task[1:], rounded[1:])):
depth = axes.get(i, 0)
if isinstance(depth, tuple):
left_depth = depth[0]
right_depth = depth[1]
else:
left_depth = depth
right_depth = depth
if t == r:
index.append(slice(None, None, None))
elif t < r and right_depth:
index.append(slice(0, right_depth))
elif t > r and left_depth:
index.append(slice(-left_depth, None))
else:
return False
index = tuple(index)
if all(ind == slice(None, None, None) for ind in index):
return task
else:
return (operator.getitem, rounded, index)
#
##
### DataFrame Layers & Utilities
##
#
| ArrayOverlapLayer |
python | great-expectations__great_expectations | tests/core/test_expectation_validation_result.py | {
"start": 25723,
"end": 35600
} | class ____:
@pytest.mark.unit
def test_get_max_severity_failure_no_results(self):
"""Test that None is returned when there are no results."""
result = ExpectationSuiteValidationResult(
suite_name="test_suite",
success=True,
results=[],
)
assert result.get_max_severity_failure() is None
@pytest.mark.unit
def test_get_max_severity_failure_no_failures(self):
"""Test that None is returned when all expectations pass."""
config = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
severity="critical",
)
evr = ExpectationValidationResult(
success=True, expectation_config=config, result={"observed_value": 100}
)
result = ExpectationSuiteValidationResult(
suite_name="test_suite",
success=True,
results=[evr],
)
assert result.get_max_severity_failure() is None
@pytest.mark.unit
def test_get_max_severity_failure_multiple_failures(self):
"""Test that the highest severity is returned among multiple failures."""
config1 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
severity="info",
)
config2 = ExpectationConfiguration(
type="expect_column_values_to_be_between",
kwargs={
"column": "test_column",
"min_value": 0,
"max_value": 100,
},
severity="warning",
)
config3 = ExpectationConfiguration(
type="expect_column_values_to_be_unique",
kwargs={"column": "test_column"},
severity="critical",
)
evr1 = ExpectationValidationResult(success=False, expectation_config=config1, result={})
evr2 = ExpectationValidationResult(success=False, expectation_config=config2, result={})
evr3 = ExpectationValidationResult(success=False, expectation_config=config3, result={})
result = ExpectationSuiteValidationResult(
suite_name="test_suite",
success=False,
results=[evr1, evr2, evr3],
)
assert result.get_max_severity_failure() == FailureSeverity.CRITICAL
@pytest.mark.unit
def test_get_max_severity_failure_mixed_success_failure(self):
"""Test that only failed expectations are considered."""
config1 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
severity="critical",
)
config2 = ExpectationConfiguration(
type="expect_column_values_to_be_between",
kwargs={
"column": "test_column",
"min_value": 0,
"max_value": 100,
},
severity="warning",
)
evr1 = ExpectationValidationResult(success=True, expectation_config=config1, result={})
evr2 = ExpectationValidationResult(success=False, expectation_config=config2, result={})
result = ExpectationSuiteValidationResult(
suite_name="test_suite",
success=False,
results=[evr1, evr2],
)
assert result.get_max_severity_failure() == FailureSeverity.WARNING
@pytest.mark.unit
def test_failure_severity_enum_semantic_ordering(self):
"""Test that FailureSeverity enum values sort semantically."""
from great_expectations.expectations.metadata_types import FailureSeverity
# Test that the enum values sort in semantic order (info < warning < critical)
# This should NOT depend on lexicographical order of the string values
severity_values = [FailureSeverity.CRITICAL, FailureSeverity.WARNING, FailureSeverity.INFO]
# Sort the values - they should now be in the correct semantic order
sorted_severities = sorted(severity_values)
# Verify the semantic order: info < warning < critical
assert sorted_severities[0] == FailureSeverity.INFO, (
f"Expected INFO first, got {sorted_severities[0]}"
)
assert sorted_severities[1] == FailureSeverity.WARNING, (
f"Expected WARNING second, got {sorted_severities[1]}"
)
assert sorted_severities[2] == FailureSeverity.CRITICAL, (
f"Expected CRITICAL third, got {sorted_severities[2]}"
)
# Test individual comparisons - these should work semantically, not lexicographically
assert FailureSeverity.INFO < FailureSeverity.WARNING, (
"INFO should be less than WARNING semantically"
)
assert FailureSeverity.WARNING < FailureSeverity.CRITICAL, (
"WARNING should be less than CRITICAL semantically"
)
assert FailureSeverity.INFO < FailureSeverity.CRITICAL, (
"INFO should be less than CRITICAL semantically"
)
# Test that the string values can be in any order - the semantic ordering should still work
# Even though "critical" < "info" < "warning" lexicographically
assert FailureSeverity.CRITICAL > FailureSeverity.INFO, (
"CRITICAL should be greater than INFO semantically"
)
assert FailureSeverity.CRITICAL > FailureSeverity.WARNING, (
"CRITICAL should be greater than WARNING semantically"
)
assert FailureSeverity.WARNING > FailureSeverity.INFO, (
"WARNING should be greater than INFO semantically"
)
@pytest.mark.unit
def test_get_max_severity_failure_invalid_severity_skipped(self, caplog):
"""Test that expectations with invalid severity are skipped."""
import logging
# Create valid configurations first, then mock returning an invalid severity to
# work around ValueError that is raised when attempting to set an invalid
# severity in the constructor
config1 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
severity="critical", # Start with valid severity
)
config2 = ExpectationConfiguration(
type="expect_column_values_to_be_between",
kwargs={
"column": "test_column",
"min_value": 0,
"max_value": 100,
},
severity="warning",
)
# Mock the get method to return invalid severity for testing
original_get = config1.get
def mock_get_invalid(key, default=None):
if key == "severity":
return "invalid_severity"
return original_get(key, default)
config1.get = mock_get_invalid
evr1 = ExpectationValidationResult(success=False, expectation_config=config1, result={})
evr2 = ExpectationValidationResult(success=False, expectation_config=config2, result={})
result = ExpectationSuiteValidationResult(
suite_name="test_suite",
success=False,
results=[evr1, evr2],
)
# Capture log messages BEFORE calling the method
caplog.set_level(
logging.ERROR, logger="great_expectations.core.expectation_validation_result"
)
# Now call the method that should generate the log
assert result.get_max_severity_failure() == FailureSeverity.WARNING
# Verify that an error was logged about invalid severity
assert any(
"Invalid severity value 'invalid_severity'" in record.message
for record in caplog.records
)
assert any(
"expect_column_values_to_not_be_null" in record.message for record in caplog.records
)
@pytest.mark.unit
def test_get_max_severity_failure_all_invalid_severities(self):
"""Test that None is returned when all failures have invalid severity."""
# Create valid configurations first, then mock returning an invalid severity to
# work around ValueError that is raised when attempting to set an invalid
# severity in the constructor
config1 = ExpectationConfiguration(
type="expect_column_values_to_not_be_null",
kwargs={"column": "test_column"},
severity="critical",
)
config2 = ExpectationConfiguration(
type="expect_column_values_to_be_between",
kwargs={
"column": "test_column",
"min_value": 0,
"max_value": 100,
},
severity="warning",
)
evr1 = ExpectationValidationResult(success=False, expectation_config=config1, result={})
evr2 = ExpectationValidationResult(success=False, expectation_config=config2, result={})
result = ExpectationSuiteValidationResult(
suite_name="test_suite",
success=False,
results=[evr1, evr2],
)
# Mock the get method to return invalid severity for testing
original_get1 = config1.get
def mock_get_invalid1(key, default=None):
if key == "severity":
return "invalid_severity_1"
return original_get1(key, default)
config1.get = mock_get_invalid1
original_get2 = config2.get
def mock_get_invalid2(key, default=None):
if key == "severity":
return "invalid_severity_2"
return original_get2(key, default)
config2.get = mock_get_invalid2
# Test that the method returns None when all severities are invalid
assert result.get_max_severity_failure() is None
| TestGetMaxSeverityFailure |
python | ray-project__ray | doc/source/ray-core/doc_code/anti_pattern_global_variables.py | {
"start": 78,
"end": 450
} | class ____:
def f(self):
return global_var + 3
actor = Actor.remote()
global_var = 4
# This returns 6, not 7. It is because the value change of global_var
# inside a driver is not reflected to the actor
# because they are running in different processes.
assert ray.get(actor.f.remote()) == 6
# __anti_pattern_end__
# __better_approach_start__
@ray.remote
| Actor |
python | google__jax | jax/_src/export/shape_poly.py | {
"start": 52498,
"end": 58894
} | class ____(tuple):
"""Tuple of polymorphic dimension specifications.
See docstring of :func:`jax2tf.convert`.
"""
def __init__(self, *dim_specs):
warnings.warn("PolyShape is deprecated, use string specifications for symbolic shapes",
DeprecationWarning, stacklevel=2)
tuple.__init__(dim_specs)
def __new__(cls, *dim_specs):
warnings.warn("PolyShape is deprecated, use string specifications for symbolic shapes",
DeprecationWarning, stacklevel=2)
for ds in dim_specs:
if not isinstance(ds, (int, str)) and ds != ...:
msg = (f"Invalid polymorphic shape element: {ds!r}; must be a string "
"representing a dimension variable, or an integer, or ...")
raise ValueError(msg)
return tuple.__new__(PolyShape, dim_specs)
def __str__(self):
return "(" + ", ".join(["..." if d is ... else str(d) for d in self]) + ")"
def symbolic_shape(shape_spec: str | None,
*,
constraints: Sequence[str] = (),
scope: SymbolicScope | None = None,
like: Sequence[int | None] | None = None
) -> Sequence[DimSize]:
"""Constructs a symbolic shape from a string representation.
See https://docs.jax.dev/en/latest/export/shape_poly.html for examples.
Args:
shape_spec: a symbolic shape specification. None stands for "...".
A shape specification is the string representation of a tuple (the
parentheses are optional) with comma-separated dimension expressions.
A dimension expression can be either: an integer constant,
a dimension variable (alphanumeric
starting with a letter), e1 + e2, e1 - e2, e1 * e2, floordiv(e1, e2),
mod(e1, e2), max(e1, e2), or min(e1, e2).
constraints: a sequence of constraints on symbolic dimension expressions, of
the form `e1 >= e2` or `e1 <= e2`, or `e1 == e2`.
See [the documentation](https://docs.jax.dev/en/latest/export/shape_poly.html#user-specified-symbolic-constraints)
for usage.
scope: optionally, you can specify that the parsed symbolic expressions
be created in the given scope. If this is missing, then a new
`SymbolicScope` is created with the given `constraints`.
You cannot specify both a `scope` and `constraints`.
See [the documentation](https://docs.jax.dev/en/latest/export/shape_poly.html#user-specified-symbolic-constraints)
for usage.
like: when `shape_spec` contains placeholders ("_", "..."), use this
shape to fill in the placeholders.
The dimensions of `like` that are used for filling
must be not `None`. If a dimension in `like` is not `None` and
the corresponding dimension in `shape_spec` is a constant then they
must be equal.
Returns: a tuple with integers or symbolic expressions involving dimension variables.
"""
shape_spec_repr = repr(shape_spec)
if shape_spec is None:
shape_spec = "..."
elif isinstance(shape_spec, PolyShape): # TODO: deprecate
shape_spec = str(shape_spec)
elif not isinstance(shape_spec, str):
raise ValueError("polymorphic shape spec should be None or a string. "
f"Found {shape_spec_repr}.")
if scope is None:
scope = SymbolicScope(constraints)
elif constraints:
raise ValueError("Cannot specify both a `scope` and `constraints`.")
dimensions = _Parser(shape_spec, like, shape_spec_repr, scope).parse()
return dimensions
def symbolic_args_specs(
args, # pytree of arguments
shapes_specs, # prefix pytree of strings
constraints: Sequence[str] = (),
scope: SymbolicScope | None = None,
):
"""Constructs a pytree of jax.ShapeDtypeStruct arguments specs for `export`.
See the documentation of :func:`jax.export.symbolic_shape` and
the [shape polymorphism documentation](https://docs.jax.dev/en/latest/export/shape_poly.html) for details.
Args:
args: a pytree of arguments. These can be jax.Array, or jax.ShapeDtypeStruct.
They are used to learn the pytree structure of the arguments, their dtypes,
and to fill-in the actual shapes where the `shapes_specs` contains
placeholders. Note that only the shape dimensions for which
`shapes_specs` is a placeholder are used from `args`.
shapes_specs: should be `None` (all arguments have static shapes),
a single string (see `shape_spec` for :func:`jax.export.symbolic_shape`;
applies to all arguments), or a pytree matching a prefix
of the `args`.
See [how optional parameters are matched to
arguments](https://docs.jax.dev/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).
constraints: as for :func:`jax.export.symbolic_shape`.
scope: as for :func:`jax.export.symbolic_shape`.
Returns: a pytree of jax.ShapeDtypeStruct matching the `args` with the shapes
replaced with symbolic dimensions as specified by `shapes_specs`.
"""
polymorphic_shapes = shapes_specs
args_flat, args_tree = tree_util.tree_flatten(args)
shapes_and_dtypes = tuple(map(shape_and_dtype_jax_array, args_flat))
shapes, dtypes = util.unzip2(shapes_and_dtypes)
if isinstance(args, tuple) and isinstance(polymorphic_shapes, list):
# TODO: Remove backward-compatibility workaround
polymorphic_shapes_ = tuple(polymorphic_shapes)
else:
polymorphic_shapes_ = polymorphic_shapes
try:
polymorphic_shapes_flat = tree_util.broadcast_prefix(
polymorphic_shapes_, args,
is_leaf=lambda x: x is None)
except ValueError:
e, *_ = tree_util.prefix_errors(
polymorphic_shapes_, args,
is_leaf=lambda x: x is None)
raise e("export.symbolic_args_specs shapes_specs") from None
# Now add in the polymorphic shapes
if scope is None:
scope = SymbolicScope(constraints)
elif constraints:
raise ValueError("Cannot use both `scope` and `constraints`")
args_specs_flat = (
api.ShapeDtypeStruct(symbolic_shape(spec, like=s, scope=scope), t)
for s, t, spec in zip(shapes, dtypes, polymorphic_shapes_flat))
return args_tree.unflatten(args_specs_flat)
def shape_and_dtype_jax_array(a) -> tuple[Sequence[int | None], DType]:
"""Returns the shape and dtype of a jax.Array or a j"""
if isinstance(a, api.ShapeDtypeStruct):
return a.shape, a.dtype
aval = core.get_aval(a)
return aval.shape, aval.dtype
| PolyShape |
python | getsentry__sentry | src/sentry/search/eap/columns.py | {
"start": 5295,
"end": 5528
} | class ____(BaseArgumentDefinition):
# the allowed types of data stored in the attribute
attribute_types: set[constants.SearchType] | None = None
field_allowlist: set[str] | None = None
@dataclass
| AttributeArgumentDefinition |
python | numpy__numpy | numpy/distutils/fcompiler/ibm.py | {
"start": 265,
"end": 3534
} | class ____(FCompiler):
compiler_type = 'ibm'
description = 'IBM XL Fortran Compiler'
version_pattern = r'(xlf\(1\)\s*|)IBM XL Fortran ((Advanced Edition |)Version |Enterprise Edition V|for AIX, V)(?P<version>[^\s*]*)'
#IBM XL Fortran Enterprise Edition V10.1 for AIX \nVersion: 10.01.0000.0004
executables = {
'version_cmd' : ["<F77>", "-qversion"],
'compiler_f77' : ["xlf"],
'compiler_fix' : ["xlf90", "-qfixed"],
'compiler_f90' : ["xlf90"],
'linker_so' : ["xlf95"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
def get_version(self,*args,**kwds):
version = FCompiler.get_version(self,*args,**kwds)
if version is None and sys.platform.startswith('aix'):
# use lslpp to find out xlf version
lslpp = find_executable('lslpp')
xlf = find_executable('xlf')
if os.path.exists(xlf) and os.path.exists(lslpp):
try:
o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
except (OSError, subprocess.CalledProcessError):
pass
else:
m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
if m: version = m.group('version')
xlf_dir = '/etc/opt/ibmcmp/xlf'
if version is None and os.path.isdir(xlf_dir):
# linux:
# If the output of xlf does not contain version info
# (that's the case with xlf 8.1, for instance) then
# let's try another method:
l = sorted(os.listdir(xlf_dir))
l.reverse()
l = [d for d in l if os.path.isfile(os.path.join(xlf_dir, d, 'xlf.cfg'))]
if l:
from distutils.version import LooseVersion
self.version = version = LooseVersion(l[0])
return version
def get_flags(self):
return ['-qextname']
def get_flags_debug(self):
return ['-g']
def get_flags_linker_so(self):
opt = []
if sys.platform=='darwin':
opt.append('-Wl,-bundle,-flat_namespace,-undefined,suppress')
else:
opt.append('-bshared')
version = self.get_version(ok_status=[0, 40])
if version is not None:
if sys.platform.startswith('aix'):
xlf_cfg = '/etc/xlf.cfg'
else:
xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
log.info('Creating '+new_cfg)
with open(xlf_cfg) as fi:
crt1_match = re.compile(r'\s*crt\s*=\s*(?P<path>.*)/crt1.o').match
for line in fi:
m = crt1_match(line)
if m:
fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
else:
fo.write(line)
fo.close()
opt.append('-F'+new_cfg)
return opt
def get_flags_opt(self):
return ['-O3']
if __name__ == '__main__':
from numpy.distutils import customized_fcompiler
log.set_verbosity(2)
print(customized_fcompiler(compiler='ibm').get_version())
| IBMFCompiler |
python | PyCQA__pylint | doc/data/messages/t/too-few-public-methods/good/larger_api.py | {
"start": 0,
"end": 320
} | class ____:
def __init__(self, name: str, fruit_of_residence: Fruit):
self.name = name
self.fruit_of_residence = fruit_of_residence
def bore(self):
print(f"{self.name} is boring into {self.fruit_of_residence}")
def wiggle(self):
print(f"{self.name} wiggle around wormily.")
| Worm |
python | has2k1__plotnine | plotnine/scales/scale_alpha.py | {
"start": 920,
"end": 1378
} | class ____(scale_discrete):
"""
Ordinal Alpha Scale
"""
_aesthetics = ["alpha"]
range: InitVar[tuple[float, float]] = (0.1, 1)
"""
Range ([Minimum, Maximum]) of output alpha values.
Should be between 0 and 1.
"""
def __post_init__(self, range):
super().__post_init__()
def palette(n):
return np.linspace(range[0], range[1], n)
self.palette = palette
@dataclass
| scale_alpha_ordinal |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/_client_utils.py | {
"start": 680,
"end": 2131
} | class ____(anthropic.DefaultAsyncHttpxClient):
"""Borrowed from anthropic._base_client."""
def __del__(self) -> None:
if self.is_closed:
return
try:
# TODO(someday): support non asyncio runtimes here
asyncio.get_running_loop().create_task(self.aclose())
except Exception: # noqa: S110
pass
@lru_cache
def _get_default_httpx_client(
*,
base_url: str | None,
timeout: Any = _NOT_GIVEN,
anthropic_proxy: str | None = None,
) -> _SyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
if anthropic_proxy is not None:
kwargs["proxy"] = anthropic_proxy
return _SyncHttpxClientWrapper(**kwargs)
@lru_cache
def _get_default_async_httpx_client(
*,
base_url: str | None,
timeout: Any = _NOT_GIVEN,
anthropic_proxy: str | None = None,
) -> _AsyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
if anthropic_proxy is not None:
kwargs["proxy"] = anthropic_proxy
return _AsyncHttpxClientWrapper(**kwargs)
| _AsyncHttpxClientWrapper |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_checkbox03.py | {
"start": 315,
"end": 2385
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("checkbox03.xlsx")
def test_create_file_with_insert_checkbox(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_checkbox(0, 0, False)
worksheet.insert_checkbox(2, 2, True)
worksheet.insert_checkbox(8, 4, False)
worksheet.insert_checkbox(9, 4, True)
worksheet = workbook.add_worksheet()
worksheet.insert_checkbox(0, 0, False)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_insert_checkbox_and_manual_format(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format = workbook.add_format({"checkbox": True})
worksheet.insert_checkbox(0, 0, False, cell_format)
worksheet.insert_checkbox(2, 2, True, cell_format)
worksheet.insert_checkbox(8, 4, False, cell_format)
worksheet.insert_checkbox(9, 4, True, cell_format)
worksheet = workbook.add_worksheet()
worksheet.insert_checkbox(0, 0, False, cell_format)
workbook.close()
self.assertExcelEqual()
def test_create_file_with_boolean_and_format(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format = workbook.add_format({"checkbox": True})
worksheet.write(0, 0, False, cell_format)
worksheet.write(2, 2, True, cell_format)
worksheet.write(8, 4, False, cell_format)
worksheet.write(9, 4, True, cell_format)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, False, cell_format)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_managed_kafka.py | {
"start": 11347,
"end": 12470
} | class ____:
@mock.patch(MANAGED_KAFKA_PATH.format("types.Topic.to_dict"))
@mock.patch(MANAGED_KAFKA_PATH.format("ManagedKafkaHook"))
def test_execute(self, mock_hook, to_dict_mock):
op = ManagedKafkaGetTopicOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_id=TEST_CLUSTER_ID,
topic_id=TEST_TOPIC_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.get_topic.assert_called_once_with(
location=GCP_LOCATION,
project_id=GCP_PROJECT,
cluster_id=TEST_CLUSTER_ID,
topic_id=TEST_TOPIC_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestManagedKafkaGetTopicOperator |
python | huggingface__transformers | tests/models/siglip/test_modeling_siglip.py | {
"start": 2989,
"end": 6125
} | class ____:
def __init__(
self,
parent,
batch_size=12,
image_size=4,
patch_size=2,
num_channels=3,
is_training=True,
hidden_size=64,
num_hidden_layers=2,
num_attention_heads=2,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in ViT, the seq length equals the number of patches
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches
# Copied from tests.models.clip.test_modeling_clip.CLIPVisionModelTester.prepare_config_and_inputs
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return SiglipVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = SiglipVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
# Copied from tests.models.clip.test_modeling_clip.CLIPVisionModelTester.prepare_config_and_inputs_for_common
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| SiglipVisionModelTester |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/repo_definitions.py | {
"start": 507,
"end": 1202
} | class ____(ConfigurableResource):
"""My description."""
a_string: str = "baz"
an_unset_string: str = "defaulted"
@asset
def my_asset(my_resource: MyResource):
pass
@observable_source_asset
def my_observable_source_asset(my_resource: MyResource):
pass
@sensor(asset_selection=AssetSelection.all())
def my_sensor(my_resource: MyResource):
pass
@sensor(asset_selection=AssetSelection.all())
def my_sensor_two(my_resource: MyResource):
pass
my_asset_job = define_asset_job(name="my_asset_job", selection=AssetSelection.assets(my_asset))
@schedule(job_name="my_asset_job", cron_schedule="* * * * *")
def my_schedule(my_resource: MyResource):
pass
| MyResource |
python | facebookresearch__faiss | tests/test_index_composite.py | {
"start": 7876,
"end": 9141
} | class ____(unittest.TestCase):
def test_update(self):
d = 64
nb = 1000
nt = 1500
nq = 100
np.random.seed(123)
xb = np.random.random(size=(nb, d)).astype('float32')
xt = np.random.random(size=(nt, d)).astype('float32')
xq = np.random.random(size=(nq, d)).astype('float32')
index = faiss.index_factory(d, "IVF64,Flat")
index.train(xt)
index.add(xb)
index.nprobe = 32
D, I = index.search(xq, 5)
index.make_direct_map()
recons_before = np.vstack([index.reconstruct(i) for i in range(nb)])
# revert order of the 200 first vectors
nu = 200
index.update_vectors(np.arange(nu).astype('int64'),
xb[nu - 1::-1].copy())
recons_after = np.vstack([index.reconstruct(i) for i in range(nb)])
# make sure reconstructions remain the same
diff_recons = recons_before[:nu] - recons_after[nu - 1::-1]
assert np.abs(diff_recons).max() == 0
D2, I2 = index.search(xq, 5)
assert np.all(D == D2)
gt_map = np.arange(nb)
gt_map[:nu] = np.arange(nu, 0, -1) - 1
eqs = I.ravel() == gt_map[I2.ravel()]
assert np.all(eqs)
| TestUpdate |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 272094,
"end": 282681
} | class ____(GeneratedAirbyteSource):
class JSON:
@public
def __init__(self, deserialization_type: Optional[str] = None):
self.deserialization_type = check.opt_str_param(
deserialization_type, "deserialization_type"
)
class AVRO:
@public
def __init__(
self,
deserialization_type: Optional[str] = None,
deserialization_strategy: Optional[str] = None,
schema_registry_url: Optional[str] = None,
schema_registry_username: Optional[str] = None,
schema_registry_password: Optional[str] = None,
):
self.deserialization_type = check.opt_str_param(
deserialization_type, "deserialization_type"
)
self.deserialization_strategy = check.opt_str_param(
deserialization_strategy, "deserialization_strategy"
)
self.schema_registry_url = check.opt_str_param(
schema_registry_url, "schema_registry_url"
)
self.schema_registry_username = check.opt_str_param(
schema_registry_username, "schema_registry_username"
)
self.schema_registry_password = check.opt_str_param(
schema_registry_password, "schema_registry_password"
)
class ManuallyAssignAListOfPartitions:
@public
def __init__(self, topic_partitions: str):
self.subscription_type = "assign"
self.topic_partitions = check.str_param(topic_partitions, "topic_partitions")
class SubscribeToAllTopicsMatchingSpecifiedPattern:
@public
def __init__(self, topic_pattern: str):
self.subscription_type = "subscribe"
self.topic_pattern = check.str_param(topic_pattern, "topic_pattern")
class PLAINTEXT:
@public
def __init__(self, security_protocol: str):
self.security_protocol = check.str_param(security_protocol, "security_protocol")
class SASLPLAINTEXT:
@public
def __init__(self, security_protocol: str, sasl_mechanism: str, sasl_jaas_config: str):
self.security_protocol = check.str_param(security_protocol, "security_protocol")
self.sasl_mechanism = check.str_param(sasl_mechanism, "sasl_mechanism")
self.sasl_jaas_config = check.str_param(sasl_jaas_config, "sasl_jaas_config")
class SASLSSL:
@public
def __init__(self, security_protocol: str, sasl_mechanism: str, sasl_jaas_config: str):
self.security_protocol = check.str_param(security_protocol, "security_protocol")
self.sasl_mechanism = check.str_param(sasl_mechanism, "sasl_mechanism")
self.sasl_jaas_config = check.str_param(sasl_jaas_config, "sasl_jaas_config")
@public
def __init__(
self,
name: str,
MessageFormat: Union["KafkaSource.JSON", "KafkaSource.AVRO"],
bootstrap_servers: str,
subscription: Union[
"KafkaSource.ManuallyAssignAListOfPartitions",
"KafkaSource.SubscribeToAllTopicsMatchingSpecifiedPattern",
],
protocol: Union[
"KafkaSource.PLAINTEXT", "KafkaSource.SASLPLAINTEXT", "KafkaSource.SASLSSL"
],
test_topic: Optional[str] = None,
group_id: Optional[str] = None,
max_poll_records: Optional[int] = None,
polling_time: Optional[int] = None,
client_id: Optional[str] = None,
enable_auto_commit: Optional[bool] = None,
auto_commit_interval_ms: Optional[int] = None,
client_dns_lookup: Optional[str] = None,
retry_backoff_ms: Optional[int] = None,
request_timeout_ms: Optional[int] = None,
receive_buffer_bytes: Optional[int] = None,
auto_offset_reset: Optional[str] = None,
repeated_calls: Optional[int] = None,
max_records_process: Optional[int] = None,
):
"""Airbyte Source for Kafka.
Documentation can be found at https://docs.airbyte.com/integrations/sources/kafka
Args:
name (str): The name of the destination.
MessageFormat (Union[KafkaSource.JSON, KafkaSource.AVRO]): The serialization used based on this
bootstrap_servers (str): A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form host1:port1,host2:port2,.... Since these servers are just used for the initial connection to discover the full cluster membership (which may change dynamically), this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
subscription (Union[KafkaSource.ManuallyAssignAListOfPartitions, KafkaSource.SubscribeToAllTopicsMatchingSpecifiedPattern]): You can choose to manually assign a list of partitions, or subscribe to all topics matching specified pattern to get dynamically assigned partitions.
test_topic (Optional[str]): The Topic to test in case the Airbyte can consume messages.
group_id (Optional[str]): The Group ID is how you distinguish different consumer groups.
max_poll_records (Optional[int]): The maximum number of records returned in a single call to poll(). Note, that max_poll_records does not impact the underlying fetching behavior. The consumer will cache the records from each fetch request and returns them incrementally from each poll.
polling_time (Optional[int]): Amount of time Kafka connector should try to poll for messages.
protocol (Union[KafkaSource.PLAINTEXT, KafkaSource.SASLPLAINTEXT, KafkaSource.SASLSSL]): The Protocol used to communicate with brokers.
client_id (Optional[str]): An ID string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.
enable_auto_commit (Optional[bool]): If true, the consumer's offset will be periodically committed in the background.
auto_commit_interval_ms (Optional[int]): The frequency in milliseconds that the consumer offsets are auto-committed to Kafka if enable.auto.commit is set to true.
client_dns_lookup (Optional[str]): Controls how the client uses DNS lookups. If set to use_all_dns_ips, connect to each returned IP address in sequence until a successful connection is established. After a disconnection, the next IP is used. Once all IPs have been used once, the client resolves the IP(s) from the hostname again. If set to resolve_canonical_bootstrap_servers_only, resolve each bootstrap address into a list of canonical names. After the bootstrap phase, this behaves the same as use_all_dns_ips. If set to default (deprecated), attempt to connect to the first IP address returned by the lookup, even if the lookup returns multiple IP addresses.
retry_backoff_ms (Optional[int]): The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.
request_timeout_ms (Optional[int]): The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
receive_buffer_bytes (Optional[int]): The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.
auto_offset_reset (Optional[str]): What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server - earliest: automatically reset the offset to the earliest offset, latest: automatically reset the offset to the latest offset, none: throw exception to the consumer if no previous offset is found for the consumer's group, anything else: throw exception to the consumer.
repeated_calls (Optional[int]): The number of repeated calls to poll() if no messages were received.
max_records_process (Optional[int]): The Maximum to be processed per execution
"""
self.MessageFormat = check.inst_param(
MessageFormat, "MessageFormat", (KafkaSource.JSON, KafkaSource.AVRO)
)
self.bootstrap_servers = check.str_param(bootstrap_servers, "bootstrap_servers")
self.subscription = check.inst_param(
subscription,
"subscription",
(
KafkaSource.ManuallyAssignAListOfPartitions,
KafkaSource.SubscribeToAllTopicsMatchingSpecifiedPattern,
),
)
self.test_topic = check.opt_str_param(test_topic, "test_topic")
self.group_id = check.opt_str_param(group_id, "group_id")
self.max_poll_records = check.opt_int_param(max_poll_records, "max_poll_records")
self.polling_time = check.opt_int_param(polling_time, "polling_time")
self.protocol = check.inst_param(
protocol,
"protocol",
(KafkaSource.PLAINTEXT, KafkaSource.SASLPLAINTEXT, KafkaSource.SASLSSL),
)
self.client_id = check.opt_str_param(client_id, "client_id")
self.enable_auto_commit = check.opt_bool_param(enable_auto_commit, "enable_auto_commit")
self.auto_commit_interval_ms = check.opt_int_param(
auto_commit_interval_ms, "auto_commit_interval_ms"
)
self.client_dns_lookup = check.opt_str_param(client_dns_lookup, "client_dns_lookup")
self.retry_backoff_ms = check.opt_int_param(retry_backoff_ms, "retry_backoff_ms")
self.request_timeout_ms = check.opt_int_param(request_timeout_ms, "request_timeout_ms")
self.receive_buffer_bytes = check.opt_int_param(
receive_buffer_bytes, "receive_buffer_bytes"
)
self.auto_offset_reset = check.opt_str_param(auto_offset_reset, "auto_offset_reset")
self.repeated_calls = check.opt_int_param(repeated_calls, "repeated_calls")
self.max_records_process = check.opt_int_param(max_records_process, "max_records_process")
super().__init__("Kafka", name)
| KafkaSource |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-desearch/llama_index/tools/desearch/base.py | {
"start": 179,
"end": 262
} | class ____(BaseModel):
media_url: str = ""
type: str = ""
| TwitterScraperMedia |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_array_ops_test.py | {
"start": 1231,
"end": 2806
} | class ____(test.TestCase):
def testReshapeShapeInference(self):
# Create a tensor with an unknown dim 1.
x = weak_tensor.WeakTensor(random_ops.random_normal([4, 10, 10]))
x.shape.assert_is_compatible_with([4, None, 10])
a = array_ops.reshape(x, array_ops.shape(x))
a.shape.assert_is_compatible_with([4, None, 10])
b = array_ops.reshape(x, math_ops.cast(array_ops.shape(x), dtypes.int64))
b.shape.assert_is_compatible_with([4, None, 10])
# We do not shape-infer across a tf.cast into anything that's not tf.int32
# or tf.int64, since they might end up mangling the shape.
c = array_ops.reshape(
x,
math_ops.cast(
math_ops.cast(array_ops.shape(x), dtypes.float32), dtypes.int32
),
)
c.shape.assert_is_compatible_with([None, None, None])
self.assertIsInstance(c, weak_tensor.WeakTensor)
def testSlicedPartialShapeInference(self):
@def_function.function(autograph=False)
def g(x):
return array_ops.zeros([array_ops.shape(x)[0]])
conc = g.get_concrete_function(tensor_spec.TensorSpec([10, None]))
self.assertAllEqual(conc.output_shapes.as_list(), [10])
def testIdentityOnSlicedPartialShapeInference(self):
@def_function.function(autograph=False)
def g(x):
return array_ops.zeros([array_ops.identity(array_ops.shape(x)[0])])
conc = g.get_concrete_function(tensor_spec.TensorSpec([10, None]))
self.assertAllEqual(conc.output_shapes.as_list(), [10])
if __name__ == "__main__":
ops.set_dtype_conversion_mode("all")
test.main()
| ArrayOpTest |
python | huggingface__transformers | src/transformers/models/pvt/modeling_pvt.py | {
"start": 10319,
"end": 11218
} | class ____(nn.Module):
def __init__(
self, config: PvtConfig, hidden_size: int, num_attention_heads: int, sequences_reduction_ratio: float
):
super().__init__()
self.self = PvtEfficientSelfAttention(
config,
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
sequences_reduction_ratio=sequences_reduction_ratio,
)
self.output = PvtSelfOutput(config, hidden_size=hidden_size)
def forward(
self, hidden_states: torch.Tensor, height: int, width: int, output_attentions: bool = False
) -> tuple[torch.Tensor]:
self_outputs = self.self(hidden_states, height, width, output_attentions)
attention_output = self.output(self_outputs[0])
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
| PvtAttention |
python | ansible__ansible | lib/ansible/_internal/_json/__init__.py | {
"start": 959,
"end": 1517
} | class ____(HasCurrent):
"""Mixin for use with `AnsibleVariableVisitor` to track current visitation context."""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._stack: list[t.Any] = []
def __enter__(self) -> None:
self._stack.append(self._current)
def __exit__(self, *_args, **_kwargs) -> None:
self._stack.pop()
def _get_stack(self) -> list[t.Any]:
if not self._stack:
return []
return self._stack[1:] + [self._current]
| StateTrackingMixIn |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/scheduler/scheduler.py | {
"start": 1921,
"end": 7463
} | class ____(abc.ABC):
"""Abstract base class for a scheduler. This component is responsible for interfacing with
an external system such as cron to ensure scheduled repeated execution according.
"""
def start_schedule(
self, instance: DagsterInstance, remote_schedule: RemoteSchedule
) -> InstigatorState:
"""Updates the status of the given schedule to `InstigatorStatus.RUNNING` in schedule storage,.
This should not be overridden by subclasses.
Args:
instance (DagsterInstance): The current instance.
remote_schedule (ExternalSchedule): The schedule to start
"""
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(remote_schedule, "remote_schedule", RemoteSchedule)
stored_state = instance.get_instigator_state(
remote_schedule.get_remote_origin_id(), remote_schedule.selector_id
)
computed_state = remote_schedule.get_current_instigator_state(stored_state)
if computed_state.is_running:
return computed_state
new_instigator_data = ScheduleInstigatorData(
remote_schedule.cron_schedule,
get_current_timestamp(),
)
if not stored_state:
started_state = InstigatorState(
remote_schedule.get_remote_origin(),
InstigatorType.SCHEDULE,
InstigatorStatus.RUNNING,
new_instigator_data,
)
instance.add_instigator_state(started_state)
else:
started_state = stored_state.with_status(InstigatorStatus.RUNNING).with_data(
new_instigator_data
)
instance.update_instigator_state(started_state)
return started_state
def stop_schedule(
self,
instance: DagsterInstance,
schedule_origin_id: str,
schedule_selector_id: str,
remote_schedule: Optional[RemoteSchedule],
) -> InstigatorState:
"""Updates the status of the given schedule to `InstigatorStatus.STOPPED` in schedule storage,.
This should not be overridden by subclasses.
Args:
schedule_origin_id (string): The id of the schedule target to stop running.
"""
check.str_param(schedule_origin_id, "schedule_origin_id")
check.opt_inst_param(remote_schedule, "remote_schedule", RemoteSchedule)
stored_state = instance.get_instigator_state(schedule_origin_id, schedule_selector_id)
if not remote_schedule:
computed_state = stored_state
else:
computed_state = remote_schedule.get_current_instigator_state(stored_state)
if computed_state and not computed_state.is_running:
return computed_state
if not stored_state:
assert remote_schedule
stopped_state = InstigatorState(
remote_schedule.get_remote_origin(),
InstigatorType.SCHEDULE,
InstigatorStatus.STOPPED,
ScheduleInstigatorData(
remote_schedule.cron_schedule,
),
)
instance.add_instigator_state(stopped_state)
else:
stopped_state = stored_state.with_status(InstigatorStatus.STOPPED).with_data(
ScheduleInstigatorData(
cron_schedule=computed_state.instigator_data.cron_schedule, # type: ignore
)
)
instance.update_instigator_state(stopped_state)
return stopped_state
def reset_schedule(
self, instance: DagsterInstance, remote_schedule: RemoteSchedule
) -> InstigatorState:
"""If the given schedule has a default schedule status, then update the status to
`InstigatorStatus.DECLARED_IN_CODE` in schedule storage.
This should not be overridden by subclasses.
Args:
instance (DagsterInstance): The current instance.
remote_schedule (ExternalSchedule): The schedule to reset.
"""
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(remote_schedule, "remote_schedule", RemoteSchedule)
stored_state = instance.get_instigator_state(
remote_schedule.get_remote_origin_id(), remote_schedule.selector_id
)
new_status = InstigatorStatus.DECLARED_IN_CODE
if not stored_state:
new_instigator_data = ScheduleInstigatorData(
remote_schedule.cron_schedule,
start_timestamp=None,
)
reset_state = instance.add_instigator_state(
state=InstigatorState(
remote_schedule.get_remote_origin(),
InstigatorType.SCHEDULE,
new_status,
new_instigator_data,
)
)
else:
reset_state = instance.update_instigator_state(
state=stored_state.with_status(new_status)
)
return reset_state
@abc.abstractmethod
def debug_info(self) -> str:
"""Returns debug information about the scheduler."""
@abc.abstractmethod
def get_logs_path(self, instance: DagsterInstance, schedule_origin_id: str) -> str:
"""Get path to store logs for schedule.
Args:
schedule_origin_id (string): The id of the schedule target to retrieve the log path for
"""
DEFAULT_MAX_CATCHUP_RUNS = 5
| Scheduler |
python | pola-rs__polars | py-polars/src/polars/_typing.py | {
"start": 1588,
"end": 9588
} | class ____(Protocol):
"""Type protocol for Arrow C Schema Interface via Arrow PyCapsule Interface."""
def __arrow_c_schema__(self) -> object: ...
# Data types
PolarsDataType: TypeAlias = Union["DataTypeClass", "DataType"]
PolarsTemporalType: TypeAlias = Union[type["TemporalType"], "TemporalType"]
PolarsIntegerType: TypeAlias = Union[type["IntegerType"], "IntegerType"]
OneOrMoreDataTypes: TypeAlias = Union[PolarsDataType, Iterable[PolarsDataType]]
PythonDataType: TypeAlias = Union[
type[int],
type[float],
type[bool],
type[str],
type["date"],
type["time"],
type["datetime"],
type["timedelta"],
type[list[Any]],
type[tuple[Any, ...]],
type[bytes],
type[object],
type["Decimal"],
type[None],
]
SchemaDefinition: TypeAlias = Union[
Mapping[str, Union[PolarsDataType, PythonDataType, None]],
Sequence[Union[str, tuple[str, Union[PolarsDataType, PythonDataType, None]]]],
]
SchemaDict: TypeAlias = Mapping[str, PolarsDataType]
NumericLiteral: TypeAlias = Union[int, float, "Decimal"]
TemporalLiteral: TypeAlias = Union["date", "time", "datetime", "timedelta"]
NonNestedLiteral: TypeAlias = Union[NumericLiteral, TemporalLiteral, str, bool, bytes]
# Python literal types (can convert into a `lit` expression)
PythonLiteral: TypeAlias = Union[NonNestedLiteral, "np.ndarray[Any, Any]", list[Any]]
# Inputs that can convert into a `col` expression
IntoExprColumn: TypeAlias = Union["Expr", "Series", str]
# Inputs that can convert into an expression
IntoExpr: TypeAlias = Union[PythonLiteral, IntoExprColumn, None]
ComparisonOperator: TypeAlias = Literal["eq", "neq", "gt", "lt", "gt_eq", "lt_eq"]
# selector type, and related collection/sequence
SelectorType: TypeAlias = "Selector"
ColumnNameOrSelector: TypeAlias = Union[str, SelectorType]
# User-facing string literal types
# The following all have an equivalent Rust enum with the same name
Ambiguous: TypeAlias = Literal["earliest", "latest", "raise", "null"]
AvroCompression: TypeAlias = Literal["uncompressed", "snappy", "deflate"]
CsvQuoteStyle: TypeAlias = Literal["necessary", "always", "non_numeric", "never"]
CategoricalOrdering: TypeAlias = Literal["physical", "lexical"]
CsvEncoding: TypeAlias = Literal["utf8", "utf8-lossy"]
ColumnMapping: TypeAlias = tuple[
Literal["iceberg-column-mapping"],
# This is "pa.Schema". Not typed as that causes pyright strict type checking
# failures for users who don't have pyarrow-stubs installed.
Any,
]
DefaultFieldValues: TypeAlias = tuple[
Literal["iceberg"], dict[int, Union["Series", str]]
]
DeletionFiles: TypeAlias = tuple[
Literal["iceberg-position-delete"], dict[int, list[str]]
]
FillNullStrategy: TypeAlias = Literal[
"forward", "backward", "min", "max", "mean", "zero", "one"
]
FloatFmt: TypeAlias = Literal["full", "mixed"]
IndexOrder: TypeAlias = Literal["c", "fortran"]
IpcCompression: TypeAlias = Literal["uncompressed", "lz4", "zstd"]
JoinValidation: TypeAlias = Literal["m:m", "m:1", "1:m", "1:1"]
Label: TypeAlias = Literal["left", "right", "datapoint"]
MaintainOrderJoin: TypeAlias = Literal[
"none", "left", "right", "left_right", "right_left"
]
NonExistent: TypeAlias = Literal["raise", "null"]
NullBehavior: TypeAlias = Literal["ignore", "drop"]
ParallelStrategy: TypeAlias = Literal[
"auto", "columns", "row_groups", "prefiltered", "none"
]
ParquetCompression: TypeAlias = Literal[
"lz4", "uncompressed", "snappy", "gzip", "brotli", "zstd"
]
PivotAgg: TypeAlias = Literal[
"min", "max", "first", "last", "sum", "mean", "median", "len", "item"
]
QuantileMethod: TypeAlias = Literal[
"nearest", "higher", "lower", "midpoint", "linear", "equiprobable"
]
RankMethod: TypeAlias = Literal["average", "min", "max", "dense", "ordinal", "random"]
Roll: TypeAlias = Literal["raise", "forward", "backward"]
RoundMode: TypeAlias = Literal["half_to_even", "half_away_from_zero"]
SerializationFormat: TypeAlias = Literal["binary", "json"]
Endianness: TypeAlias = Literal["little", "big"]
SizeUnit: TypeAlias = Literal[
"b",
"kb",
"mb",
"gb",
"tb",
"bytes",
"kilobytes",
"megabytes",
"gigabytes",
"terabytes",
]
StartBy: TypeAlias = Literal[
"window",
"datapoint",
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
"sunday",
]
SyncOnCloseMethod: TypeAlias = Literal["data", "all"]
TimeUnit: TypeAlias = Literal["ns", "us", "ms"]
UnicodeForm: TypeAlias = Literal["NFC", "NFKC", "NFD", "NFKD"]
UniqueKeepStrategy: TypeAlias = Literal["first", "last", "any", "none"]
UnstackDirection: TypeAlias = Literal["vertical", "horizontal"]
MapElementsStrategy: TypeAlias = Literal["thread_local", "threading"]
# The following have a Rust enum equivalent with a different name
AsofJoinStrategy: TypeAlias = Literal["backward", "forward", "nearest"] # AsofStrategy
ClosedInterval: TypeAlias = Literal["left", "right", "both", "none"] # ClosedWindow
InterpolationMethod: TypeAlias = Literal["linear", "nearest"]
JoinStrategy: TypeAlias = Literal[
"inner", "left", "right", "full", "semi", "anti", "cross", "outer"
] # JoinType
ListToStructWidthStrategy: TypeAlias = Literal["first_non_null", "max_width"]
# The following have no equivalent on the Rust side
ConcatMethod = Literal[
"vertical",
"vertical_relaxed",
"diagonal",
"diagonal_relaxed",
"horizontal",
"align",
"align_full",
"align_inner",
"align_left",
"align_right",
]
CorrelationMethod: TypeAlias = Literal["pearson", "spearman"]
DbReadEngine: TypeAlias = Literal["adbc", "connectorx"]
DbWriteEngine: TypeAlias = Literal["sqlalchemy", "adbc"]
DbWriteMode: TypeAlias = Literal["replace", "append", "fail"]
EpochTimeUnit = Literal["ns", "us", "ms", "s", "d"]
JaxExportType: TypeAlias = Literal["array", "dict"]
Orientation: TypeAlias = Literal["col", "row"]
SearchSortedSide: TypeAlias = Literal["any", "left", "right"]
TorchExportType: TypeAlias = Literal["tensor", "dataset", "dict"]
TransferEncoding: TypeAlias = Literal["hex", "base64"]
WindowMappingStrategy: TypeAlias = Literal["group_to_rows", "join", "explode"]
ExplainFormat: TypeAlias = Literal["plain", "tree"]
# type signature for allowed frame init
FrameInitTypes: TypeAlias = Union[
Mapping[str, Union[Sequence[object], Mapping[str, Sequence[object]], "Series"]],
Sequence[Any],
"np.ndarray[Any, Any]",
"pa.Table",
"pd.DataFrame",
"ArrowArrayExportable",
"ArrowStreamExportable",
"torch.Tensor",
]
# Excel IO
ColumnFormatDict: TypeAlias = Mapping[
# dict of colname(s) or selector(s) to format string or dict
Union[ColumnNameOrSelector, tuple[ColumnNameOrSelector, ...]],
Union[str, Mapping[str, str]],
]
ConditionalFormatDict: TypeAlias = Mapping[
# dict of colname(s) to str, dict, or sequence of str/dict
Union[ColumnNameOrSelector, Collection[str]],
Union[str, Union[Mapping[str, Any], Sequence[Union[str, Mapping[str, Any]]]]],
]
ColumnTotalsDefinition: TypeAlias = Union[
# dict of colname(s) to str, a collection of str, or a boolean
Mapping[Union[ColumnNameOrSelector, tuple[ColumnNameOrSelector]], str],
Sequence[str],
bool,
]
ColumnWidthsDefinition: TypeAlias = Union[
Mapping[ColumnNameOrSelector, Union[tuple[str, ...], int]], int
]
RowTotalsDefinition: TypeAlias = Union[
# dict of colname to str(s), a collection of str, or a boolean
Mapping[str, Union[str, Collection[str]]],
Collection[str],
bool,
]
# standard/named hypothesis profiles used for parametric testing
ParametricProfileNames: TypeAlias = Literal["fast", "balanced", "expensive"]
# typevars for core polars types
PolarsType = TypeVar("PolarsType", "DataFrame", "LazyFrame", "Series", "Expr")
FrameType = TypeVar("FrameType", "DataFrame", "LazyFrame")
BufferInfo: TypeAlias = tuple[int, int, int]
# type alias for supported spreadsheet engines
ExcelSpreadsheetEngine: TypeAlias = Literal["calamine", "openpyxl", "xlsx2csv"]
| ArrowSchemaExportable |
python | coleifer__peewee | playhouse/dataset.py | {
"start": 13193,
"end": 14285
} | class ____(Importer):
def load(self, file_obj, header=True, **kwargs):
count = 0
reader = csv.reader(file_obj, **kwargs)
if header:
try:
header_keys = next(reader)
except StopIteration:
return count
if self.strict:
header_fields = []
for idx, key in enumerate(header_keys):
if key in self.columns:
header_fields.append((idx, self.columns[key]))
else:
header_fields = list(enumerate(header_keys))
else:
header_fields = list(enumerate(self.model._meta.sorted_fields))
if not header_fields:
return count
for row in reader:
obj = {}
for idx, field in header_fields:
if self.strict:
obj[field.name] = field.python_value(row[idx])
else:
obj[field] = row[idx]
self.table.insert(**obj)
count += 1
return count
| CSVImporter |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 18732,
"end": 19457
} | class ____(fixtures.TestBase):
def test_from_string(self):
eq_(util.to_list("xyz"), ["xyz"])
def test_from_set(self):
spec = util.to_list({1, 2, 3})
assert isinstance(spec, list)
eq_(sorted(spec), [1, 2, 3])
def test_from_dict(self):
spec = util.to_list({1: "a", 2: "b", 3: "c"})
assert isinstance(spec, list)
eq_(sorted(spec), [1, 2, 3])
def test_from_tuple(self):
eq_(util.to_list((1, 2, 3)), [1, 2, 3])
def test_from_bytes(self):
eq_(util.to_list(compat.b("abc")), [compat.b("abc")])
eq_(
util.to_list([compat.b("abc"), compat.b("def")]),
[compat.b("abc"), compat.b("def")],
)
| ToListTest |
python | getsentry__sentry | src/sentry/models/groupredirect.py | {
"start": 342,
"end": 1683
} | class ____(Model):
"""
Maintains a reference from a group that has been merged (and subsequently
deleted) to the group that superseded it.
"""
__relocation_scope__ = RelocationScope.Excluded
organization_id = BoundedBigIntegerField(null=True)
group = FlexibleForeignKey(
"sentry.Group", related_name="primary_group_of_redirect", db_constraint=False
)
previous_group_id = BoundedBigIntegerField(unique=True)
previous_short_id = BoundedBigIntegerField(null=True)
previous_project_slug = models.SlugField(null=True)
date_added = models.DateTimeField(default=timezone.now, null=True)
class Meta:
db_table = "sentry_groupredirect"
app_label = "sentry"
unique_together = (("organization_id", "previous_short_id", "previous_project_slug"),)
__repr__ = sane_repr(
"group_id", "previous_group_id", "previous_short_id", "previous_project_slug"
)
@classmethod
def create_for_group(cls, from_group: Group, to_group: Group) -> GroupRedirect:
return cls.objects.create(
organization_id=to_group.project.organization_id,
group=to_group,
previous_group_id=from_group.id,
previous_short_id=from_group.short_id,
previous_project_slug=from_group.project.slug,
)
| GroupRedirect |
python | huggingface__transformers | src/transformers/models/prophetnet/modeling_prophetnet.py | {
"start": 18681,
"end": 20798
} | class ____(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting
based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to
the forward function.
"""
def __init__(self, config: ProphetNetConfig) -> None:
self.max_length = config.max_position_embeddings
super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id)
def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None):
assert (position_ids is None) or (self.padding_idx is None), (
"If position_ids is pre-computed then padding_idx should not be set."
)
if position_ids is None:
if past_key_values is not None and past_key_values.get_seq_length() != 0:
# position_ids is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
prev_num_input_ids = past_key_values.get_seq_length()
num_input_ids = inputs_shape[1] + prev_num_input_ids
position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * (
int(self.padding_idx + num_input_ids)
)
else:
if attention_mask is None:
attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device)
# retrieve position_ids from input_ids / attention_mask
position_ids = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() + self.padding_idx
# make sure position_ids are not bigger then max_length
position_ids = position_ids.clamp(0, self.max_length - 1)
return super().forward(position_ids), position_ids
def _forward(self, position_ids):
return super().forward(position_ids)
| ProphetNetPositionalEmbeddings |
python | falconry__falcon | tests/test_utils.py | {
"start": 52225,
"end": 53195
} | class ____:
def test_method(self, recwarn):
class C:
@deprecation.deprecated_args(allowed_positional=0)
def a_method(self, a=1, b=2):
pass
C().a_method(a=1, b=2)
assert len(recwarn) == 0
C().a_method(1, b=2)
assert len(recwarn) == 1
assert 'C.a_method(...)' in str(recwarn[0].message)
def test_function(self, recwarn):
@deprecation.deprecated_args(allowed_positional=0, is_method=False)
def a_function(a=1, b=2):
pass
a_function(a=1, b=2)
assert len(recwarn) == 0
a_function(1, b=2)
assert len(recwarn) == 1
assert 'a_function(...)' in str(recwarn[0].message)
def test_TimezoneGMT():
with pytest.warns(deprecation.DeprecatedWarning):
tz = TimezoneGMT()
z = timedelta(0)
assert tz.tzname(None) == 'GMT'
assert tz.dst(None) == z
assert tz.utcoffset(None) == z
| TestDeprecatedArgs |
python | huggingface__transformers | src/transformers/models/chameleon/image_processing_chameleon.py | {
"start": 1356,
"end": 16684
} | class ____(BaseImageProcessor):
r"""
Constructs a Chameleon image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 512}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to 1):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
crop_size (`dict[str, int]` *optional*, defaults to {"height": 512, "width": 512}):
Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to 0.0078):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
image_mean (`float` or `list[float]`, *optional*, defaults to `[1.0, 1.0, 1.0]`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `[1.0, 1.0, 1.0]`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PIL.Image.LANCZOS,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 0.0078,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 512}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {"height": 512, "width": 512}
crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else [1.0, 1.0, 1.0]
self.image_std = image_std if image_std is not None else [1.0, 1.0, 1.0]
self.do_convert_rgb = do_convert_rgb
# Copied from transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
default_to_square = True
if "shortest_edge" in size:
size = size["shortest_edge"]
default_to_square = False
elif "height" in size and "width" in size:
size = (size["height"], size["width"])
else:
raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
output_size = get_resize_output_image_size(
image,
size=size,
default_to_square=default_to_square,
input_data_format=input_data_format,
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[int] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, param_name="size", default_to_square=False)
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
if do_convert_rgb:
images = [self.blend_rgba(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
def blend_rgba(self, image: ImageInput) -> ImageInput:
"""
Convert image to RGB by blending the transparency layer if it's in RGBA format.
If image is not `PIL.Image`, it si simply returned without modifications.
Args:
image (`ImageInput`):
Image to convert.
"""
if not isinstance(image, PIL.Image.Image):
return image
elif image.mode == "RGB":
return image
img_rgba = np.array(image.convert("RGBA"))
# If there is no transparency layer, simple convert and return.
if not (img_rgba[:, :, 3] < 255).any():
return image.convert("RGB")
# There is a transparency layer, blend it with a white background.
# Calculate the alpha proportion for blending.
alpha = img_rgba[:, :, 3] / 255.0
img_rgb = (1 - alpha[:, :, np.newaxis]) * 255 + alpha[:, :, np.newaxis] * img_rgba[:, :, :3]
return PIL.Image.fromarray(img_rgb.astype("uint8"), "RGB")
__all__ = ["ChameleonImageProcessor"]
| ChameleonImageProcessor |
python | ansible__ansible | lib/ansible/module_utils/facts/system/user.py | {
"start": 814,
"end": 1862
} | class ____(BaseFactCollector):
name = 'user'
_fact_ids = set(['user_id', 'user_uid', 'user_gid',
'user_gecos', 'user_dir', 'user_shell',
'real_user_id', 'effective_user_id',
'effective_group_ids']) # type: t.Set[str]
def collect(self, module=None, collected_facts=None):
user_facts = {}
user_facts['user_id'] = getpass.getuser()
try:
pwent = pwd.getpwnam(getpass.getuser())
except KeyError:
pwent = pwd.getpwuid(os.getuid())
user_facts['user_uid'] = pwent.pw_uid
user_facts['user_gid'] = pwent.pw_gid
user_facts['user_gecos'] = pwent.pw_gecos
user_facts['user_dir'] = pwent.pw_dir
user_facts['user_shell'] = pwent.pw_shell
user_facts['real_user_id'] = os.getuid()
user_facts['effective_user_id'] = os.geteuid()
user_facts['real_group_id'] = os.getgid()
user_facts['effective_group_id'] = os.getgid()
return user_facts
| UserFactCollector |
python | mahmoud__boltons | boltons/tbutils.py | {
"start": 13334,
"end": 16848
} | class ____:
"""An ExceptionInfo object ties together three main fields suitable
for representing an instance of an exception: The exception type
name, a string representation of the exception itself (the
exception message), and information about the traceback (stored as
a :class:`TracebackInfo` object).
These fields line up with :func:`sys.exc_info`, but unlike the
values returned by that function, ExceptionInfo does not hold any
references to the real exception or traceback. This property makes
it suitable for serialization or long-term retention, without
worrying about formatting pitfalls, circular references, or leaking memory.
Args:
exc_type (str): The exception type name.
exc_msg (str): String representation of the exception value.
tb_info (TracebackInfo): Information about the stack trace of the
exception.
Like the :class:`TracebackInfo`, ExceptionInfo is most commonly
instantiated from one of its classmethods: :meth:`from_exc_info`
or :meth:`from_current`.
"""
#: Override this in inherited types to control the TracebackInfo type used
tb_info_type = TracebackInfo
def __init__(self, exc_type, exc_msg, tb_info):
# TODO: additional fields for SyntaxErrors
self.exc_type = exc_type
self.exc_msg = exc_msg
self.tb_info = tb_info
@classmethod
def from_exc_info(cls, exc_type, exc_value, traceback):
"""Create an :class:`ExceptionInfo` object from the exception's type,
value, and traceback, as returned by :func:`sys.exc_info`. See
also :meth:`from_current`.
"""
type_str = exc_type.__name__
type_mod = exc_type.__module__
if type_mod not in ("__main__", "__builtin__", "exceptions", "builtins"):
type_str = f'{type_mod}.{type_str}'
val_str = _some_str(exc_value)
tb_info = cls.tb_info_type.from_traceback(traceback)
return cls(type_str, val_str, tb_info)
@classmethod
def from_current(cls):
"""Create an :class:`ExceptionInfo` object from the current exception
being handled, by way of :func:`sys.exc_info`. Will raise an
exception if no exception is currently being handled.
"""
return cls.from_exc_info(*sys.exc_info())
def to_dict(self):
"""Get a :class:`dict` representation of the ExceptionInfo, suitable
for JSON serialization.
"""
return {'exc_type': self.exc_type,
'exc_msg': self.exc_msg,
'exc_tb': self.tb_info.to_dict()}
def __repr__(self):
cn = self.__class__.__name__
try:
len_frames = len(self.tb_info.frames)
last_frame = f', last={self.tb_info.frames[-1]!r}'
except Exception:
len_frames = 0
last_frame = ''
args = (cn, self.exc_type, self.exc_msg, len_frames, last_frame)
return '<%s [%s: %s] (%s frames%s)>' % args
def get_formatted(self):
"""Returns a string formatted in the traditional Python
built-in style observable when an exception is not caught. In
other words, mimics :func:`traceback.format_exception`.
"""
# TODO: add SyntaxError formatting
tb_str = self.tb_info.get_formatted()
return ''.join([tb_str, f'{self.exc_type}: {self.exc_msg}'])
def get_formatted_exception_only(self):
return f'{self.exc_type}: {self.exc_msg}'
| ExceptionInfo |
python | allegroai__clearml | clearml/backend_api/services/v2_20/projects.py | {
"start": 2047,
"end": 10482
} | class ____(NonStrictDataModel):
"""
:param id: Project id
:type id: str
:param name: Project name
:type name: str
:param basename: Project base name
:type basename: str
:param description: Project description
:type description: str
:param user: Associated user id
:type user: str
:param company: Company id
:type company: str
:param created: Creation time
:type created: datetime.datetime
:param last_update: Last project update time. Reflects the last time the
project metadata was changed or a task in this project has changed status
:type last_update: datetime.datetime
:param tags: User-defined tags
:type tags: Sequence[str]
:param system_tags: System tags. This field is reserved for system use, please
don't use it.
:type system_tags: Sequence[str]
:param default_output_destination: The default output destination URL for new
tasks under this project
:type default_output_destination: str
"""
_schema = {
"properties": {
"basename": {
"description": "Project base name",
"type": ["string", "null"],
},
"company": {"description": "Company id", "type": ["string", "null"]},
"created": {
"description": "Creation time",
"format": "date-time",
"type": ["string", "null"],
},
"default_output_destination": {
"description": "The default output destination URL for new tasks under this project",
"type": ["string", "null"],
},
"description": {
"description": "Project description",
"type": ["string", "null"],
},
"id": {"description": "Project id", "type": ["string", "null"]},
"last_update": {
"description": "Last project update time. Reflects the last time the project metadata was changed or a task in this project has changed status",
"format": "date-time",
"type": ["string", "null"],
},
"name": {"description": "Project name", "type": ["string", "null"]},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {"description": "Associated user id", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
basename: Optional[str] = None,
description: Optional[str] = None,
user: Optional[str] = None,
company: Optional[str] = None,
created: Optional[str] = None,
last_update: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
default_output_destination: Optional[str] = None,
**kwargs: Any
) -> None:
super(Project, self).__init__(**kwargs)
self.id = id
self.name = name
self.basename = basename
self.description = description
self.user = user
self.company = company
self.created = created
self.last_update = last_update
self.tags = tags
self.system_tags = system_tags
self.default_output_destination = default_output_destination
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("basename")
def basename(self) -> Optional[str]:
return self._property_basename
@basename.setter
def basename(self, value: Optional[str]) -> None:
if value is None:
self._property_basename = None
return
self.assert_isinstance(value, "basename", six.string_types)
self._property_basename = value
@schema_property("description")
def description(self) -> Optional[str]:
return self._property_description
@description.setter
def description(self, value: Optional[str]) -> None:
if value is None:
self._property_description = None
return
self.assert_isinstance(value, "description", six.string_types)
self._property_description = value
@schema_property("user")
def user(self) -> Optional[str]:
return self._property_user
@user.setter
def user(self, value: Optional[str]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("company")
def company(self) -> Optional[str]:
return self._property_company
@company.setter
def company(self, value: Optional[str]) -> None:
if value is None:
self._property_company = None
return
self.assert_isinstance(value, "company", six.string_types)
self._property_company = value
@schema_property("created")
def created(self) -> Optional[str]:
return self._property_created
@created.setter
def created(self, value: Optional[str]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("last_update")
def last_update(self) -> Optional[str]:
return self._property_last_update
@last_update.setter
def last_update(self, value: Optional[str]) -> None:
if value is None:
self._property_last_update = None
return
self.assert_isinstance(value, "last_update", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_update = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("default_output_destination")
def default_output_destination(self) -> Optional[str]:
return self._property_default_output_destination
@default_output_destination.setter
def default_output_destination(self, value: Optional[str]) -> None:
if value is None:
self._property_default_output_destination = None
return
self.assert_isinstance(value, "default_output_destination", six.string_types)
self._property_default_output_destination = value
| Project |
python | PrefectHQ__prefect | src/prefect/logging/highlighters.py | {
"start": 1500,
"end": 1811
} | class ____(RegexHighlighter):
"""Applies style from multiple highlighters."""
base_style = "log."
highlights: list[str] = (
LevelHighlighter.highlights
+ UrlHighlighter.highlights
+ NameHighlighter.highlights
+ StateHighlighter.highlights
)
| PrefectConsoleHighlighter |
python | great-expectations__great_expectations | docs/docusaurus/versioned_docs/version-0.18/oss/guides/expectations/creating_custom_expectations/expect_column_pair_values_to_have_a_difference_of_three.py | {
"start": 954,
"end": 2540
} | class ____(ColumnPairMapMetricProvider):
# </snippet>
"""MetricProvider Class for Pair Values Diff Three MetricProvider"""
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_column_pair_values_to_have_a_difference_of_three.py condition_metric_name">
condition_metric_name = "column_pair_values.diff_three"
# </snippet>
condition_domain_keys = (
"column_A",
"column_B",
)
condition_value_keys = ()
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_column_pair_values_to_have_a_difference_of_three.py _pandas">
@column_pair_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column_A, column_B, **kwargs):
return abs(column_A - column_B) == 3
# </snippet>
@column_pair_condition_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column_A, column_B, **kwargs):
row_wise_cond = sa.and_(
sa.func.abs(column_A - column_B) == 3,
sa.not_(sa.or_(column_A == None, column_B == None)),
)
return row_wise_cond
@column_pair_condition_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column_A, column_B, **kwargs):
row_wise_cond = F.abs(column_A - column_B) == 3
return row_wise_cond
# <snippet name="docs/docusaurus/docs/oss/guides/expectations/creating_custom_expectations/expect_column_pair_values_to_have_a_difference_of_three.py ExpectColumnPairValuesToHaveADifferenceOfThree class_def">
| ColumnPairValuesDiffThree |
python | numba__numba | numba/tests/test_target_extension.py | {
"start": 9065,
"end": 9465
} | class ____(Dispatcher):
targetdescr = dpu_target
def compile(self, sig):
with target_override('dpu'):
return super().compile(sig)
# Register a dispatcher for the DPU target, a lot of the code uses this
# internally to work out what to do RE compilation
dispatcher_registry[target_registry["dpu"]] = DPUDispatcher
# Implement a dispatcher for the DPU target
| DPUDispatcher |
python | pytorch__pytorch | test/fx/test_partitioner_order.py | {
"start": 922,
"end": 2491
} | class ____(TestCase):
# partitioner test to check graph node order remains the same with the original graph after partitioning
def test_partitioner_graph_node_order(self):
m = AddModule()
traced_m = torch.fx.symbolic_trace(m)
origin_node_order = [n.name for n in traced_m.graph.nodes]
partitions = DummyPartitioner(traced_m).propose_partitions()
partition_nodes = [list(partition.nodes) for partition in partitions]
partition_node_order = [n.name for n in partition_nodes[0]]
self.assertTrue(partition_node_order == origin_node_order)
# partitioner test to check graph node order remains the same during multiple runs
def test_partitioner_multiple_runs_order(self):
m = AddModule()
traced_m = torch.fx.symbolic_trace(m)
partitions = DummyPartitioner(traced_m).propose_partitions()
partition_nodes = [list(partition.nodes) for partition in partitions]
node_order = [n.name for n in partition_nodes[0]]
for _ in range(10):
traced_m = torch.fx.symbolic_trace(m)
new_partition = DummyPartitioner(traced_m).propose_partitions()
new_partition_nodes = [list(partition.nodes) for partition in new_partition]
new_node_order = [n.name for n in new_partition_nodes[0]]
self.assertTrue(node_order == new_node_order)
if __name__ == "__main__":
raise RuntimeError(
"This test is not currently used and should be "
"enabled in discover_tests.py if required."
)
| TestPartitionerOrder |
python | ansible__ansible | lib/ansible/_internal/_templating/_lazy_containers.py | {
"start": 22616,
"end": 27240
} | class ____(_AnsibleTaggedTuple, _AnsibleLazyTemplateMixin):
"""
A tagged tuple subclass that provides only managed access for existing lazy values.
Since tuples are immutable, they cannot support lazy templating (which would change the tuple's value as templates were resolved).
When this type is created, each value in the source tuple is lazified:
* template strings are templated immediately (possibly resulting in lazy containers)
* non-tuple containers are lazy-wrapped
* tuples are immediately recursively lazy-wrapped
* transformations are applied immediately
The resulting object provides only managed access to its values (e.g., deprecation warnings, tripwires), and propagates to new lazy containers
created as a results of managed access.
"""
# DTFIX5: ensure we have tests that explicitly verify this behavior
# nonempty __slots__ not supported for subtype of 'tuple'
def __new__(cls, contents: t.Iterable | _LazyValueSource, /) -> t.Self:
if isinstance(contents, _AnsibleLazyAccessTuple):
return super().__new__(cls, tuple.__iter__(contents))
if isinstance(contents, _LazyValueSource):
return super().__new__(cls, contents.source)
raise UnsupportedConstructionMethodError()
def __init__(self, contents: t.Iterable | _LazyValueSource, /) -> None:
_AnsibleLazyTemplateMixin.__init__(self, contents)
def __getitem__(self, key: t.SupportsIndex | slice, /) -> t.Any:
if type(key) is slice: # pylint: disable=unidiomatic-typecheck
return _AnsibleLazyAccessTuple(_LazyValueSource(source=super().__getitem__(key), templar=self._templar, lazy_options=self._lazy_options))
value = super().__getitem__(key)
if self._lazy_options.access:
AnsibleAccessContext.current().access(value)
return value
@staticmethod
def _item_source(value: tuple) -> tuple | _LazyValueSource:
if isinstance(value, _AnsibleLazyAccessTuple):
return _LazyValueSource(source=tuple.__iter__(value), templar=value._templar, lazy_options=value._lazy_options)
return value
@staticmethod
def _lazy_values(values: t.Any, lazy_options: LazyOptions) -> _LazyValueSource:
templar = TemplateContext.current().templar
return _LazyValueSource(source=(templar.template(value, lazy_options=lazy_options) for value in values), templar=templar, lazy_options=lazy_options)
def _non_lazy_copy(self) -> tuple:
return AnsibleTagHelper.tag_copy(self, self, value_type=tuple)
def __deepcopy__(self, memo):
return _AnsibleLazyAccessTuple(
_LazyValueSource(
source=(copy.deepcopy(v) for v in super().__iter__()),
templar=copy.deepcopy(self._templar),
lazy_options=copy.deepcopy(self._lazy_options),
)
)
def lazify_container(value: t.Any) -> t.Any:
"""
If the given value is a supported container type, return its lazy version, otherwise return the value as-is.
This is used to ensure that managed access and templating occur on args and kwargs to a callable, even if they were sourced from Jinja constants.
Since both variable access and plugin output are already lazified, this mostly affects Jinja constant containers.
However, plugins that directly invoke other plugins (e.g., `Environment.call_filter`) are another potential source of non-lazy containers.
In these cases, templating will occur for trusted templates automatically upon access.
Sets, tuples, and dictionary keys cannot be lazy, since their correct operation requires hashability and equality.
These properties are mutually exclusive with the following lazy features:
- managed access on encrypted strings - may raise errors on both operations when decryption fails
- managed access on markers - must raise errors on both operations
- templating - mutates values
That leaves non-raising managed access as the only remaining feature, which is insufficient to warrant lazy support.
"""
return _AnsibleLazyTemplateMixin._try_create(value)
def lazify_container_args(item: tuple) -> tuple:
"""Return the given args with values converted to lazy containers as needed."""
return tuple(lazify_container(value) for value in item)
def lazify_container_kwargs(item: dict[str, t.Any]) -> dict[str, t.Any]:
"""Return the given kwargs with values converted to lazy containers as needed."""
return {key: lazify_container(value) for key, value in item.items()}
| _AnsibleLazyAccessTuple |
python | kamyu104__LeetCode-Solutions | Python/xor-operation-in-an-array.py | {
"start": 29,
"end": 609
} | class ____(object):
def xorOperation(self, n, start):
"""
:type n: int
:type start: int
:rtype: int
"""
def xorNums(n, start):
def xorNumsBeginEven(n, start):
assert(start%2 == 0)
# 2*i ^ (2*i+1) = 1
return ((n//2)%2)^((start+n-1) if n%2 else 0)
return start^xorNumsBeginEven(n-1, start+1) if start%2 else xorNumsBeginEven(n, start)
return int(n%2 and start%2) + 2*xorNums(n, start//2)
# Time: O(n)
# Space: O(1)
import operator
| Solution |
python | lxml__lxml | src/lxml/html/tests/test_html5parser.py | {
"start": 3096,
"end": 4986
} | class ____(unittest.TestCase):
def call_it(self, *args, **kwargs):
if html5lib is None:
raise unittest.SkipTest("html5lib is not installed")
from lxml.html.html5parser import fragments_fromstring
return fragments_fromstring(*args, **kwargs)
def test_basic(self):
parser = DummyParser(fragments='fragments')
fragments = self.call_it(b'dummy input', parser=parser)
self.assertEqual(fragments, 'fragments')
self.assertEqual(parser.parseFragment_kwargs, {'useChardet': False})
def test_guess_charset_arg_gets_passed_to_parser(self):
parser = DummyParser()
elem = self.call_it(b'', guess_charset='gc_arg', parser=parser)
self.assertEqual(parser.parseFragment_kwargs, {'useChardet': 'gc_arg'})
def test_guess_charset_not_used_for_unicode(self):
parser = DummyParser()
elem = self.call_it(b''.decode('ascii'), parser=parser)
self.assertEqual(parser.parseFragment_kwargs, {})
def test_raises_type_error_on_nonstring_input(self):
not_a_string = None
self.assertRaises(TypeError, self.call_it, not_a_string)
def test_no_leading_text_strips_empty_leading_text(self):
parser = DummyParser(fragments=['', 'tail'])
fragments = self.call_it('', parser=parser, no_leading_text=True)
self.assertEqual(fragments, ['tail'])
def test_no_leading_text_raises_error_if_leading_text(self):
parser = DummyParser(fragments=['leading text', 'tail'])
self.assertRaises(ParserError, self.call_it,
'', parser=parser, no_leading_text=True)
def test_integration(self):
fragments = self.call_it('a<b>c</b>')
self.assertEqual(len(fragments), 2)
self.assertEqual(fragments[0], 'a')
self.assertEqual(fragments[1].tag, xhtml_tag('b'))
| Test_fragments_fromstring |
python | pexpect__pexpect | tests/test_run.py | {
"start": 5374,
"end": 6404
} | class ____(RunFuncTestCase):
if sys.platform != 'win32':
runfunc = staticmethod(pexpect.runu)
cr = b'\r'.decode('ascii')
empty = b''.decode('ascii')
prep_subprocess_out = staticmethod(lambda x: x.decode('utf-8', 'replace'))
def test_run_unicode(self):
if pexpect.PY3:
char = chr(254) # þ
pattern = '<in >'
else:
char = unichr(254) # analysis:ignore
pattern = '<in >'.decode('ascii')
def callback(values):
if values['event_count'] == 0:
return char + '\n'
else:
return True # Stop the child process
output = pexpect.runu(self.PYTHONBIN + ' echo_w_prompt.py',
env={'PYTHONIOENCODING': 'utf-8'},
events={pattern: callback})
assert isinstance(output, unicode_type), type(output)
assert ('<out>' + char) in output, output
if __name__ == '__main__':
unittest.main()
| RunUnicodeFuncTestCase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassConverter1.py | {
"start": 2259,
"end": 2464
} | class ____(ModelBase):
field0: int = model_field(converter=overloaded_converter)
reveal_type(
Overloads.__init__,
expected_text="(self: Overloads, field0: str | list[str]) -> None",
)
| Overloads |
python | tensorflow__tensorflow | tensorflow/python/ops/gradients_test.py | {
"start": 3460,
"end": 20171
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
def testGradients(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[32, 100], name="in")
w = constant(1.0, shape=[100, 10], name="w")
b = constant(1.0, shape=[10], name="b")
xw = math_ops.matmul(inp, w, name="xw")
h = bias_add(xw, b, name="h")
w_grad = gradients.gradients(h, w)[0]
self.assertEqual("MatMul", w_grad.op.type)
self.assertEqual(w_grad.op._original_op, xw.op)
self.assertTrue(w_grad.op.get_attr("transpose_a"))
self.assertFalse(w_grad.op.get_attr("transpose_b"))
def testUnusedOutput(self):
with ops.Graph().as_default():
w = constant(1.0, shape=[2, 2])
x = constant(1.0, shape=[2, 2])
wx = math_ops.matmul(w, x)
split_wx = array_ops.split(value=wx, num_or_size_splits=2, axis=0)
c = math_ops.reduce_sum(split_wx[1])
gw = gradients.gradients(c, [w])[0]
self.assertEqual("MatMul", gw.op.type)
def testColocateGradients(self):
with ops.Graph().as_default() as g:
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
with g.device("/device:GPU:0"):
wx = math_ops.matmul(w, x)
gw = gradients.gradients(wx, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw.op.colocation_groups(), wx.op.colocation_groups())
def testColocateGradientsWithAggregation(self):
with ops.Graph().as_default() as g:
with g.device("/device:GPU:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
wx = math_ops.matmul(w, x)
wy = math_ops.matmul(w, y)
with g.device("/device:GPU:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertNotEqual(wx.op.colocation_groups(), gw2.op.colocation_groups())
def testColocateGradientsWithAggregationInMultipleDevices(self):
with ops.Graph().as_default() as g:
with g.device("/device:GPU:1"):
w = constant(1.0, shape=[1, 1])
x = constant(1.0, shape=[1, 2])
y = constant(1.0, shape=[1, 2])
with g.device("/task:1"):
wx = math_ops.matmul(w, x)
with g.device("/task:2"):
wy = math_ops.matmul(w, y)
with g.device("/device:GPU:0"):
z = wx + wy
gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
self.assertEqual(gw1.op.colocation_groups(), w.op.colocation_groups())
gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
self.assertNotEqual(w.op.colocation_groups(), gw2.op.colocation_groups())
def testColocateGradientsWithGateGradients(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.Graph().as_default() as g:
with g.device("/device:CPU:0"):
x = constant(1.0, shape=[1, 1])
y = constant(1.0, shape=[1, 1])
s = x + y
with g.device("/device:GPU:0"):
z = math_ops.reduce_sum(s)
gz_x = gradients.gradients(z, [x], colocate_gradients_with_ops=True,
gate_gradients=True)[0]
# Make sure the placer doesn't complain.
self.evaluate(gz_x)
def testBoundaryStop(self):
# Test that we don't differentiate 'x'. The gradient function for 'x' is
# set explicitly to None so we will get an exception if the gradient code
# tries to differentiate 'x'.
with ops.Graph().as_default():
c = constant(1.0)
x = array_ops.identity(c)
y = x + 1.0
z = y + 1
grads = gradients.gradients(z, [x])
self.assertTrue(all(x is not None for x in grads))
@test_util.run_v1_only("b/120545219")
def testBoundaryContinue(self):
# Test that we differentiate both 'x' and 'y' correctly when x is a
# predecessor of y.
with self.cached_session():
x = constant(1.0)
y = x * 2.0
z = y * 3.0
grads = gradients.gradients(z, [x, y])
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(6.0, grads[0].eval())
@test_util.run_v1_only("b/120545219")
def testAggregationMethodAccumulateN(self):
with self.cached_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.
EXPERIMENTAL_ACCUMULATE_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
@test_util.run_v1_only("b/120545219")
def testAggregationMethodAddN(self):
with self.cached_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y], aggregation_method=gradients.AggregationMethod.ADD_N)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
@test_util.run_v1_only("b/120545219")
def testAggregationMethodTree(self):
with self.cached_session():
x = constant(1.0)
y = x * 2.0
z = y + y + y + y + y + y + y + y + y + y
grads = gradients.gradients(
z, [x, y],
aggregation_method=gradients.AggregationMethod.EXPERIMENTAL_TREE)
self.assertTrue(all(x is not None for x in grads))
self.assertEqual(20.0, grads[0].eval())
self.assertEqual(10.0, grads[1].eval())
def testNoGradientForStringOutputs(self):
with ops.Graph().as_default():
def _TestOpGrad(_, float_grad, string_grad):
"""Gradient function for TestStringOutput."""
self.assertEqual(float_grad.dtype, dtypes.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterGradient("TestStringOutput")(_TestOpGrad)
c = constant(1.0)
x, _ = test_ops.test_string_output(c)
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertIsInstance(grads[0], tensor.Tensor)
grads = gradients.gradients(w, [c])
self.assertIsInstance(grads[0], tensor.Tensor)
def testNoGradientForStringOutputsWithOpNamespace(self):
with ops.Graph().as_default():
def _TestOpGrad(_, float_grad, string_grad):
"""Gradient function for TestStringOutput."""
self.assertEqual(float_grad.dtype, dtypes.float32)
self.assertFalse(string_grad)
return float_grad
ops.RegisterGradient("Namespace>TestStringOutput")(_TestOpGrad)
c = constant(1.0)
x, _ = test_ops.namespace_test_string_output(c)
z = x * 2.0
w = z * 3.0
grads = gradients.gradients(z, [c])
self.assertIsInstance(grads[0], tensor.Tensor)
grads = gradients.gradients(w, [c])
self.assertIsInstance(grads[0], tensor.Tensor)
def testSingletonIndexedSlices(self):
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
y = array_ops.identity(x)
dy = indexed_slices.IndexedSlices(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.int32))
dx, = gradients.gradients(y, x, grad_ys=dy)
# The IndexedSlices gradient of tf.identity is the identity map.
with self.cached_session() as sess:
vdx, vdy = sess.run(
[dx, dy], feed_dict={x: [1.0], dy.indices: [0], dy.values: [2.0]})
self.assertEqual(vdx, vdy)
@test_util.run_v1_only("b/120545219")
def testNonDifferentiableSwitchInWhileLoop(self):
with ops.Graph().as_default():
v = array_ops.placeholder(dtypes.float32, [])
def _Step(i, a, ta):
a += math_ops.cast(v, dtypes.int32)
return (i + 1, a, ta.write(i, a))
n = 4
i, _, ta = while_loop.while_loop(
lambda i, *_: i < n, _Step,
[0, 0, tensor_array_ops.TensorArray(dtypes.int32, size=n)])
target = ta.read(i - 1)
grad, = gradients.gradients(target, v)
self.assertIsNone(grad)
def testVariableReadValueGradient(self):
with ops.Graph().as_default():
init = constant_op.constant(100.0)
var = variables.Variable(init)
gradient = gradients.gradients(var.read_value(), var)
self.assertIsNotNone(gradient)
@parameterized.parameters(dtypes.float32, dtypes.float64)
def testVariableDefaultGrad(self, dtype):
with ops.Graph().as_default():
init = constant_op.constant(100.0, dtype=dtype)
var = variables.Variable(init)
dummy_const = constant_op.constant(0.0)
gradient = gradients.gradients(
dummy_const,
var,
unconnected_gradients=unconnected_gradients.UnconnectedGradients.ZERO
)[0]
self.assertEqual(gradient.dtype, dtype)
self.assertIsNotNone(gradient)
def testVariableAsGraphElementGradient(self):
with ops.Graph().as_default() as graph:
init = constant_op.constant(100.0)
var = variables.Variable(init)
gradient = gradients.gradients(graph.as_graph_element(var), var)
self.assertIsNotNone(gradient)
@test_util.run_v1_only("b/120545219")
def testVariableRefGradient(self):
with ops.Graph().as_default():
init = constant_op.constant(100.0)
var = variable_v1.VariableV1(init)
gradient = gradients.gradients(var._ref(), var)
self.assertIsNotNone(gradient)
@test_util.run_v1_only("b/120545219")
def testDependentYs(self):
with self.cached_session():
x = constant_op.constant(3.0)
y = math_ops.square(x)
y1 = math_ops.square(y)
y2 = math_ops.square(y1)
g = gradients.gradients([y, y2], x)
self.assertAllClose(17502.0, g[0])
g = gradients.gradients(y + y2, x)
self.assertAllClose(17502.0, g[0])
z = array_ops.identity(y)
z2 = array_ops.identity(y2)
g = gradients.gradients([z, z2], x)
self.assertAllClose(17502.0, g[0])
@test_util.run_v1_only("b/120545219")
def testPartialDerivatives(self):
with self.cached_session():
x = constant_op.constant(1.)
y = 2 * x
z = x + y
totalg = gradients.gradients(z, [x, y])
self.assertEqual([3.0, 1.0], [g.eval() for g in totalg])
partialg = gradients.gradients(z, [x, y], stop_gradients=[x, y])
self.assertEqual([1.0, 1.0], [g.eval() for g in partialg])
@test_util.run_v1_only("b/120545219")
def testStopGradients(self):
def _MakeGraph(rng, stop_gradients=()):
def _FunctionOf(xs, k=3):
return ops.convert_to_tensor(
sum(math_ops.matmul(rng.rand(k, k), x) for x in xs)
+ rng.rand(k, k))
a = _FunctionOf([])
if "a" in stop_gradients: a = array_ops.stop_gradient(a)
b = _FunctionOf([a])
if "b" in stop_gradients: b = array_ops.stop_gradient(b)
c = _FunctionOf([a, b])
if "c" in stop_gradients: c = array_ops.stop_gradient(c)
d = _FunctionOf([b, c])
if "d" in stop_gradients: d = array_ops.stop_gradient(d)
return dict(a=a, b=b, c=c, d=d)
def _Gradients(ys, xs, **kwargs):
dydxs = gradients.gradients(ys, xs, **kwargs)
dydxs = [0. * x if dydx is None else dydx
for x, dydx in zip(xs, dydxs)]
return dydxs
seed = np.random.randint(1000)
cases = []
subsets = [""] + "a b c d ab ac ad bc bd cd abc abd acd bcd abcd".split()
graph = _MakeGraph(np.random.RandomState(seed))
for constants in subsets:
graph_with_stops = _MakeGraph(np.random.RandomState(seed), constants)
for variables_ in subsets:
# compute the gradient when stopped using tf.stop_gradients
grad1 = _Gradients([graph_with_stops["d"]],
[graph_with_stops[v] for v in variables_])
# compute the gradient when stopped using the stop_gradients kwarg
grad2 = _Gradients([graph["d"]],
[graph[v] for v in variables_],
stop_gradients=[graph[v] for v in constants])
cases.append(dict(grad1=grad1, grad2=grad2,
constants=constants, variables=variables_))
# evaluate all tensors in one call to session.run for speed
with self.cached_session() as sess:
results = sess.run([(case["grad1"], case["grad2"]) for case in cases])
for (npgrad1, npgrad2), case in zip(results, cases):
for a, b in zip(npgrad1, npgrad2):
np.testing.assert_allclose(a, b)
def testUnconnectedGradientsNoneUnconnectedGradients(self):
with ops.Graph().as_default():
x = constant(1.0, shape=[2, 2])
y = constant(3.0, shape=[3, 1])
grad = gradients.gradients(
[y], [x], unconnected_gradients="none")
self.assertIsNone(grad[0])
def testUnconnectedGradientsZerosUnconnectedGradients(self):
with ops.Graph().as_default():
x = constant(1.0, shape=[2, 2])
y = constant(3.0, shape=[3, 1])
grads = gradients.gradients(
[y], [x], unconnected_gradients="zero")
self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(grads)[0])
def testUnconnectedGradientsZeroConnectedGradients(self):
with ops.Graph().as_default():
x = constant(1.0)
y = x * 3.0
grad = gradients.gradients(
[y], [x], unconnected_gradients="zero")
self.assertEqual(3.0, self.evaluate(grad)[0])
def testUnknownUnconnectedGradientsValueGiven(self):
with ops.Graph().as_default():
x = constant(1.0)
y = constant(1.0)
with self.assertRaisesRegex(
ValueError, "Unknown value for unconnected_gradients: 'nonsense'"):
gradients.gradients([y], [x], unconnected_gradients="nonsense")
@parameterized.parameters(unconnected_gradients.UnconnectedGradients.ZERO,
unconnected_gradients.UnconnectedGradients.NONE)
def testUnconnectedOpWithMultipleOutputs(self, unconnected_gradients_val):
with ops.Graph().as_default():
# a b
# | |
# IdentityN
# | |
# c d
# |
# Identity
# |
# e
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
c, d = array_ops.identity_n([a, b])
e = array_ops.identity(c)
# The aggregated grads for the IdentityN node would look like
# [Tensor, None]. We expect this None to be converted to zeros.
output = gradients.gradients(
e, d, unconnected_gradients=unconnected_gradients_val)
if (unconnected_gradients_val ==
unconnected_gradients.UnconnectedGradients.ZERO):
self.assertIsNotNone(output[0])
else:
self.assertIsNone(output[0])
def testOptimizeIdentityN(self):
with ops.Graph().as_default():
# a b
# | |
# IdentityN
# | |
# c d
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
c, d = array_ops.identity_n([a, b])
def OptimizedGradients(y, x):
return gradients_util._GradientsHelper(
ys=y,
xs=x,
optimize_identity_n=True,
)
self.assertIsNone(OptimizedGradients(d, a)[0])
self.assertIsNone(OptimizedGradients(c, b)[0])
self.assertIsNotNone(gradients.gradients(d, a)[0])
self.assertIsNotNone(gradients.gradients(c, b)[0])
for grad_impl in [gradients.gradients, OptimizedGradients]:
self.assertIsNotNone(grad_impl(c, a)[0])
self.assertIsNotNone(grad_impl(d, b)[0])
@parameterized.parameters(unconnected_gradients.UnconnectedGradients.ZERO,
unconnected_gradients.UnconnectedGradients.NONE)
def testUnconnectedOpWithMultipleOutputsStopGradient(
self, unconnected_gradients_val):
with ops.Graph().as_default():
# a b
# | |
# IdentityN
# | |
# c d
# | |
# SG |
# | |
# \ /
# +
# e
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
c, d = array_ops.identity_n([a, b])
e = array_ops.stop_gradient(c) + d
# The aggregated grads for the IdentityN node would look like
# [None, Tensor]. We expect this None to be converted to zeros.
output = gradients.gradients(
e, c, unconnected_gradients=unconnected_gradients_val)
if (unconnected_gradients_val ==
unconnected_gradients.UnconnectedGradients.ZERO):
self.assertIsNotNone(output[0])
else:
self.assertIsNone(output[0])
| GradientsTest |
python | optuna__optuna | optuna/terminator/callback.py | {
"start": 372,
"end": 2720
} | class ____:
"""A callback that terminates the optimization using Terminator.
This class implements a callback which wraps :class:`~optuna.terminator.Terminator`
so that it can be used with the :func:`~optuna.study.Study.optimize` method.
Args:
terminator:
A terminator object which determines whether to terminate the optimization by
assessing the room for optimization and statistical error. Defaults to a
:class:`~optuna.terminator.Terminator` object with default
``improvement_evaluator`` and ``error_evaluator``.
Example:
.. testcode::
from sklearn.datasets import load_wine
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
import optuna
from optuna.terminator import TerminatorCallback
from optuna.terminator import report_cross_validation_scores
def objective(trial):
X, y = load_wine(return_X_y=True)
clf = RandomForestClassifier(
max_depth=trial.suggest_int("max_depth", 2, 32),
min_samples_split=trial.suggest_float("min_samples_split", 0, 1),
criterion=trial.suggest_categorical("criterion", ("gini", "entropy")),
)
scores = cross_val_score(clf, X, y, cv=KFold(n_splits=5, shuffle=True))
report_cross_validation_scores(trial, scores)
return scores.mean()
study = optuna.create_study(direction="maximize")
terminator = TerminatorCallback()
study.optimize(objective, n_trials=50, callbacks=[terminator])
.. seealso::
Please refer to :class:`~optuna.terminator.Terminator` for the details of
the terminator mechanism.
"""
def __init__(self, terminator: BaseTerminator | None = None) -> None:
self._terminator = terminator or Terminator()
def __call__(self, study: Study, trial: FrozenTrial) -> None:
should_terminate = self._terminator.should_terminate(study=study)
if should_terminate:
_logger.info("The study has been stopped by the terminator.")
study.stop()
| TerminatorCallback |
python | pytorch__pytorch | test/test_jit.py | {
"start": 576196,
"end": 576818
} | class ____(TestCase):
def test_version(self):
# issue gh-32561
self.assertTrue(torch.__version__.startswith(torch.onnx.producer_version))
for test in get_all_nn_module_tests():
add_nn_module_test(**test)
for test in criterion_tests:
test['no_grad'] = True
add_nn_module_test(**test)
if __name__ == '__main__':
if sys.version_info < (3, 14):
TestCase._default_dtype_check_enabled = True
run_tests()
import jit.test_module_interface
suite = unittest.findTestCases(jit.test_module_interface)
unittest.TextTestRunner().run(suite)
| TestProducerVersion |
python | paramiko__paramiko | tests/test_config.py | {
"start": 17071,
"end": 21713
} | class ____:
# NOTE: this class uses on-disk configs, and ones with real (at time of
# writing) DNS names, so that one can easily test OpenSSH's behavior using
# "ssh -F path/to/file.config -G <target>".
def test_off_by_default(self, socket):
result = load_config("basic").lookup("www")
assert result["hostname"] == "www"
assert "user" not in result
assert not socket.gethostbyname.called
def test_explicit_no_same_as_default(self, socket):
result = load_config("no-canon").lookup("www")
assert result["hostname"] == "www"
assert "user" not in result
assert not socket.gethostbyname.called
@mark.parametrize(
"config_name",
("canon", "canon-always", "canon-local", "canon-local-always"),
)
def test_canonicalization_base_cases(self, socket, config_name):
result = load_config(config_name).lookup("www")
assert result["hostname"] == "www.paramiko.org"
assert result["user"] == "rando"
socket.gethostbyname.assert_called_once_with("www.paramiko.org")
def test_uses_getaddrinfo_when_AddressFamily_given(self, socket):
# Undo default 'always fails' mock
socket.getaddrinfo.side_effect = None
socket.getaddrinfo.return_value = [True] # just need 1st value truthy
result = load_config("canon-ipv4").lookup("www")
assert result["hostname"] == "www.paramiko.org"
assert result["user"] == "rando"
assert not socket.gethostbyname.called
gai_args = socket.getaddrinfo.call_args[0]
assert gai_args[0] == "www.paramiko.org"
assert gai_args[2] is socket.AF_INET # Mocked, but, still useful
@mark.skip
def test_empty_CanonicalDomains_canonicalizes_despite_noop(self, socket):
# Confirmed this is how OpenSSH behaves as well. Bit silly, but.
# TODO: this requires modifying SETTINGS_REGEX, which is a mite scary
# (honestly I'd prefer to move to a real parser lib anyhow) and since
# this is a very dumb corner case, it's marked skip for now.
result = load_config("empty-canon").lookup("www")
assert result["hostname"] == "www" # no paramiko.org
assert "user" not in result # did not discover canonicalized block
def test_CanonicalDomains_may_be_set_to_space_separated_list(self, socket):
# Test config has a bogus domain, followed by paramiko.org
socket.gethostbyname.side_effect = [socket.gaierror, True]
result = load_config("multi-canon-domains").lookup("www")
assert result["hostname"] == "www.paramiko.org"
assert result["user"] == "rando"
assert [x[0][0] for x in socket.gethostbyname.call_args_list] == [
"www.not-a-real-tld",
"www.paramiko.org",
]
def test_canonicalization_applies_to_single_dot_by_default(self, socket):
result = load_config("deep-canon").lookup("sub.www")
assert result["hostname"] == "sub.www.paramiko.org"
assert result["user"] == "deep"
def test_canonicalization_not_applied_to_two_dots_by_default(self, socket):
result = load_config("deep-canon").lookup("subber.sub.www")
assert result["hostname"] == "subber.sub.www"
assert "user" not in result
def test_hostname_depth_controllable_with_max_dots_directive(self, socket):
# This config sets MaxDots of 2, so now canonicalization occurs
result = load_config("deep-canon-maxdots").lookup("subber.sub.www")
assert result["hostname"] == "subber.sub.www.paramiko.org"
assert result["user"] == "deeper"
def test_max_dots_may_be_zero(self, socket):
result = load_config("zero-maxdots").lookup("sub.www")
assert result["hostname"] == "sub.www"
assert "user" not in result
def test_fallback_yes_does_not_canonicalize_or_error(self, socket):
socket.gethostbyname.side_effect = socket.gaierror
result = load_config("fallback-yes").lookup("www")
assert result["hostname"] == "www"
assert "user" not in result
def test_fallback_no_causes_errors_for_unresolvable_names(self, socket):
socket.gethostbyname.side_effect = socket.gaierror
with raises(CouldNotCanonicalize) as info:
load_config("fallback-no").lookup("doesnotexist")
assert str(info.value) == "doesnotexist"
def test_identityfile_continues_being_appended_to(self, socket):
result = load_config("canon").lookup("www")
assert result["identityfile"] == ["base.key", "canonicalized.key"]
@mark.skip
| TestHostnameCanonicalization |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fragments.py | {
"start": 6147,
"end": 6267
} | class ____(GQLResult):
name: str
organization: Optional[RegistryFragmentEntityOrganization]
| RegistryFragmentEntity |
python | pytorch__pytorch | test/higher_order_ops/test_with_effects.py | {
"start": 2510,
"end": 43551
} | class ____(TestCase):
def setUp(self):
init_torchbind_implementations()
def test_print(self):
class M(torch.nn.Module):
def forward(self, x):
torch.ops.aten._print("moo")
res = x + x
torch.ops.aten._print("moo")
return (res,)
inputs = (torch.randn(3),)
# Without functionalization, print should just appear in the graph directly
gm = make_fx(M())(*inputs)
FileCheck().check_count("torch.ops.aten._print.default", 2, exactly=True).run(
gm.code
)
# With functionalization, it should appear wrapped with with_effects()
gm, gs = aot_export_module(M(), inputs, trace_joint=False)
self.assertExpectedInline(
str(gm.code).strip(),
"""\
def forward(self, arg0_1, arg1_1):
with_effects = torch.ops.higher_order.with_effects(arg0_1, torch.ops.aten._print.default, 'moo'); arg0_1 = None
getitem = with_effects[0]; with_effects = None
add = torch.ops.aten.add.Tensor(arg1_1, arg1_1); arg1_1 = None
with_effects_1 = torch.ops.higher_order.with_effects(getitem, torch.ops.aten._print.default, 'moo'); getitem = None
getitem_2 = with_effects_1[0]; with_effects_1 = None
return (getitem_2, add)""",
)
self.assertEqual(len(gs.input_tokens), 1)
self.assertEqual(len(gs.output_tokens), 1)
with torch._functorch.config.patch(unlift_effect_tokens=True):
gm, gs = aot_export_module(M(), inputs, trace_joint=False)
self.assertExpectedInline(
str(gm.code).strip(),
"""\
def forward(self, arg1_1):
_make_token_default = torch.ops.prims._make_token.default()
with_effects = torch.ops.higher_order.with_effects(_make_token_default, torch.ops.aten._print.default, 'moo'); _make_token_default = None
getitem = with_effects[0]; with_effects = None
add = torch.ops.aten.add.Tensor(arg1_1, arg1_1); arg1_1 = None
with_effects_1 = torch.ops.higher_order.with_effects(getitem, torch.ops.aten._print.default, 'moo'); getitem = None
getitem_2 = with_effects_1[0]; with_effects_1 = None
_sink_tokens_default = torch.ops.prims._sink_tokens.default([getitem_2]); getitem_2 = _sink_tokens_default = None
return (add,)""", # noqa: B950
)
def test_torchbind_custom_op(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.attr = torch.classes._TorchScriptTesting._Foo(10, 20)
def forward(self, x):
return (x + torch.ops._TorchScriptTesting.takes_foo(self.attr, x),)
with enable_torchbind_tracing():
gm, gs = aot_export_module(M(), (torch.ones(2, 3),), trace_joint=False)
self.assertExpectedInline(
str(gm.code).strip(),
"""\
def forward(self, arg0_1, arg1_1):
_torchbind_obj0 = self._torchbind_obj0
with_effects = torch.ops.higher_order.with_effects(arg0_1, torch.ops._TorchScriptTesting.takes_foo.default, _torchbind_obj0, arg1_1); arg0_1 = _torchbind_obj0 = None
getitem = with_effects[0]
getitem_1 = with_effects[1]; with_effects = None
add = torch.ops.aten.add.Tensor(arg1_1, getitem_1); arg1_1 = getitem_1 = None
return (getitem, add)""", # noqa: B950
)
self.assertEqual(len(gs.input_tokens), 1)
self.assertEqual(len(gs.output_tokens), 1)
def test_print_with_buffer_mutations(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.buf = torch.nn.Buffer(torch.ones(3))
def forward(self, x):
torch.ops.aten._print("moo")
res = x + x
self.buf.add_(res)
res = self.buf + x
torch.ops.aten._print("moo")
return (res,)
inputs = (torch.randn(3),)
# With functionalization, it should appear wrapped with with_effects()
gm, gs = aot_export_module(M(), inputs, trace_joint=False)
self.assertExpectedInline(
str(gm.code).strip(),
"""\
def forward(self, arg0_1, arg1_1, arg2_1):
with_effects = torch.ops.higher_order.with_effects(arg0_1, torch.ops.aten._print.default, 'moo'); arg0_1 = None
getitem = with_effects[0]; with_effects = None
add = torch.ops.aten.add.Tensor(arg2_1, arg2_1)
add_1 = torch.ops.aten.add.Tensor(arg1_1, add); arg1_1 = add = None
add_2 = torch.ops.aten.add.Tensor(add_1, arg2_1); arg2_1 = None
with_effects_1 = torch.ops.higher_order.with_effects(getitem, torch.ops.aten._print.default, 'moo'); getitem = None
getitem_2 = with_effects_1[0]; with_effects_1 = None
return (getitem_2, add_1, add_2)""",
)
self.assertEqual(len(gs.input_tokens), 1)
self.assertEqual(len(gs.output_tokens), 1)
self.assertEqual(len(gs.buffers_to_mutate), 1)
def test_print_with_input_mutations(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
torch.ops.aten._print("moo")
res = x + x
x.add_(res)
res = x + x
torch.ops.aten._print("moo")
return (res,)
inputs = (torch.randn(3),)
# With functionalization, it should appear wrapped with with_effects()
gm, gs = aot_export_module(M(), inputs, trace_joint=False)
self.assertEqual(len(gs.input_tokens), 1)
self.assertEqual(len(gs.output_tokens), 1)
self.assertEqual(len(gs.user_inputs_to_mutate), 1)
def test_alias_op(self):
def f(token, x):
token, out = with_effects(token, torch.ops.aten.absolute_.default, x)
return token, out
with self.assertRaisesRegex(
AssertionError, r"Ops with aliasing is not supported"
):
make_fx(f)(torch.tensor([]), torch.tensor(4))
def test_compile_aot_eager(self):
def f(x):
torch.ops.aten._print("moo")
res = x + x
torch.ops.aten._print("moo")
return res
inputs = (torch.randn(2, 3),)
res = torch.compile(f, backend="aot_eager")(*inputs)
self.assertTrue(torch.allclose(res, f(*inputs)))
    @unittest.skipIf(IS_WINDOWS, "triton")
    @unittest.skipIf(not SM70OrLater, "triton")
    def test_compile_inductor(self):
        """Same as test_compile_aot_eager but through the inductor backend
        (requires triton, hence the skips)."""

        def f(x):
            torch.ops.aten._print("moo")
            res = x + x
            torch.ops.aten._print("moo")
            return res

        inputs = (torch.randn(2, 3),)
        res = torch.compile(f, backend="inductor")(*inputs)
        self.assertTrue(torch.allclose(res, f(*inputs)))
    @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
    @skipIfNoDynamoSupport
    def test_compile_inductor_external_op_return_none(self):
        """A custom op that mutates its second argument and returns None
        compiles correctly through inductor."""
        with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
            torch.library.define(
                "mylib::inplace_add",
                "(Tensor input, Tensor(a!) output) -> ()",
                lib=lib,
            )

            def inplace_add(input: torch.Tensor, output: torch.Tensor) -> None:
                assert input.device == output.device
                output.add_(input)

            lib.impl("inplace_add", inplace_add, "CompositeExplicitAutograd")

            def f(x):
                out = torch.empty(3)
                out = torch.zeros_like(out)
                torch.ops.mylib.inplace_add(x, out)
                return out

            inputs = (torch.randn(3),)

            res = torch.compile(f, backend="inductor")(*inputs)
            self.assertTrue(torch.allclose(res, f(*inputs)))
    def test_compile_aot_eager_requires_grad(self):
        """Effects compose with autograd: compiled forward matches eager and
        backward runs through the effect-threaded graph."""

        def f(x):
            torch.ops.aten._print("moo")
            res = x + x
            torch.ops.aten._print("moo")
            return res

        inputs = (torch.randn(2, 3, requires_grad=True),)
        res = torch.compile(f, backend="aot_eager")(*inputs)
        self.assertTrue(torch.allclose(res, f(*inputs)))

        res.sum().backward()
    @unittest.skipIf(IS_WINDOWS, "triton")
    @unittest.skipIf(TEST_WITH_ROCM, "triton")
    @unittest.skipIf(not SM80OrLater, "triton")
    @unittest.skipIf(not TEST_CUDA, "triton")
    @skipIfNoDynamoSupport
    def test_register_effectful_custom_op(self):
        """Registering an ORDERED effect on a recording custom op keeps forward
        hooks firing (and in order) under torch.compile, and the hook side
        effects survive backward."""
        with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
            torch._dynamo.config.capture_scalar_outputs = True
            torch._dynamo.config.capture_dynamic_output_shape_ops = True

            # global variable to store the recorded tensor and prefix.
            recorded_dict = {}

            # Pytorch custom op implementation
            @torch.library.custom_op("mylib::record_scalar_tensor", mutates_args=())
            def record_scalar_tensor(x: torch.Tensor, prefix: str) -> None:
                recorded_dict[prefix] = x.clone()
                return

            # Meta function of the custom op
            @record_scalar_tensor.register_fake
            def record_scalar_tensor_meta(x, prefix):
                return

            record_scalar_tensor.register_effect(_EffectType.ORDERED)
            self.assertEqual(_get_effect(record_scalar_tensor), _EffectType.ORDERED)

            # Per-module aggregation method; unknown modules default to "mean".
            my_config = {}
            my_config["MockModule"] = "mean"
            my_config["MockModule.linear"] = "mean"
            my_config["MockModule.relu"] = "mean"

            class MyLinear(torch.nn.Module):
                def __init__(self, in_features, out_features):
                    super().__init__()
                    self.weight = torch.nn.Parameter(
                        torch.randn(out_features, in_features), requires_grad=True
                    )
                    self.bias = torch.nn.Parameter(
                        torch.randn(out_features), requires_grad=True
                    )

                def forward(self, x):
                    return torch.nn.functional.linear(x, self.weight, self.bias)

            class MockModule(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.linear = MyLinear(10, 10)
                    self.register_buffer(
                        "buf0", torch.randn(10, 10, requires_grad=True)
                    )

                def forward(self, x):
                    return torch.nn.functional.relu(self.linear(x) + self.buf0)

            def forward_hook(
                module: torch.nn.Module,
                inputs: torch.Tensor,
                output: torch.Tensor,
                prefix: str,
                aggregate_method: str,
            ) -> torch.Tensor:
                if aggregate_method == "mean":
                    torch.ops.mylib.record_scalar_tensor(output.mean(), prefix)
                elif aggregate_method == "max":
                    torch.ops.mylib.record_scalar_tensor(output.max(), prefix)
                else:
                    # demo purpose, using "min"
                    torch.ops.mylib.record_scalar_tensor(output.sum(), prefix)
                return output

            def add_hooks(module, config):
                # Walk the module tree and attach a recording forward hook to
                # every submodule, keyed by its dotted path.
                handles: list[RemovableHandle] = []
                q = deque([(module.__class__.__name__, module)])
                while q:
                    name, m = q.pop()
                    children = [(name + "." + n, y) for (n, y) in m.named_children()]
                    q.extend(children)
                    aggregate_method = config.get(name, "mean")
                    prefix = name + ":" + aggregate_method
                    handle = m.register_forward_hook(
                        partial(
                            forward_hook,
                            prefix=prefix,
                            aggregate_method=aggregate_method,
                        )
                    )
                    if handle:
                        handles.append(handle)
                return handles

            x = torch.randn(10, 10, device="cuda")
            mod = MockModule().to("cuda")
            add_hooks(mod, my_config)

            opt_mod = torch.compile(backend="inductor")(mod)
            y = opt_mod(x)

            self.assertTrue(torch.allclose(y, mod(x)))
            # Ensure it works well with backward
            y.sum().backward()
            # Ensure the grad is existing
            self.assertTrue(isinstance(opt_mod.linear.weight.grad, torch.Tensor))

            # Only modules whose forward actually produced output are recorded
            # (MyLinear fires under "MockModule.linear", the root under
            # "MockModule").
            self.assertEqual(len(recorded_dict), 2)
            self.assertTrue("MockModule.linear:mean" in recorded_dict)
            self.assertTrue("MockModule:mean" in recorded_dict)
    @skipIfNoDynamoSupport
    def test_effectful_custom_op_with_subclasses(self):
        """Effectful custom ops thread tokens correctly when inputs are tensor
        subclasses (TwoTensor): the op runs once per subclass component, and
        forward/backward call counts match eager."""
        with torch.library._scoped_library("_mylib", "FRAGMENT") as lib:
            lib.define("zoo(Tensor x) -> Tensor")
            lib.define("zoo2(Tensor x) -> Tensor")

            # Counters for how many times the fw (zoo) / bw (zoo2) impls ran.
            d = {"fw": 0, "bw": 0}

            def reset_counter():
                d["fw"] = 0
                d["bw"] = 0

            def assert_counter(fw, bw):
                self.assertEqual(d["fw"], fw)
                self.assertEqual(d["bw"], bw)

            def foo_impl(a):
                d["fw"] = d["fw"] + 1
                return 2 * a.clone()

            def foo_meta(a):
                return a.clone()

            def foo2_impl(x):
                d["bw"] = d["bw"] + 1
                return x.clone()

            def foo2_meta(a):
                return a.clone()

            for backend in ["CPU", "CUDA"]:
                lib.impl("zoo", foo_impl, backend)
                lib.impl("zoo2", foo2_impl, backend)
            lib.impl("zoo", foo_meta, "Meta")
            lib.impl("zoo2", foo2_meta, "Meta")

            def foo_bwd(ctx, grad):
                # zoo2 is called purely for its (counted) side effect.
                torch.ops._mylib.zoo2(grad)
                return grad.clone()

            torch.library.register_autograd("_mylib::foo", foo_bwd, lib=lib)

            torch.library._register_effectful_op(
                torch.ops._mylib.zoo.default, _EffectType.ORDERED
            )
            torch.library._register_effectful_op(
                torch.ops._mylib.zoo2.default, _EffectType.ORDERED
            )

            def fn(x, y):
                return torch.ops._mylib.zoo(x) + y

            def ins_sc():
                return (
                    TwoTensor(
                        torch.tensor([1.0, 2.0, 3.0]), torch.tensor([1.0, 2.0, 3.0])
                    ),
                    torch.tensor([4.0, 5.0, 6.0]),
                )

            def ins_dense():
                return torch.tensor([1.0, 2.0, 3.0]), torch.tensor([4.0, 5.0, 6.0])

            # TwoTensor inputs run the op twice (once per component); dense once.
            for ins_fn, expected_fw_count in zip([ins_sc, ins_dense], [2, 1]):
                reset_counter()
                ref_out = fn(*ins_fn())
                assert_counter(expected_fw_count, 0)

                compiled_fn = torch.compile(fn, backend="aot_eager")
                out = compiled_fn(*ins_fn())
                reset_counter()
                out = compiled_fn(*ins_fn())
                assert_counter(expected_fw_count, 0)

                self.assertEqual(ref_out, out)

            def ins_dense_req_grad():
                return (
                    torch.tensor([1.0, 2.0, 3.0], requires_grad=True),
                    torch.tensor([4.0, 5.0, 6.0], requires_grad=True),
                )

            def ins_sc_req_grad():
                return (
                    TwoTensor(
                        torch.tensor([1.0, 2.0, 3.0], requires_grad=True),
                        torch.tensor([4.0, 5.0, 6.0], requires_grad=True),
                    ),
                    TwoTensor(
                        torch.tensor([7.0, 8.0, 9.0], requires_grad=True),
                        torch.tensor([10.0, 11.0, 12.0], requires_grad=True),
                    ),
                )

            # Same counts must hold through backward as well.
            for (
                ins_fn_req_grad,
                (
                    expected_fw_count,
                    expected_fw_count_after_bw,
                    expected_bw_count_after_bw,
                ),
            ) in zip([ins_dense_req_grad, ins_sc_req_grad], [(1, 1, 1), (2, 2, 2)]):
                ref_ins = ins_fn_req_grad()

                reset_counter()
                ref_out = fn(*ref_ins)
                assert_counter(expected_fw_count, 0)

                ref_out.sum().backward()
                assert_counter(expected_fw_count_after_bw, expected_bw_count_after_bw)

                compiled_fn = torch.compile(fn, fullgraph=True)

                ins = ins_fn_req_grad()
                out = compiled_fn(*ins)
                reset_counter()
                out = compiled_fn(*ins)
                assert_counter(expected_fw_count, 0)

                self.assertEqual(ref_out, out)

                out.sum().backward()
                assert_counter(expected_fw_count_after_bw, expected_bw_count_after_bw)

                self.assertEqual(ref_ins[1].grad, ins[1].grad)
                self.assertEqual(ref_ins[0].grad, ins[0].grad)

            # Pin the exact token threading in the traced fw/bw graphs.
            fw_graph, bw_graph = get_fw_bw_graph(fn, ins_sc_req_grad())
            self.assertExpectedInline(
                fw_graph.code.strip(),
                """\
def forward(self, primals_1, primals_2, primals_3, primals_4, primals_5):
    with_effects = torch.ops.higher_order.with_effects(primals_1, torch.ops._mylib.zoo.default, primals_2); primals_1 = primals_2 = None
    getitem = with_effects[0]
    getitem_1 = with_effects[1]; with_effects = None
    with_effects_1 = torch.ops.higher_order.with_effects(getitem, torch.ops._mylib.zoo.default, primals_3); getitem = primals_3 = None
    getitem_2 = with_effects_1[0]
    getitem_3 = with_effects_1[1]; with_effects_1 = None
    add = torch.ops.aten.add.Tensor(getitem_1, primals_4); getitem_1 = primals_4 = None
    add_1 = torch.ops.aten.add.Tensor(getitem_3, primals_5); getitem_3 = primals_5 = None
    return (getitem_2, add, add_1)""",
            )
            self.assertExpectedInline(
                bw_graph.code.strip(),
                """\
def forward(self, tangents_1, tangents_2, tangents_token):
    with_effects_2 = torch.ops.higher_order.with_effects(tangents_token, torch.ops._mylib.zoo2.default, tangents_1); tangents_token = None
    getitem_4 = with_effects_2[0]; with_effects_2 = None
    with_effects_3 = torch.ops.higher_order.with_effects(getitem_4, torch.ops._mylib.zoo2.default, tangents_2); getitem_4 = None
    getitem_6 = with_effects_3[0]; with_effects_3 = None
    clone = torch.ops.aten.clone.default(tangents_1)
    clone_1 = torch.ops.aten.clone.default(tangents_2)
    return (clone, clone_1, tangents_1, tangents_2, getitem_6)""",
            )
    def test_effects_and_input_mutation_return(self):
        """An effect coexists with an out= input mutation whose buffer is also
        the return value."""

        def fn(a, b):
            torch.ops.aten._print("effect")
            return torch.sin(a, out=b)

        inp = [torch.randn(3, 3), torch.ones(3, 3)]
        ref_out = fn(*inp)
        out = torch.compile(fn, fullgraph=True)(*inp)
        self.assertEqual(ref_out, out)

        fw_graph, bw_graph = get_fw_bw_graph(fn, inp)
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, arg0_1, arg1_1, arg2_1):
    with_effects = torch.ops.higher_order.with_effects(arg0_1, torch.ops.aten._print.default, 'effect'); arg0_1 = None
    getitem = with_effects[0]; with_effects = None
    sin = torch.ops.aten.sin.default(arg1_1); arg1_1 = None
    return (getitem, sin, sin)""",
        )
    def test_effects_and_input_output_view_simple(self):
        """An output that is a view of the input compiles correctly both with
        and without requires_grad."""

        def fn(a):
            return a.view(-1)

        inp = [torch.ones(2, 2, requires_grad=False).add(1)]

        ref_out = fn(*inp)
        out = torch.compile(fn, fullgraph=True)(*inp)
        self.assertEqual(ref_out, out)

        inp = [torch.ones(2, 2, requires_grad=True).add(1)]

        ref_out = fn(*inp)
        out = torch.compile(fn, fullgraph=True)(*inp)
        self.assertEqual(ref_out, out)

        fw_graph, bw_graph = get_fw_bw_graph(fn, inp)
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, arg0_1):
    view = torch.ops.aten.view.default(arg0_1, [-1]); arg0_1 = None
    return (view,)""",
        )
    def test_effects_and_aliased_outputs(self):
        """Aliasing between two outputs (b and its view c) is preserved through
        compilation when an effect sits between their creation."""

        def fn(a):
            b = a.mul(2)
            torch.ops.aten._print("effect")
            c = b.view(-1)
            return b, c

        f_compiled = aot_function(fn, nop)
        for req_grad in [True, False]:
            inp = torch.ones(3, requires_grad=req_grad)
            out_ref = fn(inp)
            out_test = f_compiled(inp)
            self.assertEqual(out_ref[0], out_test[0])
            self.assertEqual(out_ref[1], out_test[1])
            # Try mutating one of the outputs, which is aliased.
            out_ref[0].mul_(3)
            out_test[0].mul_(3)
            # Assert that the aliasing relationship was preserved
            self.assertEqual(out_ref[0], out_test[0])
            self.assertEqual(out_ref[1], out_test[1])
    def test_effects_and_input_mutation_is_output(self):
        """A mutated input that is also returned is handled next to an effect,
        with and without requires_grad."""

        def fn(a):
            a.mul_(2)
            torch.ops.aten._print("effect")
            return a

        inp = make_inputs_non_leaves([torch.ones(3, 3, requires_grad=True)])
        ref_out = fn(*inp)
        out = torch.compile(fn, backend="aot_eager", fullgraph=True)(*inp)
        self.assertEqual(ref_out, out)

        inp = [torch.ones(3, 3, requires_grad=False)]
        ref_out = fn(*inp)
        out = torch.compile(fn, backend="aot_eager", fullgraph=True)(*inp)
        self.assertEqual(ref_out, out)

        fw_graph, bw_graph = get_fw_bw_graph(fn, inp)
        self.assertExpectedInline(
            fw_graph.code.strip(),
            """\
def forward(self, arg0_1, arg1_1):
    mul = torch.ops.aten.mul.Tensor(arg1_1, 2); arg1_1 = None
    with_effects = torch.ops.higher_order.with_effects(arg0_1, torch.ops.aten._print.default, 'effect'); arg0_1 = None
    getitem = with_effects[0]; with_effects = None
    return (getitem, mul, mul)""",
        )
    @skipIfTorchDynamo()
    def test_effectful_op_in_backward(self):
        """An effectful custom op whose backward calls the op again keeps token
        ordering in both the forward and backward graphs, for dense tensors and
        TwoTensor subclasses. The effect registration is undone in finally."""
        with torch.library._scoped_library("_mylib", "FRAGMENT") as lib:
            lib.define("foo(Tensor x) -> Tensor")

            def foo_impl(a):
                return a.clone()

            def foo_bwd(ctx, grad):
                return torch.ops._mylib.foo(grad)

            for backend in ["CPU", "CUDA", "Meta"]:
                lib.impl("foo", foo_impl, backend)

            torch.library.register_autograd("_mylib::foo", foo_bwd, lib=lib)

            handle = _register_effectful_op(
                torch.ops._mylib.foo.default, _EffectType.ORDERED
            )
            self.assertEqual(
                _get_effect(torch.ops._mylib.foo.default), _EffectType.ORDERED
            )
            try:

                def fn(x, y):
                    return torch.ops._mylib.foo(x) + y

                def ins_dense_req_grad():
                    return (
                        torch.tensor([1.0, 2.0, 3.0], requires_grad=True),
                        torch.tensor([4.0, 5.0, 6.0], requires_grad=True),
                    )

                def ins_sc_req_grad():
                    return (
                        TwoTensor(
                            torch.tensor([1.0, 2.0, 3.0], requires_grad=True),
                            torch.tensor([4.0, 5.0, 6.0], requires_grad=True),
                        ),
                        torch.tensor([4.0, 5.0, 6.0], requires_grad=True),
                    )

                # i == 0: dense inputs; i == 1: TwoTensor subclass input.
                for i, ins_fn in enumerate([ins_dense_req_grad, ins_sc_req_grad]):
                    ref_ins = ins_fn()
                    ref_out = fn(*ref_ins)
                    ref_out.sum().backward()

                    compiled_fn = torch.compile(fn, backend="inductor", fullgraph=True)

                    ins = ins_fn()
                    out = compiled_fn(*ins)
                    self.assertEqual(ref_out, out)

                    out.sum().backward()
                    self.assertEqual(ref_ins[1].grad, ins[1].grad)
                    self.assertEqual(ref_ins[0].grad, ins[0].grad)

                    fw_graph, bw_graph = get_fw_bw_graph(fn, ins)
                    if i == 0:
                        self.assertExpectedInline(
                            fw_graph.code.strip(),
                            """\
def forward(self, primals_1, primals_2, primals_3):
    with_effects = torch.ops.higher_order.with_effects(primals_1, torch.ops._mylib.foo.default, primals_2); primals_1 = primals_2 = None
    getitem = with_effects[0]
    getitem_1 = with_effects[1]; with_effects = None
    add = torch.ops.aten.add.Tensor(getitem_1, primals_3); getitem_1 = primals_3 = None
    return (getitem, add)""",
                        )
                        self.assertExpectedInline(
                            bw_graph.code.strip(),
                            """\
def forward(self, tangents_1, tangents_token):
    with_effects_1 = torch.ops.higher_order.with_effects(tangents_token, torch.ops._mylib.foo.default, tangents_1); tangents_token = None
    getitem_2 = with_effects_1[0]
    getitem_3 = with_effects_1[1]; with_effects_1 = None
    return (getitem_3, tangents_1, getitem_2)""",
                        )
                    elif i == 1:
                        self.assertExpectedInline(
                            fw_graph.code.strip(),
                            """\
def forward(self, primals_1, primals_2, primals_3, primals_4):
    with_effects = torch.ops.higher_order.with_effects(primals_1, torch.ops._mylib.foo.default, primals_2); primals_1 = primals_2 = None
    getitem = with_effects[0]
    getitem_1 = with_effects[1]; with_effects = None
    with_effects_1 = torch.ops.higher_order.with_effects(getitem, torch.ops._mylib.foo.default, primals_3); getitem = primals_3 = None
    getitem_2 = with_effects_1[0]
    getitem_3 = with_effects_1[1]; with_effects_1 = None
    add = torch.ops.aten.add.Tensor(getitem_1, primals_4); getitem_1 = None
    add_1 = torch.ops.aten.add.Tensor(getitem_3, primals_4); getitem_3 = primals_4 = None
    return (getitem_2, add, add_1)""",
                        )
                        self.assertExpectedInline(
                            bw_graph.code.strip(),
                            """\
def forward(self, tangents_1, tangents_2, tangents_token):
    with_effects_2 = torch.ops.higher_order.with_effects(tangents_token, torch.ops._mylib.foo.default, tangents_1); tangents_token = None
    getitem_4 = with_effects_2[0]
    getitem_5 = with_effects_2[1]; with_effects_2 = None
    with_effects_3 = torch.ops.higher_order.with_effects(getitem_4, torch.ops._mylib.foo.default, tangents_2); getitem_4 = None
    getitem_6 = with_effects_3[0]
    getitem_7 = with_effects_3[1]; with_effects_3 = None
    return (getitem_5, getitem_7, tangents_1, tangents_2, getitem_6)""",
                        )
                    else:
                        raise NotImplementedError
            finally:
                handle.destroy()
                self.assertEqual(_get_effect(torch.ops._mylib.foo.default), None)
    @skipIfNoDynamoSupport
    def test_regular_effectful_op_only_in_backward(self):
        """When aten.cos is registered as effectful, the forward graph (which
        never calls cos) stays token-free while the backward graph threads
        tokens through cos. Registration is undone in finally."""
        handle = _register_effectful_op(torch.ops.aten.cos.default, _EffectType.ORDERED)
        try:

            def fn(x):
                return x.sin()

            def inps_fn():
                return (torch.tensor([1.0, 2.0, 3.0], requires_grad=True),)

            torch.compile(fn, backend="inductor", fullgraph=True)(*inps_fn())

            fw_graph, bw_graph = get_fw_bw_graph(fn, inps_fn())
            self.assertExpectedInline(
                fw_graph.code.strip(),
                """\
def forward(self, primals_1):
    sin = torch.ops.aten.sin.default(primals_1)
    return (sin, primals_1)""",
            )
            self.assertExpectedInline(
                bw_graph.code.strip(),
                """\
def forward(self, primals_1, tangents_1, tangents_token):
    with_effects = torch.ops.higher_order.with_effects(tangents_token, torch.ops.aten.cos.default, primals_1); tangents_token = primals_1 = None
    getitem = with_effects[0]
    getitem_1 = with_effects[1]; with_effects = None
    mul = torch.ops.aten.mul.Tensor(tangents_1, getitem_1); tangents_1 = getitem_1 = None
    return (mul, getitem)""",
            )

            def inps_fn_sc():
                return (
                    TwoTensor(
                        torch.tensor([1.0, 2.0, 3.0], requires_grad=True),
                        torch.tensor([4.0, 5.0, 6.0], requires_grad=True),
                    ),
                )

            torch.compile(fn, backend="inductor", fullgraph=True)(*inps_fn_sc())
            fw_graph, bw_graph = get_fw_bw_graph(fn, inps_fn_sc())
            self.assertExpectedInline(
                fw_graph.code.strip(),
                """\
def forward(self, primals_1, primals_2):
    sin = torch.ops.aten.sin.default(primals_1)
    sin_1 = torch.ops.aten.sin.default(primals_2)
    return (sin, sin_1, primals_1, primals_2)""",
            )
            self.assertExpectedInline(
                bw_graph.code.strip(),
                """\
def forward(self, primals_1, primals_2, tangents_1, tangents_2, tangents_token):
    with_effects = torch.ops.higher_order.with_effects(tangents_token, torch.ops.aten.cos.default, primals_1); tangents_token = primals_1 = None
    getitem = with_effects[0]
    getitem_1 = with_effects[1]; with_effects = None
    with_effects_1 = torch.ops.higher_order.with_effects(getitem, torch.ops.aten.cos.default, primals_2); getitem = primals_2 = None
    getitem_2 = with_effects_1[0]
    getitem_3 = with_effects_1[1]; with_effects_1 = None
    mul = torch.ops.aten.mul.Tensor(tangents_1, getitem_1); tangents_1 = getitem_1 = None
    mul_1 = torch.ops.aten.mul.Tensor(tangents_2, getitem_3); tangents_2 = getitem_3 = None
    return (mul, mul_1, getitem_2)""",
            )
        finally:
            handle.destroy()
    @skipIfNoDynamoSupport
    def test_regular_effectful_op_in_forward_and_backward(self):
        """An effectful aten.cos called in forward also appears token-threaded
        in backward (for the derivative of cos)."""
        handle = _register_effectful_op(torch.ops.aten.cos.default, _EffectType.ORDERED)
        try:

            def fn(x):
                x = x.cos()
                return x.sin()

            inps = (torch.tensor([1.0, 2.0, 3.0], requires_grad=True),)
            torch.compile(fn, backend="inductor", fullgraph=True)(*inps)

            fw_graph, bw_graph = get_fw_bw_graph(fn, inps)
            self.assertExpectedInline(
                fw_graph.code.strip(),
                """\
def forward(self, primals_1, primals_2):
    with_effects = torch.ops.higher_order.with_effects(primals_1, torch.ops.aten.cos.default, primals_2); primals_1 = None
    getitem = with_effects[0]
    getitem_1 = with_effects[1]; with_effects = None
    sin = torch.ops.aten.sin.default(getitem_1)
    return (getitem, sin, primals_2, getitem_1)""",
            )
            self.assertExpectedInline(
                bw_graph.code.strip(),
                """\
def forward(self, primals_2, getitem_1, tangents_1, tangents_token):
    with_effects_1 = torch.ops.higher_order.with_effects(tangents_token, torch.ops.aten.cos.default, getitem_1); tangents_token = getitem_1 = None
    getitem_2 = with_effects_1[0]
    getitem_3 = with_effects_1[1]; with_effects_1 = None
    mul = torch.ops.aten.mul.Tensor(tangents_1, getitem_3); tangents_1 = getitem_3 = None
    sin_1 = torch.ops.aten.sin.default(primals_2); primals_2 = None
    neg = torch.ops.aten.neg.default(sin_1); sin_1 = None
    mul_1 = torch.ops.aten.mul.Tensor(mul, neg); mul = neg = None
    return (mul_1, getitem_2)""",
            )
        finally:
            handle.destroy()
    @unittest.skipIf(not TEST_CUDA, "triton")
    def test_export_invoke_subgraph(self):
        """Effect tokens flow through invoke_subgraph both in torch.export's
        decomposed graph and when tokens are unlifted for the inductor path;
        the memory-recording side effect fires once per submodule call."""
        with torch.library._scoped_library("mylib", "FRAGMENT") as lib:
            recorded_list = []

            @torch.library.custom_op("mylib::record_memory", mutates_args=())
            def record_memory(prefix: str, module_name: str) -> None:
                torch.cuda.synchronize()
                mem_alloc = torch.cuda.memory_allocated() / 1024**2
                mem_reserved = torch.cuda.memory_reserved() / 1024**2
                memory_str = f"[{prefix}] {module_name}: allocated={mem_alloc:.2f} MB, reserved={mem_reserved:.2f} MB"
                recorded_list.append(memory_str)

            @record_memory.register_fake
            def record_memory_fake(prefix, module_name):
                return

            record_memory.register_effect(_EffectType.ORDERED)
            has_side_effect(torch.ops.mylib.record_memory.default)

            class N(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.linear1 = torch.nn.Linear(1024, 1024)
                    self.relu = torch.nn.ReLU()
                    self.linear2 = torch.nn.Linear(1024, 1024)

                # nested_compile_region makes each N.forward a shared subgraph.
                @torch.compiler.nested_compile_region
                def forward(self, x):
                    torch.ops.mylib.record_memory("forward", "N")
                    x = self.linear1(x)
                    x = self.relu(x)
                    x = self.linear2(x)
                    return x

            class M(torch.nn.Module):
                def __init__(self):
                    super().__init__()
                    self.mod_list = torch.nn.ModuleList(N() for _ in range(3))

                def forward(self, x):
                    for m in self.mod_list:
                        x = m(x)
                    torch.ops.mylib.record_memory("forward", "N")
                    return (x,)

            model = M().to("cuda")
            torch.cuda.reset_peak_memory_stats()
            x = torch.randn(32, 1024, requires_grad=True, device="cuda")

            # Test torch.export
            ep = torch.export.export(model, (x,))
            decomp = ep.run_decompositions()
            # Root module + one deduplicated repeated_subgraph0.
            self.assertEqual(len(list(ep.graph_module.named_modules())), 2)

            self.assertExpectedInline(
                decomp.graph_module.code.strip(),
                """\
def forward(self, token, p_mod_list_0_linear1_weight, p_mod_list_0_linear1_bias, p_mod_list_0_linear2_weight, p_mod_list_0_linear2_bias, p_mod_list_1_linear1_weight, p_mod_list_1_linear1_bias, p_mod_list_1_linear2_weight, p_mod_list_1_linear2_bias, p_mod_list_2_linear1_weight, p_mod_list_2_linear1_bias, p_mod_list_2_linear2_weight, p_mod_list_2_linear2_bias, x):
    repeated_subgraph0 = self.repeated_subgraph0
    invoke_subgraph = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0, 'subgraph_0', token, x, p_mod_list_0_linear1_weight, p_mod_list_0_linear1_bias, p_mod_list_0_linear2_weight, p_mod_list_0_linear2_bias); repeated_subgraph0 = token = x = p_mod_list_0_linear1_weight = p_mod_list_0_linear1_bias = p_mod_list_0_linear2_weight = p_mod_list_0_linear2_bias = None
    getitem = invoke_subgraph[0]
    getitem_1 = invoke_subgraph[1]; invoke_subgraph = None
    repeated_subgraph0_1 = self.repeated_subgraph0
    invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0_1, 'subgraph_0', getitem, getitem_1, p_mod_list_1_linear1_weight, p_mod_list_1_linear1_bias, p_mod_list_1_linear2_weight, p_mod_list_1_linear2_bias); repeated_subgraph0_1 = getitem = getitem_1 = p_mod_list_1_linear1_weight = p_mod_list_1_linear1_bias = p_mod_list_1_linear2_weight = p_mod_list_1_linear2_bias = None
    getitem_2 = invoke_subgraph_1[0]
    getitem_3 = invoke_subgraph_1[1]; invoke_subgraph_1 = None
    repeated_subgraph0_2 = self.repeated_subgraph0
    invoke_subgraph_2 = torch.ops.higher_order.invoke_subgraph(repeated_subgraph0_2, 'subgraph_0', getitem_2, getitem_3, p_mod_list_2_linear1_weight, p_mod_list_2_linear1_bias, p_mod_list_2_linear2_weight, p_mod_list_2_linear2_bias); repeated_subgraph0_2 = getitem_2 = getitem_3 = p_mod_list_2_linear1_weight = p_mod_list_2_linear1_bias = p_mod_list_2_linear2_weight = p_mod_list_2_linear2_bias = None
    getitem_4 = invoke_subgraph_2[0]
    getitem_5 = invoke_subgraph_2[1]; invoke_subgraph_2 = None
    with_effects = torch.ops.higher_order.with_effects(getitem_4, torch.ops.mylib.record_memory.default, 'forward', 'N'); getitem_4 = None
    getitem_6 = with_effects[0]; with_effects = None
    return (getitem_6, getitem_5)""",
            )
            self.assertExpectedInline(
                decomp.graph_module.repeated_subgraph0.code.strip(),
                """\
def forward(self, arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1):
    with_effects = torch.ops.higher_order.with_effects(arg0_1, torch.ops.mylib.record_memory.default, 'forward', 'N'); arg0_1 = None
    getitem = with_effects[0]; with_effects = None
    permute = torch.ops.aten.permute.default(arg2_1, [1, 0]); arg2_1 = None
    addmm = torch.ops.aten.addmm.default(arg3_1, arg1_1, permute); arg3_1 = arg1_1 = permute = None
    relu = torch.ops.aten.relu.default(addmm); addmm = None
    permute_1 = torch.ops.aten.permute.default(arg4_1, [1, 0]); arg4_1 = None
    addmm_1 = torch.ops.aten.addmm.default(arg5_1, relu, permute_1); arg5_1 = relu = permute_1 = None
    return (getitem, addmm_1)""",
            )

            recorded_list.clear()
            out2 = ep.module()(x)
            # 3 calls inside N.forward + 1 top-level call in M.forward.
            self.assertEqual(len(recorded_list), 4)
            self.assertTrue(torch.allclose(model(x)[0], out2[0]))

            # Test when we unlift the tokens from the graph. This is used in the inductor path.
            with (
                tracing(TracingContext(None)),
                torch._functorch.config.patch(unlift_effect_tokens=True),
            ):
                gm, gs = aot_export_module(ep.module(), (x,), trace_joint=False)
                self.assertExpectedInline(
                    str(gm.code).strip(),
                    """\
def forward(self, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, arg7_1, arg8_1, arg9_1, arg10_1, arg11_1, arg12_1, arg13_1):
    _make_token_default = torch.ops.prims._make_token.default()
    repeated_subgraph0 = self.repeated_subgraph0
    with_effects_1 = torch.ops.higher_order.with_effects(_make_token_default, torch.ops.higher_order.invoke_subgraph, repeated_subgraph0, 'subgraph_0', arg13_1, arg1_1, arg2_1, arg3_1, arg4_1); _make_token_default = repeated_subgraph0 = arg13_1 = arg1_1 = arg2_1 = arg3_1 = arg4_1 = None
    getitem = with_effects_1[0]
    getitem_1 = with_effects_1[1]; with_effects_1 = None
    repeated_subgraph0_1 = self.repeated_subgraph0
    with_effects_2 = torch.ops.higher_order.with_effects(getitem, torch.ops.higher_order.invoke_subgraph, repeated_subgraph0_1, 'subgraph_0', getitem_1, arg5_1, arg6_1, arg7_1, arg8_1); getitem = repeated_subgraph0_1 = getitem_1 = arg5_1 = arg6_1 = arg7_1 = arg8_1 = None
    getitem_2 = with_effects_2[0]
    getitem_3 = with_effects_2[1]; with_effects_2 = None
    repeated_subgraph0_2 = self.repeated_subgraph0
    with_effects_3 = torch.ops.higher_order.with_effects(getitem_2, torch.ops.higher_order.invoke_subgraph, repeated_subgraph0_2, 'subgraph_0', getitem_3, arg9_1, arg10_1, arg11_1, arg12_1); getitem_2 = repeated_subgraph0_2 = getitem_3 = arg9_1 = arg10_1 = arg11_1 = arg12_1 = None
    getitem_4 = with_effects_3[0]
    getitem_5 = with_effects_3[1]; with_effects_3 = None
    with_effects = torch.ops.higher_order.with_effects(getitem_4, torch.ops.mylib.record_memory.default, 'forward', 'N'); getitem_4 = None
    getitem_6 = with_effects[0]; with_effects = None
    _sink_tokens_default = torch.ops.prims._sink_tokens.default([getitem_6]); getitem_6 = _sink_tokens_default = None
    return (getitem_5,)""",  # noqa: B950
                )
                self.assertExpectedInline(
                    str(gm.repeated_subgraph0.code).strip(),
                    """\
def forward(self, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1):
    _make_token_default = torch.ops.prims._make_token.default()
    with_effects = torch.ops.higher_order.with_effects(_make_token_default, torch.ops.mylib.record_memory.default, 'forward', 'N'); _make_token_default = None
    getitem = with_effects[0]; with_effects = None
    t = torch.ops.aten.t.default(arg2_1); arg2_1 = None
    addmm = torch.ops.aten.addmm.default(arg3_1, arg1_1, t); arg3_1 = arg1_1 = t = None
    relu = torch.ops.aten.relu.default(addmm); addmm = None
    t_1 = torch.ops.aten.t.default(arg4_1); arg4_1 = None
    addmm_1 = torch.ops.aten.addmm.default(arg5_1, relu, t_1); arg5_1 = relu = t_1 = None
    _sink_tokens_default = torch.ops.prims._sink_tokens.default([getitem]); getitem = _sink_tokens_default = None
    return (addmm_1,)""",  # noqa: B950
                )
if __name__ == "__main__":
    # Entry point when the file is executed directly: delegates to the shared
    # test runner imported earlier in this file.
    run_tests()
| TestWithEffects |
python | pytorch__pytorch | test/distributed/test_c10d_nccl.py | {
"start": 179443,
"end": 179594
} | class ____(Enum):
TORCH_CUDA_SET = auto() # torch.cuda.set_device
COLLECTIVE_ARGUMENT = auto() # broadcast_object_list(device=)
| SetDeviceMethod |
python | huggingface__transformers | src/transformers/models/olmo2/modular_olmo2.py | {
"start": 14190,
"end": 14609
} | class ____(OlmoModel):
def __init__(self, config: Olmo2Config):
super().__init__(config)
self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layers = nn.ModuleList(
[Olmo2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
# The heads now only need to redefine the model inside to the correct `RobertaModel`
| Olmo2Model |
python | ray-project__ray | python/ray/job_config.py | {
"start": 297,
"end": 9742
} | class ____:
"""A class used to store the configurations of a job.
Examples:
.. testcode::
:hide:
import ray
ray.shutdown()
.. testcode::
import ray
from ray.job_config import JobConfig
ray.init(job_config=JobConfig(default_actor_lifetime="non_detached"))
Args:
jvm_options: The jvm options for java workers of the job.
code_search_path: A list of directories or jar files that
specify the search path for user code. This will be used as
`CLASSPATH` in Java and `PYTHONPATH` in Python.
See :ref:`Ray cross-language programming <cross_language>` for more details.
runtime_env: A :ref:`runtime environment <runtime-environments>` dictionary.
metadata: An opaque metadata dictionary.
ray_namespace: A :ref:`namespace <namespaces-guide>`
is a logical grouping of jobs and named actors.
default_actor_lifetime: The default value of actor lifetime,
can be "detached" or "non_detached".
See :ref:`actor lifetimes <actor-lifetimes>` for more details.
"""
    def __init__(
        self,
        jvm_options: Optional[List[str]] = None,
        code_search_path: Optional[List[str]] = None,
        runtime_env: Optional[dict] = None,
        _client_job: bool = False,
        metadata: Optional[dict] = None,
        ray_namespace: Optional[str] = None,
        default_actor_lifetime: str = "non_detached",
        _py_driver_sys_path: Optional[List[str]] = None,
    ):
        """Initialize the job configuration.

        Parameter semantics are documented in the class docstring. Raises
        ValueError if default_actor_lifetime is not "detached"/"non_detached",
        and asserts that code_search_path is a list or tuple.
        """
        #: The jvm options for java workers of the job.
        self.jvm_options = jvm_options or []
        #: A list of directories or jar files that
        #: specify the search path for user code.
        self.code_search_path = code_search_path or []
        # It's difficult to find the error that caused by the
        # code_search_path is a string. So we assert here.
        assert isinstance(self.code_search_path, (list, tuple)), (
            f"The type of code search path is incorrect: " f"{type(code_search_path)}"
        )
        self._client_job = _client_job
        #: An opaque metadata dictionary.
        self.metadata = metadata or {}
        #: A namespace is a logical grouping of jobs and named actors.
        self.ray_namespace = ray_namespace
        # Both setters below also initialize/reset the cached protobuf.
        self.set_runtime_env(runtime_env)
        self.set_default_actor_lifetime(default_actor_lifetime)
        # A list of directories that specify the search path for python workers.
        self._py_driver_sys_path = _py_driver_sys_path or []
        # Python logging configurations that will be passed to Ray tasks/actors.
        self.py_logging_config = None
def set_metadata(self, key: str, value: str) -> None:
"""Add key-value pair to the metadata dictionary.
If the key already exists, the value is overwritten to the new value.
Examples:
.. testcode::
import ray
from ray.job_config import JobConfig
job_config = JobConfig()
job_config.set_metadata("submitter", "foo")
Args:
key: The key of the metadata.
value: The value of the metadata.
"""
self.metadata[key] = value
    def _serialize(self) -> str:
        """Serialize the struct into protobuf string"""
        # Builds (or reuses the cached) protobuf and returns its wire bytes.
        return self._get_proto_job_config().SerializeToString()
def set_runtime_env(
self,
runtime_env: Optional[Union[Dict[str, Any], "RuntimeEnv"]],
validate: bool = False,
) -> None:
"""Modify the runtime_env of the JobConfig.
We don't validate the runtime_env by default here because it may go
through some translation before actually being passed to C++ (e.g.,
working_dir translated from a local directory to a URI).
Args:
runtime_env: A :ref:`runtime environment <runtime-environments>` dictionary.
validate: Whether to validate the runtime env.
"""
self.runtime_env = runtime_env if runtime_env is not None else {}
if validate:
self.runtime_env = self._validate_runtime_env()
self._cached_pb = None
    def set_py_logging_config(
        self,
        logging_config: Optional[LoggingConfig] = None,
    ):
        """Set the logging configuration for the job.

        The logging configuration will be applied to the root loggers of
        all Ray task and actor processes that belong to this job.

        Args:
            logging_config: The logging configuration to set.
        """
        # Stored as-is; it is pickled into the protobuf in _get_proto_job_config.
        self.py_logging_config = logging_config
def set_ray_namespace(self, ray_namespace: str) -> None:
"""Set Ray :ref:`namespace <namespaces-guide>`.
Args:
ray_namespace: The namespace to set.
"""
if ray_namespace != self.ray_namespace:
self.ray_namespace = ray_namespace
self._cached_pb = None
def set_default_actor_lifetime(self, default_actor_lifetime: str) -> None:
"""Set the default actor lifetime, which can be "detached" or "non_detached".
See :ref:`actor lifetimes <actor-lifetimes>` for more details.
Args:
default_actor_lifetime: The default actor lifetime to set.
"""
import ray.core.generated.common_pb2 as common_pb2
if default_actor_lifetime == "detached":
self._default_actor_lifetime = common_pb2.JobConfig.ActorLifetime.DETACHED
elif default_actor_lifetime == "non_detached":
self._default_actor_lifetime = (
common_pb2.JobConfig.ActorLifetime.NON_DETACHED
)
else:
raise ValueError(
"Default actor lifetime must be one of `detached`, `non_detached`"
)
def _validate_runtime_env(self):
    """Parse ``self.runtime_env`` into a validated ``RuntimeEnv`` and return it."""
    # TODO(edoakes): this is really unfortunate, but JobConfig is imported
    # all over the place so this causes circular imports. We should remove
    # this dependency and pass in a validated runtime_env instead.
    from ray.runtime_env import RuntimeEnv
    from ray.runtime_env.runtime_env import _validate_no_local_paths

    env = self.runtime_env
    # Dict inputs are coerced; RuntimeEnv instances are used as-is.
    if not isinstance(env, RuntimeEnv):
        env = RuntimeEnv(**self.runtime_env)
    _validate_no_local_paths(env)
    return env
def _get_proto_job_config(self):
    """Return the protobuf structure of JobConfig.

    The result is memoized in ``self._cached_pb``; mutators such as
    ``set_runtime_env`` and ``set_ray_namespace`` reset that cache so the
    protobuf is rebuilt on the next call.
    """
    # TODO(edoakes): this is really unfortunate, but JobConfig is imported
    # all over the place so this causes circular imports. We should remove
    # this dependency and pass in a validated runtime_env instead.
    import ray.core.generated.common_pb2 as common_pb2
    from ray._private.utils import get_runtime_env_info

    if self._cached_pb is None:
        pb = common_pb2.JobConfig()
        if self.ray_namespace is None:
            # No namespace chosen by the user: generate a unique one so the
            # job gets an isolated namespace.
            pb.ray_namespace = str(uuid.uuid4())
        else:
            pb.ray_namespace = self.ray_namespace
        pb.jvm_options.extend(self.jvm_options)
        pb.code_search_path.extend(self.code_search_path)
        pb.py_driver_sys_path.extend(self._py_driver_sys_path)
        for k, v in self.metadata.items():
            pb.metadata[k] = v
        # Validate/parse the runtime_env and embed its serialized info.
        parsed_env = self._validate_runtime_env()
        pb.runtime_env_info.CopyFrom(
            get_runtime_env_info(
                parsed_env,
                is_job_runtime_env=True,
                serialize=False,
            )
        )
        if self._default_actor_lifetime is not None:
            pb.default_actor_lifetime = self._default_actor_lifetime
        if self.py_logging_config:
            # The logging config is an arbitrary Python object; pickle it
            # for transport inside the protobuf.
            pb.serialized_py_logging_config = pickle.dumps(self.py_logging_config)
        self._cached_pb = pb

    return self._cached_pb
def _runtime_env_has_working_dir(self):
    """Return whether the parsed runtime env specifies a working_dir."""
    parsed = self._validate_runtime_env()
    return parsed.has_working_dir()
def _get_serialized_runtime_env(self) -> str:
    """Return the JSON-serialized parsed runtime env dict."""
    parsed = self._validate_runtime_env()
    return parsed.serialize()
def _get_proto_runtime_env_config(self) -> str:
    """Return the JSON-serialized parsed runtime env info."""
    pb = self._get_proto_job_config()
    return pb.runtime_env_info.runtime_env_config
@classmethod
def from_json(cls, job_config_json):
    """Generates a JobConfig object from json.

    Examples:
        .. testcode::

            from ray.job_config import JobConfig
            job_config = JobConfig.from_json(
                {"runtime_env": {"working_dir": "uri://abc"}})

    Args:
        job_config_json: The job config json dictionary.
    """
    # Absent keys fall back to the constructor defaults.
    lookup = job_config_json.get
    return cls(
        jvm_options=lookup("jvm_options", None),
        code_search_path=lookup("code_search_path", None),
        runtime_env=lookup("runtime_env", None),
        metadata=lookup("metadata", None),
        ray_namespace=lookup("ray_namespace", None),
        _client_job=lookup("client_job", False),
        _py_driver_sys_path=lookup("py_driver_sys_path", None),
    )
| JobConfig |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.