language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/base.py | {
"start": 19980,
"end": 20313
} | class ____(Generic[_T_co], TypingOnly):
"""common class for Mapped and similar ORM container classes.
these are classes that can appear on the left side of an ORM declarative
mapping, containing a mapped class or in some cases a collection
surrounding a mapped class.
"""
__slots__ = ()
| _MappedAnnotationBase |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 22936,
"end": 23046
} | class ____(ApeException):
"""
An error to use when installing a plugin fails.
"""
| PluginInstallError |
python | tiangolo__fastapi | tests/test_forms_single_model.py | {
"start": 240,
"end": 3576
} | class ____(BaseModel):
username: str
lastname: str
age: Optional[int] = None
tags: List[str] = ["foo", "bar"]
alias_with: str = Field(alias="with", default="nothing")
@app.post("/form/")
def post_form(user: Annotated[FormModel, Form()]):
return user
client = TestClient(app)
def test_send_all_data():
response = client.post(
"/form/",
data={
"username": "Rick",
"lastname": "Sanchez",
"age": "70",
"tags": ["plumbus", "citadel"],
"with": "something",
},
)
assert response.status_code == 200, response.text
assert response.json() == {
"username": "Rick",
"lastname": "Sanchez",
"age": 70,
"tags": ["plumbus", "citadel"],
"with": "something",
}
def test_defaults():
response = client.post("/form/", data={"username": "Rick", "lastname": "Sanchez"})
assert response.status_code == 200, response.text
assert response.json() == {
"username": "Rick",
"lastname": "Sanchez",
"age": None,
"tags": ["foo", "bar"],
"with": "nothing",
}
def test_invalid_data():
response = client.post(
"/form/",
data={
"username": "Rick",
"lastname": "Sanchez",
"age": "seventy",
"tags": ["plumbus", "citadel"],
},
)
assert response.status_code == 422, response.text
assert response.json() == IsDict(
{
"detail": [
{
"type": "int_parsing",
"loc": ["body", "age"],
"msg": "Input should be a valid integer, unable to parse string as an integer",
"input": "seventy",
}
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["body", "age"],
"msg": "value is not a valid integer",
"type": "type_error.integer",
}
]
}
)
def test_no_data():
response = client.post("/form/")
assert response.status_code == 422, response.text
assert response.json() == IsDict(
{
"detail": [
{
"type": "missing",
"loc": ["body", "username"],
"msg": "Field required",
"input": {"tags": ["foo", "bar"], "with": "nothing"},
},
{
"type": "missing",
"loc": ["body", "lastname"],
"msg": "Field required",
"input": {"tags": ["foo", "bar"], "with": "nothing"},
},
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["body", "username"],
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ["body", "lastname"],
"msg": "field required",
"type": "value_error.missing",
},
]
}
)
| FormModel |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_str.py | {
"start": 339,
"end": 457
} | class ____:
def __str__(self):
return return_int()
# These testcases should NOT raise errors
| ComplexReturn |
python | pytest-dev__pytest | src/_pytest/_code/code.py | {
"start": 46703,
"end": 46978
} | class ____(ExceptionRepr):
reprtraceback: ReprTraceback
reprcrash: ReprFileLocation | None
def toterminal(self, tw: TerminalWriter) -> None:
self.reprtraceback.toterminal(tw)
super().toterminal(tw)
@dataclasses.dataclass(eq=False)
| ReprExceptionInfo |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/metadata.py | {
"start": 9087,
"end": 21689
} | class ____(object):
"""The legacy metadata of a release.
Supports versions 1.0, 1.1, 1.2, 2.0 and 1.3/2.1 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a metadata file
- *fileobj* give a file-like object with metadata as content
- *mapping* is a dict-like object
- *scheme* is a version scheme name
"""
# TODO document the mapping API and UNKNOWN default key
def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._fields = {}
self.requires_files = []
self._dependencies = None
self.scheme = scheme
if path is not None:
self.read(path)
elif fileobj is not None:
self.read_file(fileobj)
elif mapping is not None:
self.update(mapping)
self.set_metadata_version()
def set_metadata_version(self):
self._fields['Metadata-Version'] = _best_version(self._fields)
def _write_field(self, fileobj, name, value):
fileobj.write('%s: %s\n' % (name, value))
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __delitem__(self, name):
field_name = self._convert_name(name)
try:
del self._fields[field_name]
except KeyError:
raise KeyError(name)
def __contains__(self, name):
return (name in self._fields or self._convert_name(name) in self._fields)
def _convert_name(self, name):
if name in _ALL_FIELDS:
return name
name = name.replace('-', '_').lower()
return _ATTR2FIELD.get(name, name)
def _default_value(self, name):
if name in _LISTFIELDS or name in _ELEMENTSFIELD:
return []
return 'UNKNOWN'
def _remove_line_prefix(self, value):
if self.metadata_version in ('1.0', '1.1'):
return _LINE_PREFIX_PRE_1_2.sub('\n', value)
else:
return _LINE_PREFIX_1_2.sub('\n', value)
def __getattr__(self, name):
if name in _ATTR2FIELD:
return self[name]
raise AttributeError(name)
#
# Public API
#
def get_fullname(self, filesafe=False):
"""
Return the distribution name with version.
If filesafe is true, return a filename-escaped form.
"""
return _get_name_and_version(self['Name'], self['Version'], filesafe)
def is_field(self, name):
"""return True if name is a valid metadata key"""
name = self._convert_name(name)
return name in _ALL_FIELDS
def is_multi_field(self, name):
name = self._convert_name(name)
return name in _LISTFIELDS
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
def read_file(self, fileob):
"""Read the metadata values from a file object."""
msg = message_from_file(fileob)
self._fields['Metadata-Version'] = msg['metadata-version']
# When reading, get all the fields we can
for field in _ALL_FIELDS:
if field not in msg:
continue
if field in _LISTFIELDS:
# we can have multiple lines
values = msg.get_all(field)
if field in _LISTTUPLEFIELDS and values is not None:
values = [tuple(value.split(',')) for value in values]
self.set(field, values)
else:
# single line
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
# PEP 566 specifies that the body be used for the description, if
# available
body = msg.get_payload()
self["Description"] = body if body else self["Description"]
# logger.debug('Attempting to set metadata for %s', self)
# self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
if self.metadata_version in ('1.0', '1.1'):
values = values.replace('\n', '\n ')
else:
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
self._write_field(fileobject, field, value)
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
Behavior is like `dict.update`: If `other` has a ``keys`` method,
they are looped over and ``self[key]`` is assigned ``other[key]``.
Else, ``other`` is an iterable of ``(key, value)`` iterables.
Keys that don't match a metadata field or that have an empty value are
dropped.
"""
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v)
def set(self, name, value):
"""Control then set a metadata field."""
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning("'%s': '%s' is not valid (field '%s')", project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value
def get(self, name, default=_MISSING):
"""Get a metadata field."""
name = self._convert_name(name)
if name not in self._fields:
if default is _MISSING:
default = self._default_value(name)
return default
if name in _UNICODEFIELDS:
value = self._fields[name]
return value
elif name in _LISTFIELDS:
value = self._fields[name]
if value is None:
return []
res = []
for val in value:
if name not in _LISTTUPLEFIELDS:
res.append(val)
else:
# That's for Project-URL
res.append((val[0], val[1]))
return res
elif name in _ELEMENTSFIELD:
value = self._fields[name]
if isinstance(value, string_types):
return value.split(',')
return self._fields[name]
def check(self, strict=False):
"""Check if the metadata is compliant. If strict is True then raise if
no Name or Version are provided"""
self.set_metadata_version()
# XXX should check the versions (if the file was loaded)
missing, warnings = [], []
for attr in ('Name', 'Version'): # required by PEP 345
if attr not in self:
missing.append(attr)
if strict and missing != []:
msg = 'missing required metadata: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for attr in ('Home-page', 'Author'):
if attr not in self:
missing.append(attr)
# checking metadata 1.2 (XXX needs to check 1.1, 1.0)
if self['Metadata-Version'] != '1.2':
return missing, warnings
scheme = get_scheme(self.scheme)
def are_valid_constraints(value):
for v in value:
if not scheme.is_valid_matcher(v.split(';')[0]):
return False
return True
for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
(_VERSIONS_FIELDS, scheme.is_valid_constraint_list), (_VERSION_FIELDS,
scheme.is_valid_version)):
for field in fields:
value = self.get(field, None)
if value is not None and not controller(value):
warnings.append("Wrong value for '%s': %s" % (field, value))
return missing, warnings
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
This is as per https://www.python.org/dev/peps/pep-0566/#id17.
"""
self.set_metadata_version()
fields = _version2fieldlist(self['Metadata-Version'])
data = {}
for field_name in fields:
if not skip_missing or field_name in self._fields:
key = _FIELD2ATTR[field_name]
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
return data
def add_requirements(self, requirements):
if self['Metadata-Version'] == '1.1':
# we can't have 1.1 metadata *and* Setuptools requires
for field in ('Obsoletes', 'Requires', 'Provides'):
if field in self:
del self[field]
self['Requires-Dist'] += requirements
# Mapping API
# TODO could add iter* variants
def keys(self):
return list(_version2fieldlist(self['Metadata-Version']))
def __iter__(self):
for key in self.keys():
yield key
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name, self.version)
METADATA_FILENAME = 'pydist.json'
WHEEL_METADATA_FILENAME = 'metadata.json'
LEGACY_METADATA_FILENAME = 'METADATA'
| LegacyMetadata |
python | scikit-learn__scikit-learn | sklearn/ensemble/_voting.py | {
"start": 6142,
"end": 18275
} | class ____(ClassifierMixin, _BaseVoting):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
.. versionadded:: 0.17
Parameters
----------
estimators : list of (str, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
``self.estimators_``. An estimator can be set to ``'drop'`` using
:meth:`set_params`.
.. versionchanged:: 0.21
``'drop'`` is accepted. Using None was deprecated in 0.22 and
support was removed in 0.24.
voting : {'hard', 'soft'}, default='hard'
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like of shape (n_classifiers,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
n_jobs : int, default=None
The number of jobs to run in parallel for ``fit``.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.18
flatten_transform : bool, default=True
Affects shape of transform output only when voting='soft'
If voting='soft' and flatten_transform=True, transform method returns
matrix with shape (n_samples, n_classifiers * n_classes). If
flatten_transform=False, it returns
(n_classifiers, n_samples, n_classes).
verbose : bool, default=False
If True, the time elapsed while fitting will be printed as it
is completed.
.. versionadded:: 0.23
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators as defined in ``estimators``
that are not 'drop'.
named_estimators_ : :class:`~sklearn.utils.Bunch`
Attribute to access any fitted sub-estimators by name.
.. versionadded:: 0.20
le_ : :class:`~sklearn.preprocessing.LabelEncoder`
Transformer used to encode the labels during fit and decode during
prediction.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying classifier exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimators expose such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
VotingRegressor : Prediction voting regressor.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(n_estimators=50, random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> np.array_equal(eclf1.named_estimators_.lr.predict(X),
... eclf1.named_estimators_['lr'].predict(X))
True
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
To drop an estimator, :meth:`set_params` can be used to remove it. Here we
dropped one of the estimators, resulting in 2 fitted estimators:
>>> eclf2 = eclf2.set_params(lr='drop')
>>> eclf2 = eclf2.fit(X, y)
>>> len(eclf2.estimators_)
2
Setting `flatten_transform=True` with `voting='soft'` flattens output shape of
`transform`:
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1],
... flatten_transform=True)
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>> print(eclf3.transform(X).shape)
(6, 6)
"""
_parameter_constraints: dict = {
**_BaseVoting._parameter_constraints,
"voting": [StrOptions({"hard", "soft"})],
"flatten_transform": ["boolean"],
}
def __init__(
self,
estimators,
*,
voting="hard",
weights=None,
n_jobs=None,
flatten_transform=True,
verbose=False,
):
super().__init__(estimators=estimators)
self.voting = voting
self.weights = weights
self.n_jobs = n_jobs
self.flatten_transform = flatten_transform
self.verbose = verbose
@_fit_context(
# estimators in VotingClassifier.estimators are not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y, **fit_params):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns the instance itself.
"""
_raise_for_params(fit_params, self, "fit", allow=["sample_weight"])
y_type = type_of_target(y, input_name="y")
if y_type in ("unknown", "continuous"):
# raise a specific ValueError for non-classification tasks
raise ValueError(
f"Unknown label type: {y_type}. Maybe you are trying to fit a "
"classifier, which expects discrete classes on a "
"regression target with continuous values."
)
elif y_type not in ("binary", "multiclass"):
# raise a NotImplementedError for backward compatibility for non-supported
# classification tasks
raise NotImplementedError(
f"{self.__class__.__name__} only supports binary or multiclass "
"classification. Multilabel and multi-output classification are not "
"supported."
)
self.le_ = LabelEncoder().fit(y)
self.classes_ = self.le_.classes_
transformed_y = self.le_.transform(y)
return super().fit(X, transformed_y, **fit_params)
def predict(self, X):
"""Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
maj : array-like of shape (n_samples,)
Predicted class labels.
"""
check_is_fitted(self)
if self.voting == "soft":
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(
lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)),
axis=1,
arr=predictions,
)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls."""
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _check_voting(self):
if self.voting == "hard":
raise AttributeError(
f"predict_proba is not available when voting={self.voting!r}"
)
return True
@available_if(_check_voting)
def predict_proba(self, X):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
avg : array-like of shape (n_samples, n_classes)
Weighted average probability for each class per sample.
"""
check_is_fitted(self)
avg = np.average(
self._collect_probas(X), axis=0, weights=self._weights_not_none
)
return avg
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
probabilities_or_labels
If `voting='soft'` and `flatten_transform=True`:
returns ndarray of shape (n_samples, n_classifiers * n_classes),
being class probabilities calculated by each classifier.
If `voting='soft' and `flatten_transform=False`:
ndarray of shape (n_classifiers, n_samples, n_classes)
If `voting='hard'`:
ndarray of shape (n_samples, n_classifiers), being
class labels predicted by each classifier.
"""
check_is_fitted(self)
if self.voting == "soft":
probas = self._collect_probas(X)
if not self.flatten_transform:
return probas
return np.hstack(probas)
else:
return self._predict(X)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
if self.voting == "soft" and not self.flatten_transform:
raise ValueError(
"get_feature_names_out is not supported when `voting='soft'` and "
"`flatten_transform=False`"
)
_check_feature_names_in(self, input_features, generate_names=False)
class_name = self.__class__.__name__.lower()
active_names = [name for name, est in self.estimators if est != "drop"]
if self.voting == "hard":
return np.asarray(
[f"{class_name}_{name}" for name in active_names], dtype=object
)
# voting == "soft"
n_classes = len(self.classes_)
names_out = [
f"{class_name}_{name}{i}" for name in active_names for i in range(n_classes)
]
return np.asarray(names_out, dtype=object)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = []
return tags
| VotingClassifier |
python | pytorch__pytorch | test/inductor/test_loop_ordering.py | {
"start": 1692,
"end": 2228
} | class ____(TestCase):
_exit_stack = None
@classmethod
def setUpClass(cls):
super().setUpClass()
gm = torch.fx.symbolic_trace(lambda: 0)
graph = GraphLowering(gm)
graph.scheduler = MockScheduler
cls._exit_stack = contextlib.ExitStack()
cls._exit_stack.enter_context(V.set_graph_handler(graph))
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._exit_stack.close()
@inductor_config.patch(loop_ordering_after_fusion=True)
| MockSchedulerTest |
python | tensorflow__tensorflow | tensorflow/python/training/experimental/loss_scale_test.py | {
"start": 2108,
"end": 3765
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_basic(self):
loss_scale_value = 1000
loss_scale = loss_scale_module.FixedLossScale(loss_scale_value)
update_op, should_apply = loss_scale.update([constant_op.constant(0.)])
self.evaluate(update_op)
# should_apply should be a bool instead of a tensor, so that a tf.cond does
# not have to be built in the graph by the caller.
self.assertIsInstance(should_apply, bool)
self.assertTrue(should_apply)
self.assertEqual(loss_scale_value, self.evaluate(loss_scale()))
update_op, should_apply = loss_scale.update(
[constant_op.constant(float('NaN'))])
self.evaluate(update_op)
self.assertIsInstance(should_apply, bool)
self.assertTrue(should_apply)
self.assertEqual(loss_scale_value, self.evaluate(loss_scale()))
@test_util.run_in_graph_and_eager_modes
def test_serialization(self):
loss_scale = loss_scale_module.get(123)
config = loss_scale.get_config()
loss_scale = loss_scale_module.FixedLossScale.from_config(config)
self.assertEqual(self.evaluate(loss_scale()), 123.)
@test_util.run_in_graph_and_eager_modes
def test_call_type(self):
scalar = loss_scale_module.FixedLossScale(123)
self.assertIsInstance(scalar(), tensor_lib.Tensor)
@test_util.run_in_graph_and_eager_modes
def test_repr(self):
loss_scale = loss_scale_module.FixedLossScale(123)
self.assertEqual(repr(loss_scale), 'FixedLossScale(123.0)')
def _get_example_iter(inputs):
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
return dataset_ops.make_one_shot_iterator(dataset)
| FixedLossScaleTest |
python | huggingface__transformers | src/transformers/pipelines/mask_generation.py | {
"start": 1064,
"end": 15328
} | class ____(ChunkPipeline):
"""
Automatic mask generation for images using `SamForMaskGeneration`. This pipeline predicts binary masks for an
image, given an image. It is a `ChunkPipeline` because you can separate the points in a mini-batch in order to
avoid OOM issues. Use the `points_per_batch` argument to control the number of points that will be processed at the
same time. Default is `64`.
The pipeline works in 3 steps:
1. `preprocess`: A grid of 1024 points evenly separated is generated along with bounding boxes and point
labels.
For more details on how the points and bounding boxes are created, check the `_generate_crop_boxes`
function. The image is also preprocessed using the `image_processor`. This function `yields` a minibatch of
`points_per_batch`.
2. `forward`: feeds the outputs of `preprocess` to the model. The image embedding is computed only once.
Calls both `self.model.get_image_embeddings` and makes sure that the gradients are not computed, and the
tensors and models are on the same device.
3. `postprocess`: The most important part of the automatic mask generation happens here. Three steps
are induced:
- image_processor.postprocess_masks (run on each minibatch loop): takes in the raw output masks,
resizes them according
to the image size, and transforms there to binary masks.
- image_processor.filter_masks (on each minibatch loop): uses both `pred_iou_thresh` and
`stability_scores`. Also
applies a variety of filters based on non maximum suppression to remove bad masks.
- image_processor.postprocess_masks_for_amg applies the NSM on the mask to only keep relevant ones.
Example:
```python
>>> from transformers import pipeline
>>> generator = pipeline(model="facebook/sam-vit-base", task="mask-generation")
>>> outputs = generator(
... "http://images.cocodataset.org/val2017/000000039769.jpg",
... )
>>> outputs = generator(
... "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", points_per_batch=128
... )
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"mask-generation"`.
See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=mask-generation).
"""
_load_processor = False
_load_image_processor = True
_load_feature_extractor = False
_load_tokenizer = False
def __init__(self, **kwargs):
super().__init__(**kwargs)
requires_backends(self, "vision")
requires_backends(self, "torch")
self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING_NAMES)
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
postprocess_kwargs = {}
forward_params = {}
# preprocess args
if "points_per_batch" in kwargs:
preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
if "timeout" in kwargs:
preprocess_kwargs["timeout"] = kwargs["timeout"]
# postprocess args
if "pred_iou_thresh" in kwargs:
forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
forward_params["mask_threshold"] = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
if "max_hole_area" in kwargs:
forward_params["max_hole_area"] = kwargs["max_hole_area"]
if "max_sprinkle_area" in kwargs:
forward_params["max_sprinkle_area"] = kwargs["max_sprinkle_area"]
if "crops_nms_thresh" in kwargs:
postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
@overload
def __call__(self, image: Union[str, "Image.Image"], *args: Any, **kwargs: Any) -> dict[str, Any]: ...
@overload
def __call__(self, image: list[str] | list["Image.Image"], *args: Any, **kwargs: Any) -> list[dict[str, Any]]: ...
def __call__(
self, image: Union[str, "Image.Image", list[str], list["Image.Image"]], *args: Any, **kwargs: Any
) -> dict[str, Any] | list[dict[str, Any]]:
"""
Generates binary segmentation masks
Args:
image (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
Image or list of images.
mask_threshold (`float`, *optional*, defaults to 0.0):
Threshold to use when turning the predicted masks into binary values.
pred_iou_thresh (`float`, *optional*, defaults to 0.88):
A filtering threshold in `[0,1]` applied on the model's predicted mask quality.
stability_score_thresh (`float`, *optional*, defaults to 0.95):
A filtering threshold in `[0,1]`, using the stability of the mask under changes to the cutoff used to
binarize the model's mask predictions.
stability_score_offset (`int`, *optional*, defaults to 1):
The amount to shift the cutoff when calculated the stability score.
crops_nms_thresh (`float`, *optional*, defaults to 0.7):
The box IoU cutoff used by non-maximal suppression to filter duplicate masks.
crops_n_layers (`int`, *optional*, defaults to 0):
If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of
layers to run, where each layer has 2**i_layer number of image crops.
crop_overlap_ratio (`float`, *optional*, defaults to `512 / 1500`):
Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (`int`, *optional*, defaults to `1`):
The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
`Dict`: A dictionary with the following keys:
- **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape `(width,
height)` of the original image. Returns a mask filled with zeros if no object is found.
- **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of
the "object" described by the label and the mask.
"""
num_workers = kwargs.pop("num_workers", None)
batch_size = kwargs.pop("batch_size", None)
return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
def preprocess(
self,
image,
points_per_batch=64,
crops_n_layers: int = 0,
crop_overlap_ratio: float = 512 / 1500,
points_per_crop: int = 32,
crop_n_points_downscale_factor: int = 1,
timeout: float | None = None,
):
image = load_image(image, timeout=timeout)
target_size = self.image_processor.size.get("longest_edge", self.image_processor.size.get("height"))
crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
)
model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
model_inputs = model_inputs.to(self.dtype)
with self.device_placement():
inference_context = self.get_inference_context()
with inference_context():
model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
# Handle both SAM (single tensor) and SAM-HQ (tuple) outputs
if isinstance(embeddings, tuple):
image_embeddings, intermediate_embeddings = embeddings
model_inputs["intermediate_embeddings"] = intermediate_embeddings
else:
image_embeddings = embeddings
# TODO: Identifying the model by the type of its returned embeddings is brittle.
# Consider using a more robust method for distinguishing model types here.
model_inputs["image_embeddings"] = image_embeddings
n_points = grid_points.shape[1]
points_per_batch = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None"
)
for i in range(0, n_points, points_per_batch):
batched_points = grid_points[:, i : i + points_per_batch, :, :]
labels = input_labels[:, i : i + points_per_batch]
is_last = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _forward(
self,
model_inputs,
pred_iou_thresh=0.88,
stability_score_thresh=0.95,
mask_threshold=0,
stability_score_offset=1,
max_hole_area=None,
max_sprinkle_area=None,
):
input_boxes = model_inputs.pop("input_boxes")
is_last = model_inputs.pop("is_last")
original_sizes = model_inputs.pop("original_sizes").tolist()
reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes", None)
reshaped_input_sizes = reshaped_input_sizes.tolist() if reshaped_input_sizes is not None else None
model_outputs = self.model(**model_inputs)
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
low_resolution_masks = model_outputs["pred_masks"]
postprocess_kwargs = {}
if max_hole_area is not None:
postprocess_kwargs["max_hole_area"] = max_hole_area
if max_sprinkle_area is not None and max_sprinkle_area > 0:
postprocess_kwargs["max_sprinkle_area"] = max_sprinkle_area
if postprocess_kwargs:
low_resolution_masks = self.image_processor.post_process_masks(
low_resolution_masks,
original_sizes,
mask_threshold=mask_threshold,
reshaped_input_sizes=reshaped_input_sizes,
binarize=False,
**postprocess_kwargs,
)
masks = self.image_processor.post_process_masks(
low_resolution_masks,
original_sizes,
mask_threshold=mask_threshold,
reshaped_input_sizes=reshaped_input_sizes,
binarize=False,
)
iou_scores = model_outputs["iou_scores"]
masks, iou_scores, boxes = self.image_processor.filter_masks(
masks[0],
iou_scores[0],
original_sizes[0],
input_boxes[0],
pred_iou_thresh,
stability_score_thresh,
mask_threshold,
stability_score_offset,
)
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def postprocess(
self,
model_outputs,
output_rle_mask=False,
output_bboxes_mask=False,
crops_nms_thresh=0.7,
):
all_scores = []
all_masks = []
all_boxes = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores"))
all_masks.extend(model_output.pop("masks"))
all_boxes.append(model_output.pop("boxes"))
all_scores = torch.cat(all_scores)
all_boxes = torch.cat(all_boxes)
output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
all_masks, all_scores, all_boxes, crops_nms_thresh
)
extra = defaultdict(list)
for output in model_outputs:
for k, v in output.items():
extra[k].append(v)
optional = {}
if output_rle_mask:
optional["rle_mask"] = rle_mask
if output_bboxes_mask:
optional["bounding_boxes"] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| MaskGenerationPipeline |
python | ray-project__ray | python/ray/tune/logger/aim.py | {
"start": 623,
"end": 6820
} | class ____(LoggerCallback):
"""Aim Logger: logs metrics in Aim format.
Aim is an open-source, self-hosted ML experiment tracking tool.
It's good at tracking lots (thousands) of training runs, and it allows you to
compare them with a performant and well-designed UI.
Source: https://github.com/aimhubio/aim
Args:
repo: Aim repository directory or a `Repo` object that the Run object will
log results to. If not provided, a default repo will be set up in the
experiment directory (one level above trial directories).
experiment: Sets the `experiment` property of each Run object, which is the
experiment name associated with it. Can be used later to query
runs/sequences.
If not provided, the default will be the Tune experiment name set
by `RunConfig(name=...)`.
metrics: List of metric names (out of the metrics reported by Tune) to
track in Aim. If no metric are specified, log everything that
is reported.
aim_run_kwargs: Additional arguments that will be passed when creating the
individual `Run` objects for each trial. For the full list of arguments,
please see the Aim documentation:
https://aimstack.readthedocs.io/en/latest/refs/sdk.html
"""
VALID_HPARAMS = (str, bool, int, float, list, type(None))
VALID_NP_HPARAMS = (np.bool_, np.float32, np.float64, np.int32, np.int64)
def __init__(
self,
repo: Optional[Union[str, "Repo"]] = None,
experiment_name: Optional[str] = None,
metrics: Optional[List[str]] = None,
**aim_run_kwargs,
):
"""
See help(AimLoggerCallback) for more information about parameters.
"""
assert Run is not None, (
"aim must be installed!. You can install aim with"
" the command: `pip install aim`."
)
self._repo_path = repo
self._experiment_name = experiment_name
if not (bool(metrics) or metrics is None):
raise ValueError(
"`metrics` must either contain at least one metric name, or be None, "
"in which case all reported metrics will be logged to the aim repo."
)
self._metrics = metrics
self._aim_run_kwargs = aim_run_kwargs
self._trial_to_run: Dict["Trial", Run] = {}
def _create_run(self, trial: "Trial") -> Run:
"""Initializes an Aim Run object for a given trial.
Args:
trial: The Tune trial that aim will track as a Run.
Returns:
Run: The created aim run for a specific trial.
"""
experiment_dir = trial.local_experiment_path
run = Run(
repo=self._repo_path or experiment_dir,
experiment=self._experiment_name or trial.experiment_dir_name,
**self._aim_run_kwargs,
)
# Attach a few useful trial properties
run["trial_id"] = trial.trial_id
run["trial_log_dir"] = trial.path
trial_ip = trial.get_ray_actor_ip()
if trial_ip:
run["trial_ip"] = trial_ip
return run
def log_trial_start(self, trial: "Trial"):
if trial in self._trial_to_run:
# Cleanup an existing run if the trial has been restarted
self._trial_to_run[trial].close()
trial.init_local_path()
self._trial_to_run[trial] = self._create_run(trial)
if trial.evaluated_params:
self._log_trial_hparams(trial)
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
tmp_result = result.copy()
step = result.get(TIMESTEPS_TOTAL, None) or result[TRAINING_ITERATION]
for k in ["config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION]:
tmp_result.pop(k, None) # not useful to log these
# `context` and `epoch` are special keys that users can report,
# which are treated as special aim metrics/configurations.
context = tmp_result.pop("context", None)
epoch = tmp_result.pop("epoch", None)
trial_run = self._trial_to_run[trial]
path = ["ray", "tune"]
flat_result = flatten_dict(tmp_result, delimiter="/")
valid_result = {}
for attr, value in flat_result.items():
if self._metrics and attr not in self._metrics:
continue
full_attr = "/".join(path + [attr])
if isinstance(value, tuple(VALID_SUMMARY_TYPES)) and not (
np.isnan(value) or np.isinf(value)
):
valid_result[attr] = value
trial_run.track(
value=value,
name=full_attr,
epoch=epoch,
step=step,
context=context,
)
elif (isinstance(value, (list, tuple, set)) and len(value) > 0) or (
isinstance(value, np.ndarray) and value.size > 0
):
valid_result[attr] = value
def log_trial_end(self, trial: "Trial", failed: bool = False):
trial_run = self._trial_to_run.pop(trial)
trial_run.close()
def _log_trial_hparams(self, trial: "Trial"):
params = flatten_dict(trial.evaluated_params, delimiter="/")
flat_params = flatten_dict(params)
scrubbed_params = {
k: v for k, v in flat_params.items() if isinstance(v, self.VALID_HPARAMS)
}
np_params = {
k: v.tolist()
for k, v in flat_params.items()
if isinstance(v, self.VALID_NP_HPARAMS)
}
scrubbed_params.update(np_params)
removed = {
k: v
for k, v in flat_params.items()
if not isinstance(v, self.VALID_HPARAMS + self.VALID_NP_HPARAMS)
}
if removed:
logger.info(
"Removed the following hyperparameter values when "
"logging to aim: %s",
str(removed),
)
run = self._trial_to_run[trial]
run["hparams"] = scrubbed_params
| AimLoggerCallback |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/skip_test.py | {
"start": 7190,
"end": 8676
} | class ____(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(
dataset_range=[10],
count=[0, 2],
repetitions=[1, 2],
reshuffle_each_iteration=[True, False],
symbolic_checkpoint=[True, False])))
def testSkip(
self,
verify_fn: Callable[..., None],
dataset_range: int,
count: int,
repetitions: int,
reshuffle_each_iteration: bool,
symbolic_checkpoint: bool):
def _build_dataset() -> dataset_ops.Dataset:
dataset = dataset_ops.Dataset.range(dataset_range)
dataset = dataset.skip(count)
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
if repetitions > 1:
dataset = dataset.repeat(repetitions)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=42, reshuffle_each_iteration=reshuffle_each_iteration)
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
return dataset.with_options(options)
verify_fn(
self,
_build_dataset,
num_outputs=(dataset_range - count) * repetitions,
assert_items_equal=reshuffle_each_iteration,
)
if __name__ == "__main__":
test.main()
| SkipGlobalShuffleCheckpointTest |
python | tensorflow__tensorflow | tensorflow/python/ops/gradients_test.py | {
"start": 36094,
"end": 36707
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_v1_only("b/120545219")
def testBasic(self):
gamma = resource_variable_ops.ResourceVariable(
np.random.random((3,)),
dtype="float32", name="gamma")
inputs = array_ops.ones(shape=(3,), dtype="float32")
def TestFn():
output = inputs + gamma
return output
training = array_ops.placeholder_with_default(True, shape=())
output = cond.cond(
training, TestFn, lambda: inputs)
loss = output
grads = gradients.gradients(
loss, [gamma])
self.assertNotIn(None, grads)
| ResourceCondTest |
python | plotly__plotly.py | plotly/graph_objs/scatterpolargl/_textfont.py | {
"start": 233,
"end": 11059
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolargl"
_path_str = "scatterpolargl.textfont"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"size",
"sizesrc",
"style",
"stylesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'bold']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Textfont object
Sets the text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolargl.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolargl.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Textfont |
python | realpython__materials | python-script-structure/iris_summary.py | {
"start": 632,
"end": 675
} | class ____(IntEnum):
IRIS = 53
| UCIDataset |
python | doocs__leetcode | solution/0100-0199/0139.Word Break/Solution2.py | {
"start": 374,
"end": 970
} | class ____:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
trie = Trie()
for w in wordDict:
trie.insert(w)
n = len(s)
f = [False] * (n + 1)
f[n] = True
for i in range(n - 1, -1, -1):
node = trie
for j in range(i, n):
idx = ord(s[j]) - ord('a')
if not node.children[idx]:
break
node = node.children[idx]
if node.isEnd and f[j + 1]:
f[i] = True
break
return f[0]
| Solution |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 211840,
"end": 216303
} | class ____(TestCase):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
actual = _dtype_from_pep3118(spec)
assert_equal(actual, dt, err_msg=f"spec {spec!r} != dtype {wanted!r}")
def test_native_padding(self):
align = np.dtype("i").alignment
for j in range(8):
if j == 0:
s = "bi"
else:
s = f"b{j:d}xi"
self._check(
"@" + s, {"f0": ("i1", 0), "f1": ("i", align * (1 + j // align))}
)
self._check("=" + s, {"f0": ("i1", 0), "f1": ("i", 1 + j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check("x3T{xi}", {"f0": (({"f0": ("i", 4)}, (3,)), 4)})
self._check("^x3T{xi}", {"f0": (({"f0": ("i", 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype("i").alignment
size = np.dtype("i").itemsize
def aligned(n):
return align * (1 + (n - 1) // align)
base = dict(formats=["i"], names=["f0"])
self._check("ix", dict(itemsize=aligned(size + 1), **base))
self._check("ixx", dict(itemsize=aligned(size + 2), **base))
self._check("ixxx", dict(itemsize=aligned(size + 3), **base))
self._check("ixxxx", dict(itemsize=aligned(size + 4), **base))
self._check("i7x", dict(itemsize=aligned(size + 7), **base))
self._check("^ix", dict(itemsize=size + 1, **base))
self._check("^ixx", dict(itemsize=size + 2, **base))
self._check("^ixxx", dict(itemsize=size + 3, **base))
self._check("^ixxxx", dict(itemsize=size + 4, **base))
self._check("^i7x", dict(itemsize=size + 7, **base))
def test_native_padding_3(self):
dt = np.dtype(
[("a", "b"), ("b", "i"), ("sub", np.dtype("b,i")), ("c", "i")], align=True
)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[
("a", "b"),
("b", "i"),
("c", "b"),
("d", "b"),
("e", "b"),
("sub", np.dtype("b,i", align=True)),
]
)
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[("a", "b"), ("b", "i"), ("c", "b", (3,)), ("d", "i")], align=True
)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check("@T{^i}xi", {"f0": ({"f0": ("i", 0)}, 0), "f1": ("i", 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype("i").alignment
size = np.dtype("i").itemsize
def aligned(n):
return align * (1 + (n - 1) // align)
self._check(
"(3)T{ix}",
(
dict(
names=["f0"], formats=["i"], offsets=[0], itemsize=aligned(size + 1)
),
(3,),
),
)
def test_char_vs_string(self):
dt = np.dtype("c")
self._check("c", dt)
dt = np.dtype([("f0", "S1", (4,)), ("f1", "S4")])
self._check("4c4s", dt)
def test_field_order(self):
# gh-9053 - previously, we relied on dictionary key order
self._check("(0)I:a:f:b:", [("a", "I", (0,)), ("b", "f")])
self._check("(0)I:b:f:a:", [("b", "I", (0,)), ("a", "f")])
def test_unnamed_fields(self):
self._check("ii", [("f0", "i"), ("f1", "i")])
self._check("ii:f0:", [("f1", "i"), ("f0", "i")])
self._check("i", "i")
self._check("i:f0:", [("f0", "i")])
# NOTE: xpassIfTorchDynamo_np below
# 1. TODO: torch._numpy does not handle/model _CopyMode
# 2. order= keyword not supported (probably won't be)
# 3. Under TEST_WITH_TORCHDYNAMO many of these make it through due
# to a graph break leaving the _CopyMode to only be handled by numpy.
@skipif(numpy.__version__ < "1.23", reason="CopyMode is new in NumPy 1.22")
@xpassIfTorchDynamo_np
@instantiate_parametrized_tests
| TestPEP3118Dtype |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 19833,
"end": 24005
} | class ____(BaseDataset):
"""
Feature: Datasets created with specified fill time property
"""
def test_fill_time_default(self):
""" Fill time default to IFSET """
dset = self.f.create_dataset(make_name(), (10,), fillvalue=4.0)
plist = dset.id.get_create_plist()
self.assertEqual(plist.get_fill_time(), h5py.h5d.FILL_TIME_IFSET)
self.assertEqual(dset[0], 4.0)
self.assertEqual(dset[7], 4.0)
@ut.skipIf('gzip' not in h5py.filters.encode, "DEFLATE is not installed")
def test_compressed_default(self):
""" Fill time is IFSET for compressed dataset (chunked) """
dset = self.f.create_dataset(make_name(), (10,), compression='gzip',
fillvalue=4.0)
plist = dset.id.get_create_plist()
self.assertEqual(plist.get_fill_time(), h5py.h5d.FILL_TIME_IFSET)
self.assertEqual(dset[0], 4.0)
self.assertEqual(dset[7], 4.0)
def test_fill_time_never(self):
""" Fill time set to NEVER """
dset = self.f.create_dataset(make_name(), (10,), fillvalue=4.0,
fill_time='never')
plist = dset.id.get_create_plist()
self.assertEqual(plist.get_fill_time(), h5py.h5d.FILL_TIME_NEVER)
# should not be equal to the explicitly set fillvalue
self.assertNotEqual(dset[0], 4.0)
self.assertNotEqual(dset[7], 4.0)
def test_fill_time_alloc(self):
""" Fill time explicitly set to ALLOC """
dset = self.f.create_dataset(make_name(), (10,), fillvalue=4.0,
fill_time='alloc')
plist = dset.id.get_create_plist()
self.assertEqual(plist.get_fill_time(), h5py.h5d.FILL_TIME_ALLOC)
def test_fill_time_ifset(self):
""" Fill time explicitly set to IFSET """
dset = self.f.create_dataset(make_name(), (10,), chunks=(2,), fillvalue=4.0,
fill_time='ifset')
plist = dset.id.get_create_plist()
self.assertEqual(plist.get_fill_time(), h5py.h5d.FILL_TIME_IFSET)
def test_invalid_fill_time(self):
""" Choice of fill_time is 'alloc', 'never', 'ifset' """
with self.assertRaises(ValueError):
dset = self.f.create_dataset(make_name(), (10,), fill_time='fill_bad')
def test_non_str_fill_time(self):
""" fill_time must be a string """
with self.assertRaises(ValueError):
dset = self.f.create_dataset(make_name(), (10,), fill_time=2)
def test_resize_chunk_fill_time_default(self):
""" The resize dataset will be filled (by default fill value 0) """
dset = self.f.create_dataset(make_name(), (50, ), maxshape=(100, ),
chunks=(5, ))
plist = dset.id.get_create_plist()
self.assertEqual(plist.get_fill_time(), h5py.h5d.FILL_TIME_IFSET)
assert np.isclose(dset[:], 0.0).all()
dset.resize((100, ))
assert np.isclose(dset[:], 0.0).all()
def test_resize_chunk_fill_time_never(self):
""" The resize dataset won't be filled """
dset = self.f.create_dataset(make_name(), (50, ), maxshape=(100, ),
fillvalue=4.0, fill_time='never',
chunks=(5, ))
plist = dset.id.get_create_plist()
self.assertEqual(plist.get_fill_time(), h5py.h5d.FILL_TIME_NEVER)
assert not np.isclose(dset[:], 4.0).any()
dset.resize((100, ))
assert not np.isclose(dset[:], 4.0).any()
@pytest.mark.parametrize('dt,expected', [
(int, 0),
(np.int32, 0),
(np.int64, 0),
(float, 0.0),
(np.float32, 0.0),
(np.float64, 0.0),
(h5py.string_dtype(encoding='utf-8', length=5), b''),
(h5py.string_dtype(encoding='ascii', length=5), b''),
(h5py.string_dtype(encoding='utf-8'), b''),
(h5py.string_dtype(encoding='ascii'), b''),
(h5py.string_dtype(), b''),
])
def test_get_unset_fill_value(dt, expected, writable_file):
dset = writable_file.create_dataset(make_name(), (10,), dtype=dt)
assert dset.fillvalue == expected
| TestFillTime |
python | numpy__numpy | numpy/_core/tests/test_numeric.py | {
"start": 135117,
"end": 137991
} | class ____:
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.xs = np.arange(1, 20)[::3]
self.y = np.array([-1, -2, -3], dtype=dt)
self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt)
self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt)
self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt)
self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt)
self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt)
self.zs = np.array([-3., -14., -30., -48., -66., -84.,
-102., -54., -19.], dtype=dt)
def test_float(self):
self._setup(float)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.x, self.y[:-1], 'full')
assert_array_almost_equal(z, self.z1_4)
z = np.correlate(self.y, self.x, 'full')
assert_array_almost_equal(z, self.z2)
z = np.correlate(self.x[::-1], self.y, 'full')
assert_array_almost_equal(z, self.z1r)
z = np.correlate(self.y, self.x[::-1], 'full')
assert_array_almost_equal(z, self.z2r)
z = np.correlate(self.xs, self.y, 'full')
assert_array_almost_equal(z, self.zs)
def test_object(self):
self._setup(Decimal)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.y, self.x, 'full')
assert_array_almost_equal(z, self.z2)
def test_no_overwrite(self):
d = np.ones(100)
k = np.ones(3)
np.correlate(d, k)
assert_array_equal(d, np.ones(100))
assert_array_equal(k, np.ones(3))
def test_complex(self):
x = np.array([1, 2, 3, 4 + 1j], dtype=complex)
y = np.array([-1, -2j, 3 + 1j], dtype=complex)
r_z = np.array([3 - 1j, 6, 8 + 1j, 11 + 5j, -5 + 8j, -4 - 1j], dtype=complex)
r_z = r_z[::-1].conjugate()
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
def test_zero_size(self):
with pytest.raises(ValueError):
np.correlate(np.array([]), np.ones(1000), mode='full')
with pytest.raises(ValueError):
np.correlate(np.ones(1000), np.array([]), mode='full')
def test_mode(self):
d = np.ones(100)
k = np.ones(3)
default_mode = np.correlate(d, k, mode='valid')
with assert_raises(ValueError):
np.correlate(d, k, mode='v')
# integer mode
with assert_raises(ValueError):
np.correlate(d, k, mode=-1)
# assert_array_equal(np.correlate(d, k, mode=), default_mode)
# illegal arguments
with assert_raises(TypeError):
np.correlate(d, k, mode=None)
| TestCorrelate |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/spanner.py | {
"start": 8528,
"end": 13413
} | class ____(GoogleCloudBaseOperator):
"""
Executes an arbitrary DML query (INSERT, UPDATE, DELETE).
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SpannerQueryDatabaseInstanceOperator`
:param instance_id: The Cloud Spanner instance ID.
:param database_id: The Cloud Spanner database ID.
:param query: The query or list of queries to be executed. Can be a path to a SQL
file.
:param project_id: Optional, the ID of the project that owns the Cloud Spanner
Database. If set to None or missing, the default project_id from the Google Cloud connection is used.
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcp_spanner_query_template_fields]
template_fields: Sequence[str] = (
"project_id",
"instance_id",
"database_id",
"query",
"gcp_conn_id",
"impersonation_chain",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"query": "sql"}
# [END gcp_spanner_query_template_fields]
operator_extra_links = (SpannerDatabaseLink(),)
def __init__(
self,
*,
instance_id: str,
database_id: str,
query: str | list[str],
project_id: str = PROVIDE_PROJECT_ID,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.instance_id = instance_id
self.project_id = project_id
self.database_id = database_id
self.query = query
self.gcp_conn_id = gcp_conn_id
self._validate_inputs()
self.impersonation_chain = impersonation_chain
super().__init__(**kwargs)
@cached_property
def hook(self) -> SpannerHook:
    """Return a SpannerHook built from this operator's connection settings."""
    hook_kwargs = {
        "gcp_conn_id": self.gcp_conn_id,
        "impersonation_chain": self.impersonation_chain,
    }
    return SpannerHook(**hook_kwargs)
def _validate_inputs(self) -> None:
    """Raise AirflowException when a required parameter is missing.

    Note that ``project_id`` is rejected only when it is an empty string —
    ``None`` is allowed (the connection default is used instead).
    """
    if self.project_id == "":
        raise AirflowException("The required parameter 'project_id' is empty")
    for param_name, param_value in (
        ("instance_id", self.instance_id),
        ("database_id", self.database_id),
    ):
        if not param_value:
            raise AirflowException(f"The required parameter '{param_name}' is empty or None")
    if not self.query:
        raise AirflowException("The required parameter 'query' is empty")
def execute(self, context: Context):
    """Execute the configured DML statement(s) against the Spanner database.

    A string ``query`` is split on ";" into individual statements (a single
    trailing empty statement is dropped); a list is executed as given.
    Returns the per-query result reported by ``hook.execute_dml``.
    """
    if isinstance(self.query, str):
        statements = [part.strip() for part in self.query.split(";")]
        self.sanitize_queries(statements)
    else:
        statements = self.query
    self.log.info(
        "Executing DML query(-ies) on projects/%s/instances/%s/databases/%s",
        self.project_id,
        self.instance_id,
        self.database_id,
    )
    self.log.info("Executing queries: %s", statements)
    affected_rows = self.hook.execute_dml(
        project_id=self.project_id,
        instance_id=self.instance_id,
        database_id=self.database_id,
        queries=statements,
    )
    # Fall back to the hook's project when the operator did not pin one.
    SpannerDatabaseLink.persist(
        context=context,
        instance_id=self.instance_id,
        database_id=self.database_id,
        project_id=self.project_id or self.hook.project_id,
    )
    return affected_rows
@staticmethod
def sanitize_queries(queries: list[str]) -> None:
"""
Drop empty query in queries.
:param queries: queries
"""
if queries and queries[-1] == "":
queries.pop()
def get_openlineage_facets_on_complete(self, task_instance) -> OperatorLineage | None:
    """Build a generic OpenLineage facet, aligned with SQL-based operators."""
    # Imported lazily so OpenLineage support stays optional at import time.
    from airflow.providers.common.compat.openlineage.utils.sql import get_openlineage_facets_with_sql

    lineage_kwargs = {
        "hook": self.hook,
        "sql": self.query,
        "conn_id": self.gcp_conn_id,
        "database": self.database_id,
    }
    return get_openlineage_facets_with_sql(**lineage_kwargs)
| SpannerQueryDatabaseInstanceOperator |
python | huggingface__transformers | src/transformers/models/cvt/modeling_cvt.py | {
"start": 6203,
"end": 9568
class ____(nn.Module):
    """Convolutional self-attention.

    Queries, keys and values are produced by convolutional projections of the
    spatial feature map before the usual multi-head attention computation.
    When ``with_cls_token`` is set, the class token bypasses the convolutions
    and is concatenated back in front of the projected sequences.
    """

    def __init__(
        self,
        num_heads,
        embed_dim,
        kernel_size,
        padding_q,
        padding_kv,
        stride_q,
        stride_kv,
        qkv_projection_method,
        qkv_bias,
        attention_drop_rate,
        with_cls_token=True,
        **kwargs,
    ):
        super().__init__()
        self.scale = embed_dim**-0.5
        self.with_cls_token = with_cls_token
        self.embed_dim = embed_dim
        self.num_heads = num_heads

        # The query projection never average-pools; "avg" degrades to "linear".
        query_method = "linear" if qkv_projection_method == "avg" else qkv_projection_method
        self.convolution_projection_query = CvtSelfAttentionProjection(
            embed_dim, kernel_size, padding_q, stride_q, projection_method=query_method
        )
        self.convolution_projection_key = CvtSelfAttentionProjection(
            embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method
        )
        self.convolution_projection_value = CvtSelfAttentionProjection(
            embed_dim, kernel_size, padding_kv, stride_kv, projection_method=qkv_projection_method
        )

        self.projection_query = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.projection_key = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.projection_value = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.dropout = nn.Dropout(attention_drop_rate)

    def rearrange_for_multi_head_attention(self, hidden_state):
        """Reshape (batch, tokens, heads*dim) into (batch, heads, tokens, dim)."""
        batch, tokens, _ = hidden_state.shape
        per_head = self.embed_dim // self.num_heads
        split_heads = hidden_state.view(batch, tokens, self.num_heads, per_head)
        return split_heads.permute(0, 2, 1, 3)

    def forward(self, hidden_state, height, width):
        if self.with_cls_token:
            # Split the class token off; only the spatial tokens go through convs.
            cls_token, hidden_state = torch.split(hidden_state, [1, height * width], 1)
        batch, _, channels = hidden_state.shape
        # (batch, h*w, channels) -> (batch, channels, h, w) for the conv projections.
        spatial = hidden_state.permute(0, 2, 1).view(batch, channels, height, width)

        key = self.convolution_projection_key(spatial)
        query = self.convolution_projection_query(spatial)
        value = self.convolution_projection_value(spatial)

        if self.with_cls_token:
            query = torch.cat((cls_token, query), dim=1)
            key = torch.cat((cls_token, key), dim=1)
            value = torch.cat((cls_token, value), dim=1)

        per_head = self.embed_dim // self.num_heads
        query = self.rearrange_for_multi_head_attention(self.projection_query(query))
        key = self.rearrange_for_multi_head_attention(self.projection_key(key))
        value = self.rearrange_for_multi_head_attention(self.projection_value(value))

        scores = torch.einsum("bhlk,bhtk->bhlt", [query, key]) * self.scale
        weights = self.dropout(torch.nn.functional.softmax(scores, dim=-1))
        context = torch.einsum("bhlt,bhtv->bhlv", [weights, value])

        # (batch, heads, tokens, dim) -> (batch, tokens, heads*dim)
        _, _, tokens, _ = context.shape
        return context.permute(0, 2, 1, 3).contiguous().view(batch, tokens, self.num_heads * per_head)
| CvtSelfAttention |
python | pdm-project__pdm | src/pdm/installers/installers.py | {
"start": 1358,
"end": 3143
class ____(WheelSource):
    """A ``WheelSource`` backed by an unpacked wheel in the local package cache."""

    def __init__(self, package: CachedPackage) -> None:
        self.package = package
        # Cache directories are named "<distribution>-<version>-...".
        distribution, version = package.path.name.split("-")[:2]
        super().__init__(distribution, version)

    @cached_property
    def dist_info_dir(self) -> str:
        """Name of the package's ``.dist-info`` directory."""
        return self.package.dist_info.name

    @property
    def dist_info_filenames(self) -> list[str]:
        """Filenames contained in the ``.dist-info`` directory."""
        return os.listdir(self.package.dist_info)

    def read_dist_info(self, filename: str) -> str:
        """Read a ``.dist-info`` file as UTF-8 text."""
        return self.package.dist_info.joinpath(filename).read_text("utf-8")

    def iter_files(self) -> Iterable[Path]:
        """Yield every file in the cached package, skipping cache bookkeeping files."""
        root_dir = self.package.path
        for dirpath, _dirnames, filenames in os.walk(root_dir):
            parent = Path(dirpath)
            at_root = parent == root_dir
            for name in filenames:
                # The cache's own metadata files at the top level are not wheel content.
                if at_root and name in CachedPackage.cache_files:
                    continue
                yield parent / name

    def get_contents(self) -> Iterator[WheelContentElement]:
        """Yield ``(record, stream, is_executable)`` for every file in the wheel."""
        from installer.records import parse_record_file

        record_rows = parse_record_file(self.read_dist_info("RECORD").splitlines())
        records_by_path = {row[0]: row for row in record_rows}
        for file_path in self.iter_files():
            relative = file_path.relative_to(self.package.path).as_posix()
            # Missing entries fall back to an empty record; `validate_record`
            # is responsible for actual validation.
            record = records_by_path.pop(relative, (relative, "", ""))
            # Borrowed from:
            # https://github.com/pypa/pip/blob/0f21fb92/src/pip/_internal/utils/unpacking.py#L96-L100
            file_mode = file_path.stat().st_mode
            is_executable = bool(file_mode and stat.S_ISREG(file_mode) and file_mode & 0o111)
            with file_path.open("rb") as stream:
                yield record, stream, is_executable
| PackageWheelSource |
python | google__jax | jax/experimental/mosaic/gpu/launch_context.py | {
"start": 15490,
"end": 60734
class ____:
  # NOTE(review): fields use dataclasses.field, so this is presumably
  # decorated with @dataclasses.dataclass just above this chunk — confirm.
  # The MLIR module the kernel is being built into.
  module: ir.Module
  # GMEM scratch allocator; filled on the host, copied to device at launch.
  scratch: Scratch
  # Number of blocks along each cluster dimension (x, y, z).
  cluster_size: tuple[int, int, int]
  profiler: OnDeviceProfiler | None = None
  # Cache of TMA descriptors keyed by (GMEM ref, transformed slice shape,
  # swizzle, transforms, peer id) so identical copies share one descriptor.
  tma_descriptors: dict[
      tuple[ir.Value, tuple[int, ...], int | None, tuple[MemRefTransform, ...], Any],
      ir.Value,
  ] = dataclasses.field(default_factory=dict, init=False)
  is_device_collective: bool = False
@contextlib.contextmanager
def named_region(self, *args, **kwargs):
  """Wrap the body in a profiler region when profiling is enabled.

  All arguments are forwarded to ``profiler.record``; without a profiler the
  body simply runs unwrapped.
  """
  if self.profiler is None:
    yield
  else:
    with self.profiler.record(*args, **kwargs):
      yield
def cluster_idx(
    self, dim: gpu.Dimension | Sequence[gpu.Dimension] | None = None
) -> ir.Value:
  """Returns the index of a block within a subset of the cluster spanned by the given dimensions."""
  if dim is None:
    dim = gpu.Dimension  # All cluster dimensions.
  elif isinstance(dim, gpu.Dimension):
    dim = (dim,)  # Normalize a single dimension to a tuple.
  index = ir.IndexType.get()
  # Linearize the block id over the selected dims; the smallest sorted dim
  # gets stride 1 and so varies fastest.
  stride = 1
  idx = c(0, index)
  for d in sorted(dim):
    if self.cluster_size[d] == 1:  # Optimize a multiply by 0.
      continue
    idx = arith.addi(idx, arith.muli(gpu.cluster_block_id(d), c(stride, index)))
    stride *= self.cluster_size[d]
  return idx
def _alloc_scratch(
    self,
    size: int,
    alignment: int | None = None,
    host_init: Callable[[ir.Value], None] = lambda _: None,
    device_init: Callable[[ir.Value], Any] = lambda x: x,
) -> ir.Value:
  """Allocates a GMEM scratch buffer.

  The buffer is initialized on the host and then copied to GMEM before the
  kernel launch.

  Args:
    size: Size of the allocation in bytes.
    alignment: Required alignment in bytes; defaults to ``size``.
    host_init: Callback invoked with the host pointer to this allocation so
      the caller can fill in its initial contents.
    device_init: Callback invoked with the device pointer to this allocation;
      its result is what this function returns.
  """
  i8 = ir.IntegerType.get_signless(8)
  ptr_ty = ir.Type.parse("!llvm.ptr")
  if alignment is None:
    alignment = size
  if self.scratch.next_offset % alignment:
    raise NotImplementedError  # TODO(apaszke): Pad to match alignment
  # Reserve [alloc_base, alloc_base + size) within the scratch buffer.
  alloc_base = self.scratch.next_offset
  self.scratch.next_offset += size
  def host_init_wrapped(host_ptr):
    # Offset the whole-scratch host pointer to this allocation's base.
    host_init(
        llvm.getelementptr(ptr_ty, host_ptr, [], [alloc_base], i8, llvm.GEPNoWrapFlags.none)
    )
  self.scratch.host_init.append(host_init_wrapped)
  # with ir.InsertionPoint(self.gmem_scratch_ptr.owner):
  # There is no way to create an insertion point after an operation...
  gep = llvm.GEPOp(
      ptr_ty, self.scratch.device_ptr(), [], [alloc_base], i8, llvm.GEPNoWrapFlags.none
  )
  gep.move_after(self.scratch.device_ptr().owner)
  return device_init(gep.result)
def _get_tma_desc(
    self,
    gmem_ref: ir.Value,
    gmem_transform: tuple[MemRefTransform, ...],
    gmem_peer_id: int | ir.Value | GlobalBroadcast | None,
    transformed_slice_shape: tuple[int, ...],
    swizzle: int | None,
    reduction_op: TMAReductionOp | None,
):
  """Returns a (cached) TMA descriptor for the given GMEM ref and copy parameters.

  The descriptor lives in a GMEM scratch slot and is filled in on the host by
  the ``mosaic_gpu_init_tma_desc`` runtime call. Descriptors are cached per
  (ref, transformed shape, swizzle, transforms, peer id) combination.
  """
  gmem_ref = _find_kernel_argument_for_gmem_ref(gmem_ref)
  # Using ir.Values in cache keys is a little sketchy, but I think it should
  # be fine. Having it in the key will keep it alive, and if comparison and
  # hashing is by identity then it should work out.
  tma_desc_key = (gmem_ref, transformed_slice_shape, swizzle, gmem_transform, gmem_peer_id)
  if (tma_desc := self.tma_descriptors.get(tma_desc_key, None)) is None:
    i32 = ir.IntegerType.get_signless(32)
    i64 = ir.IntegerType.get_signless(64)
    ptr_ty = ir.Type.parse("!llvm.ptr")
    def init_tma_desc(host_ptr):
      # Host-side callback: compute the GMEM base pointer, sizes and strides
      # of the transformed ref, then fill the descriptor at host_ptr.
      ref = gmem_ref
      for t in gmem_transform:
        ref = t.apply(ref)
      ref_ty = ir.MemRefType(ref.type)
      # TODO(apaszke): Use utils.memref_ptr to compute base_ptr
      strides, _ = ref_ty.get_strides_and_offset()
      if strides[-1] != 1:
        raise ValueError(
            "TMA requires the stride of the last dimension after"
            " transforming the GMEM reference to be 1, but it is"
            f" {strides[-1]}."
        )
      _, offset, *sizes_and_strides = memref.extract_strided_metadata(ref)
      aligned_ptr_idx = memref.extract_aligned_pointer_as_index(ref)
      as_i64 = lambda i: arith.index_cast(i64, i)
      alloc_ptr = llvm.inttoptr(ptr_ty, as_i64(aligned_ptr_idx))
      llvm_dyn = -2147483648  # TODO(apaszke): Improve the MLIR bindings...
      base_ptr = llvm.getelementptr(
          ptr_ty, alloc_ptr, [as_i64(offset)], [llvm_dyn], ref_ty.element_type, llvm.GEPNoWrapFlags.none,
      )
      if isinstance(gmem_peer_id, GlobalBroadcast):
        # Broadcast: translate to the NVSHMEM multicast address for the
        # whole world team (team id 0).
        self._ensure_nvshmem_decls()
        world_team = arith.constant(i32, 0)
        base_ptr = llvm.call(
            base_ptr.type,
            [world_team, base_ptr],
            [],
            [],
            callee="nvshmemx_mc_ptr",
        )
      elif gmem_peer_id is not None:
        if not isinstance(gmem_peer_id, ir.Value):
          peer_id = c(gmem_peer_id, i32)
        else:
          try:
            # We try to reproduce the gmem_peer_id computation on the host.
            peer_id = _recompute_peer_id(gmem_peer_id, fuel=16)
          except ReplicationError as e:
            raise ValueError(
                "Failed to recompute the async_copy peer id on the host"
            ) from e
        self._ensure_nvshmem_decls()
        base_ptr = llvm.call(
            base_ptr.type,
            [base_ptr, peer_id],
            [],
            [],
            callee="nvshmem_ptr",
        )
      rank = ref_ty.rank
      assert rank * 2 == len(sizes_and_strides)
      swizzle_arg = (
          mgpu_dialect.SwizzlingMode.kNoSwizzle
          if swizzle is None
          else swizzle
      )
      # TODO(apaszke): Better verification (e.g. slice is non-zero)
      # TODO(apaszke): We always know strides statically.
      # Map the element type to the numeric dtype code the runtime call expects.
      if isinstance(ref_ty.element_type, ir.IntegerType):
        if reduction_op is not None:
          raise ValueError(
              f"TMA with reduction_op={reduction_op} is not supported with Integers"
          )
        bitwidth = utils.bitwidth_impl(ref_ty.element_type)
        if bitwidth == 2:
          tma_dtype = 8
        elif bitwidth == 4:
          tma_dtype = 0
        elif bitwidth == 8:
          tma_dtype = 1
        elif bitwidth == 16:
          tma_dtype = 2
        elif bitwidth == 32:
          tma_dtype = 3
        elif bitwidth == 64:
          tma_dtype = 4
        else:
          raise ValueError(f"Unsupported integer bitwidth: {bitwidth}")
      elif ir.F16Type.isinstance(ref_ty.element_type):
        tma_dtype = 5
      elif ir.F32Type.isinstance(ref_ty.element_type):
        tma_dtype = 6
      elif ir.BF16Type.isinstance(ref_ty.element_type):
        tma_dtype = 7
      # We treat narrow floats as integers
      elif ir.Float8E5M2Type.isinstance(ref_ty.element_type):
        tma_dtype = 1
      elif ir.Float8E4M3FNType.isinstance(ref_ty.element_type):
        tma_dtype = 1
      elif ir.Float8E8M0FNUType.isinstance(ref_ty.element_type):
        tma_dtype = 1
      elif ir.Float4E2M1FNType.isinstance(ref_ty.element_type):
        tma_dtype = 0
      else:
        raise ValueError(f"unsupported TMA dtype {ref_ty.element_type}")
      dtype_or_bitwidth = c(tma_dtype, i64)
      args = [
          host_ptr,
          base_ptr,
          dtype_or_bitwidth,
          c(rank, i64),
          utils.pack_array([as_i64(i) for i in sizes_and_strides[:rank]]),
          utils.pack_array([as_i64(i) for i in sizes_and_strides[rank:]]),
          c(swizzle_arg, i64),
          utils.pack_array([c(v, i64) for v in transformed_slice_shape]),
      ]
      func.call([], "mosaic_gpu_init_tma_desc", args)
    def cast_tma_desc(device_ptr):
      # TODO(apaszke): Investigate why prefetching can cause launch failures
      # nvvm.prefetch_tensormap(device_ptr)
      return device_ptr
    tma_desc = self._alloc_scratch(
        TMA_DESCRIPTOR_BYTES,
        alignment=TMA_DESCRIPTOR_ALIGNMENT,
        host_init=init_tma_desc,
        device_init=cast_tma_desc,
    )
    self.tma_descriptors[tma_desc_key] = tma_desc
  return tma_desc
def _prepare_async_copy(
    self,
    gmem_ref: ir.Value,
    gmem_slice: Any,
    gmem_transform: tuple[MemRefTransform, ...],
    collective: Sequence[gpu.Dimension] | None,
    partitioned: int | None,
    implementation: AsyncCopyImplementation,
):
  """Performs setup common to TMA and CP_ASYNC implementations.

  Parses and validates ``gmem_slice`` (including gather indices), applies
  collective partitioning, and commutes the indexing with the user-supplied
  GMEM transforms. Returns ``(slice_shape, dyn_base_indices, squeezed_dims,
  gather_indices, gmem_transform)``.
  """
  index = ir.IndexType.get()
  gmem_ref_ty = ir.MemRefType(gmem_ref.type)
  gmem_strides, _ = gmem_ref_ty.get_strides_and_offset()
  if gmem_strides != utils.get_contiguous_strides(gmem_ref_ty.shape):
    raise NotImplementedError(
        "async_copy assumes the GMEM reference is contiguous"
    )
  # Look for and verify gather indices in gmem_slice.
  is_gathered_dim = [isinstance(s, fa.FragmentedArray) for s in gmem_slice]
  gather_indices: fa.FragmentedArray | None = None
  if any(is_gathered_dim):
    if is_gathered_dim != [True, False]:
      raise NotImplementedError(
          "Gathers/scatters only supported along the first dimension of 2D"
          " arrays"
      )
    gather_indices = gmem_slice[0]
    if not isinstance(gather_indices, fa.FragmentedArray):
      raise ValueError("Gather/scatter indices must be a FragmentedArray")
    if len(gather_indices.shape) != 1:
      raise ValueError("Gather/scatter indices must be 1D")
    idx_dtype = gather_indices.mlir_dtype
    if not ir.IntegerType.isinstance(idx_dtype) or utils.bitwidth(idx_dtype) > 32:
      raise ValueError("Gather/scatter indices must be integers that are at most 32-bit wide")
    if gather_indices.is_signed:
      raise ValueError("Gather/scatter indices must be unsigned")
    # The gathered dimension is sliced fully here; the indices are applied later.
    gmem_slice = (slice(None), *gmem_slice[1:])
  # Analyze the slice (taking gathers into account).
  base_indices, slice_shape, is_squeezed = utils.parse_indices(
      gmem_slice,
      ir.MemRefType(gmem_ref.type).shape,
      # NOTE: TMA supports OOB indices, so we skip the check.
      check_oob=implementation != AsyncCopyImplementation.TMA,
  )
  if gather_indices is not None:
    slice_shape = [gather_indices.shape[0], *slice_shape[1:]]
  del gmem_slice  # Use slice_shape, base_indices and is_squeezed from now on!
  dyn_base_indices = tuple(
      c(i, index) if not isinstance(i, ir.Value) else i for i in base_indices
  )
  del base_indices  # Use the dynamic indices from now on!
  # Deal with collective and partitioned loads.
  if collective:
    if implementation != AsyncCopyImplementation.TMA:
      raise ValueError("Only the TMA implementation supports collective copies")
    if gather_indices is not None:
      raise NotImplementedError("Collective copies with gather/scatter unsupported")
  if partitioned is not None:
    # Increment partitioned by the number of preceding squeezed dimensions.
    partitioned = np.where(
        np.cumsum(~np.array(is_squeezed)) == partitioned+1)[0][0]
    # Partitioning happens on the logical slice we extract from GMEM, so we do
    # it before we apply transforms.
    if not collective:  # This implies non-gather TMA already.
      raise ValueError("Only collective loads can be partitioned")
    collective_size = math.prod(self.cluster_size[d] for d in collective)
    if collective_size > 1:
      if math.prod(self.cluster_size) != 2:
        raise NotImplementedError(
            "Partitioned loads only supported for clusters of size 2"
        )
      if slice_shape[partitioned] % collective_size != 0:
        raise ValueError(
            f"The collective size ({collective_size}) must divide the slice"
            " shape along the partitioned dimension, but it has size"
            f" {slice_shape[partitioned]}"
        )
      # Shrink the slice to this block's chunk and offset its base index by
      # the block's position within the collective.
      slice_shape[partitioned] //= collective_size
      dyn_base_indices = list(dyn_base_indices)  # type: ignore[assignment]
      dyn_base_indices[partitioned] = arith.addi(  # type: ignore[index]
          dyn_base_indices[partitioned],
          arith.muli(
              self.cluster_idx(collective), c(slice_shape[partitioned], index)
          ),
      )
      dyn_base_indices = tuple(dyn_base_indices)
  squeezed_dims = tuple(
      i for i, squeezed in enumerate(is_squeezed) if squeezed
  )
  # Indexing is really slicing + squeezing, and user transforms are meant to
  # apply after that. However, we actually have to apply the indexing last
  # (it's fused into the TMA) and so we need to commute it with all the user
  # transforms. For slicing this is done using transform_index and
  # transform_shape. For squeezing we actually move all the squeezed dims to
  # the front, and then batch each transform, making it ignore the extra dims.
  if squeezed_dims and implementation != AsyncCopyImplementation.CP_ASYNC:
    sliced_dims = [i for i, squeezed in enumerate(is_squeezed) if not squeezed]
    gmem_transform = (TransposeTransform((*squeezed_dims, *sliced_dims)),
                      *(t.batch(len(squeezed_dims)) for t in gmem_transform))
  slice_shape = tuple(slice_shape)
  for t in gmem_transform:
    dyn_base_indices = t.transform_index(dyn_base_indices)
    slice_shape = t.transform_shape(slice_shape)
  return (
      list(slice_shape),
      dyn_base_indices,
      squeezed_dims,
      gather_indices,
      gmem_transform,
  )
def _prepare_tma(
    self,
    gmem_ref: ir.Value,
    smem_ref: ir.Value | None,
    swizzle: int | None,
    slice_shape: list[int],
    dyn_base_indices: tuple[ir.Value, ...],
    gather_indices,
    squeezed_dims: tuple[int, ...],
    gmem_transform: tuple[MemRefTransform, ...],
    collective: Sequence[gpu.Dimension],
    partitioned: int | None,
):
  """Finalizes setup specific to the TMA implementation of async_copy.

  Verifies TMA constraints (stride alignment, rank limit, per-dim element
  limits, swizzle), collapses excess squeezed dims into one, and partitions
  non-partitioned collective loads across the cluster. Returns the (possibly
  sliced) SMEM ref plus updated slice shape, base indices and transforms.
  """
  index = ir.IndexType.get()
  # The function below is called only to verify the GMEM ref. The output
  # is meant to be ignored.
  _find_kernel_argument_for_gmem_ref(gmem_ref)
  gmem_ref_ty = ir.MemRefType(gmem_ref.type)
  element_bitwidth = utils.bitwidth(gmem_ref_ty.element_type)
  gmem_strides, _ = gmem_ref_ty.get_strides_and_offset()
  if any(s * element_bitwidth % 128 != 0 for s in gmem_strides[:-1]):
    raise ValueError(
        "async_copy requires all GMEM strides except the last one to be a"
        " multiple of 16 bytes"
    )
  # We don't need to do this for gather TMAs, because we'll unroll the
  # transfers ourselves anyway.
  num_squeezed_dims = len(squeezed_dims)
  if len(slice_shape) > 5 and gather_indices is None:
    # We can try to collapse all squeezed dims into one.
    if len(slice_shape) - num_squeezed_dims + 1 > 5:
      raise ValueError(
          "Async copies only support striding up to 5 dimensions"
      )
    squeezed_dim_strides = tuple(gmem_strides[d] for d in squeezed_dims)
    collapse = CollapseLeadingIndicesTransform(squeezed_dim_strides)
    gmem_transform = (*gmem_transform, collapse)
    dyn_base_indices = collapse.transform_index(dyn_base_indices)
    slice_shape = list(collapse.transform_shape(tuple(slice_shape)))
    num_squeezed_dims = 1
  dyn_base_indices = list(dyn_base_indices)
  slice_shape = list(slice_shape)
  assert all(d == 1 for d in slice_shape[:num_squeezed_dims])
  # Partitioned loads have already been processed (before transforms).
  # We process non-partitioned collective loads here, because only here are we
  # able to know in what order the data will be written to SMEM. Transposes
  # and tiling change that order and if we picked a partition based on the
  # untransformed slice shape, we might have ended up with a non-contiguous
  # SMEM window, which would no longer be realizable in a single TMA transfer.
  collective_size = math.prod(self.cluster_size[d] for d in collective)  # type: ignore
  if collective_size > 1 and partitioned is None:
    assert gather_indices is None  # Checked above.
    def partition_dim(dim: int, idx: ir.Value, num_chunks: int):
      # Shrinks slice_shape[dim] by num_chunks and offsets both the GMEM base
      # index and the SMEM window to this block's chunk.
      # No need to partition squeezed dims. They don't even exist in smem_ref.
      assert dim >= num_squeezed_dims
      nonlocal smem_ref
      slice_shape[dim] //= num_chunks
      block_offset = arith.muli(idx, c(slice_shape[dim], index))
      dyn_base_indices[dim] = arith.addi(dyn_base_indices[dim], block_offset)  # type: ignore[index]
      if smem_ref is not None:
        smem_ref = utils.memref_slice(
            smem_ref,
            (slice(None),) * (dim - num_squeezed_dims)
            + (utils.ds(block_offset, slice_shape[dim]),),
        )
    idx = self.cluster_idx(collective)
    rem_collective_size = collective_size
    has_swizzle = (
        swizzle is not None
        and swizzle != mgpu_dialect.SwizzlingMode.kNoSwizzle
    )
    # We can partition the minormost dim if there's no swizzling.
    for dim, slice_size in enumerate(
        slice_shape[:-1] if has_swizzle else slice_shape
    ):
      if slice_size % rem_collective_size == 0:
        partition_dim(dim, idx, rem_collective_size)
        rem_collective_size = 1
        break
      elif rem_collective_size % slice_size == 0:
        # This is an optimization and it lets us skip squeezed dims.
        if slice_size > 1:
          dim_idx = arith.remui(idx, c(slice_size, index))
          partition_dim(dim, dim_idx, slice_size)
          idx = arith.divui(idx, c(slice_size, index))
        rem_collective_size //= slice_size
      else:
        break  # We failed to partition the leading dimensions.
    del idx  # We overwrote the block index in the loop.
    if rem_collective_size > 1:
      raise ValueError(
          "None of the leading dimensions in the transformed slice shape"
          f" {slice_shape} is divisible by the collective size"
          f" {collective_size}"
      )
  if max(slice_shape) > 256:
    raise ValueError(
        "Async copies only support copying <=256 elements along each"
        " dimension"
    )
  if (zeroth_bw := slice_shape[-1] * element_bitwidth) % 128 != 0:
    raise ValueError(
        "Async copies require the number of bits copied along the last"
        f" dimension to be divisible by 128, but got {zeroth_bw}"
    )
  if (
      swizzle is not None
      and swizzle != mgpu_dialect.SwizzlingMode.kNoSwizzle
      and slice_shape[-1] != (swizzle * 8) // element_bitwidth
  ):
    raise ValueError(
        f"Async copies with {swizzle=} require the last dimension of the"
        f" slice to be exactly {swizzle} bytes i.e. "
        f" {(swizzle * 8) // element_bitwidth} elements, but got"
        f" {slice_shape[-1]} elements."
    )
  return (smem_ref, slice_shape, dyn_base_indices, gmem_transform)
def async_copy(
self,
*,
src_ref: ir.Value,
dst_ref: ir.Value,
gmem_slice: Any = (),
gmem_transform: MemRefTransform | tuple[MemRefTransform, ...] = (),
gmem_peer_id: int | ir.Value | GlobalBroadcast | None = None,
barrier: utils.BarrierRef | None = None,
swizzle: int | None = None,
arrive: bool | None = None,
collective: Sequence[gpu.Dimension] | gpu.Dimension | None = None,
partitioned: int | None = None,
# Should select 0 or 1 threads from the WG.
predicate: ir.Value | None | _DefaultPredicate = _DefaultPredicate(),
reduction_op: TMAReductionOp | None = None,
implementation: AsyncCopyImplementation = AsyncCopyImplementation.TMA,
):
"""Initiates an async copy between GMEM and SMEM.
Exactly one of `src_ref` and `dst_ref` must be in GMEM and in SMEM, and the
SMEM reference must be contiguous. The GMEM window that is read or written
to is specified by the `gmem_slice`. The copy can change the order in which
the data appears in the window by applying a sequence of transforms to the
GMEM reference (as specified by `gmem_transform`).
When `collective` is specified (only allowed for GMEM -> SMEM copies), the
identical async_copy must be scheduled by all blocks that share the same
coordinates along collective dimensions within a cluster. The behavior is
undefined otherwise. The semantics of collective loads depend further on the
`partitioned` argument:
- If `partitioned` is not specified, all blocks load the same data into
their shared memory and all receive the update in their barriers, unless
`arrive` is False. If `arrive` is False, you should expect the barrier to
have expect_tx incremented by the same amount of bytes as if `collective`
was not specified.
- If `partitioned` is specified, each block only loads a separate slice of
the data into SMEM, partitioned into equal tiles along the `partitioned`
dimension. In this case only the barrier of the first block in the
collective will have its expect_tx incremented by the total size of the
transfer across all blocks involved in the collective. Barriers supplied
by other blocks will be ignored (even if `arrive` is True).
"""
index = ir.IndexType.get()
i8 = ir.IntegerType.get_signless(8)
i16 = ir.IntegerType.get_signless(16)
i32 = ir.IntegerType.get_signless(32)
src_ref_ty = ir.MemRefType(src_ref.type)
dst_ref_ty = ir.MemRefType(dst_ref.type)
element_type = src_ref_ty.element_type
element_bitwidth = utils.bitwidth(element_type)
if element_type != dst_ref_ty.element_type:
raise ValueError(
f"Expected same element type, got {element_type} and"
f" {dst_ref_ty.element_type}"
)
if isinstance(collective, gpu.Dimension):
collective = (collective,)
elif collective is None:
collective = ()
if not isinstance(gmem_transform, tuple):
gmem_transform = (gmem_transform,)
if not isinstance(gmem_slice, tuple):
gmem_slice = (gmem_slice,)
if reduction_op is not None:
if implementation != AsyncCopyImplementation.TMA:
raise ValueError("Only the TMA implementation supports reductions")
if not any(
t.isinstance(element_type)
for t in (ir.F32Type, ir.BF16Type, ir.F16Type)
):
raise ValueError(
"TMA with reduction is only supported with f32, f16 and bf16"
)
if reduction_op != "add":
raise ValueError(
"TMA with reduction is only supported with add operation"
)
if src_ref_ty.memory_space is None and utils.is_smem_ref(dst_ref_ty):
gmem_ref, smem_ref = src_ref, dst_ref
if implementation == AsyncCopyImplementation.TMA:
if barrier is None:
raise ValueError("Barriers are required for TMA GMEM -> SMEM copies")
else:
assert implementation == AsyncCopyImplementation.CP_ASYNC
if barrier is not None:
raise NotImplementedError(
"Barriers are unsupported for CP_ASYNC GMEM -> SMEM copies"
)
if arrive is None:
arrive = True # Arrive by default
elif utils.is_smem_ref(src_ref_ty) and dst_ref_ty.memory_space is None:
gmem_ref, smem_ref = dst_ref, src_ref
if barrier is not None:
raise ValueError("Barriers are unsupported for SMEM -> GMEM copies")
if arrive is None:
arrive = True # Commit this copy to the async group by default
else:
raise ValueError("Only SMEM <-> GMEM copies supported")
if collective and gmem_ref is dst_ref:
raise ValueError("Only GMEM -> SMEM copies can be collective")
(
slice_shape,
dyn_base_indices,
squeezed_dims,
gather_indices,
gmem_transform,
) = self._prepare_async_copy(
gmem_ref,
gmem_slice,
gmem_transform,
collective,
partitioned,
implementation,
)
del gmem_slice # Use slice_shape, dyn_base_indices and squeezed_dims instead.
gmem_ref_ty = ir.MemRefType(gmem_ref.type)
smem_ref_ty = ir.MemRefType(smem_ref.type)
# TODO(apaszke): Support squeezed dims for CP_ASYNC.
if implementation == AsyncCopyImplementation.CP_ASYNC and squeezed_dims:
raise NotImplementedError(
"Integer indexing in gmem_slice not supported for CP_ASYNC"
)
# We moved all squeezed dims to the front in _prepare_async_copy.
assert all(d == 1 for d in slice_shape[:len(squeezed_dims)])
if slice_shape[len(squeezed_dims):] != smem_ref_ty.shape:
raise ValueError(
"Expected the SMEM reference to have the same shape as the"
f" transformed slice: {tuple(smem_ref_ty.shape)} !="
f" {slice_shape[len(squeezed_dims):]}"
)
if implementation == AsyncCopyImplementation.CP_ASYNC:
assert not collective
assert partitioned is None
if not isinstance(predicate, _DefaultPredicate):
raise NotImplementedError(
"CP_ASYNC needs to be performed by the whole warpgroup and does not"
" support the predicate argument"
)
# TODO(apaszke): This should be quite easy? The only complication is that
# the indices array needs to have a layout compatible with the way we
# assign lanes to rows/cols.
if gather_indices is not None:
raise NotImplementedError("Gather/scatter unsupported for the CP_ASYNC implementation")
if smem_ref is src_ref:
raise ValueError("CP_ASYNC implementation only supports GMEM -> SMEM copies")
assert swizzle is not None
swizzle_elems = 8 * swizzle // element_bitwidth
if gmem_transform != (TileTransform((8, swizzle_elems)),):
raise NotImplementedError(gmem_transform)
layout = fa.tiled_copy_smem_gmem_layout(
*smem_ref_ty.shape[-4:-2], swizzle, element_bitwidth # type: ignore[call-arg]
)
gmem_strides = gmem_ref_ty.get_strides_and_offset()[0]
dst_tiled_strides = [
arith.constant(i32, s)
for s in layout.tiling.tile_strides(gmem_strides)[gmem_ref_ty.rank :]
]
lane_offset = utils.dyn_dot(layout.lane_indices(), dst_tiled_strides)
warp_offset = utils.dyn_dot(layout.warp_indices(), dst_tiled_strides)
dyn_offset = arith.addi(lane_offset, warp_offset)
offset_scale = 1 if element_bitwidth >= 8 else 8 // element_bitwidth
if element_bitwidth < 8:
gep_type = i8
elif ir.FloatType.isinstance(element_type) and ir.FloatType(element_type).width == 8:
gep_type = i8 # LLVM has no support for f8.
else:
gep_type = element_type
dyn_offset = arith.divui(dyn_offset, c(offset_scale, i32))
if gmem_ref_ty.rank != 2:
raise NotImplementedError("Only 2D copies implemented")
transfers = fa.FragmentedArray.transfer_tiled(
smem_ref, swizzle, layout, tuple(gmem_ref_ty.shape), optimized=False
)
gmem_base_ptr = utils.getelementptr(utils.memref_ptr(gmem_ref), [dyn_offset], gep_type)
gmem_base_ptr = llvm.addrspacecast(ir.Type.parse("!llvm.ptr<1>"), gmem_base_ptr)
bytes_per_transfer = layout.vector_length * element_bitwidth // 8
# Only 16-byte transfers can skip the L1 cache (this is what CG means).
cache_modifier = (
nvvm.LoadCacheModifierKind.CG
if bytes_per_transfer == 16
else nvvm.LoadCacheModifierKind.CA
)
for _get, _update, get_base_idx, smem_ptr in transfers:
constant_offset = sum(i * s for i, s in zip(get_base_idx(), gmem_strides, strict=True))
gmem_ptr = utils.getelementptr(gmem_base_ptr, [constant_offset // offset_scale], gep_type)
nvvm.cp_async_shared_global(smem_ptr, gmem_ptr, bytes_per_transfer, cache_modifier)
if barrier is None:
nvvm.cp_async_commit_group()
else:
raise NotImplementedError
return
assert implementation == AsyncCopyImplementation.TMA
(smem_ref, slice_shape, dyn_base_indices, gmem_transform) = (
self._prepare_tma(
gmem_ref,
smem_ref,
swizzle,
slice_shape,
dyn_base_indices,
gather_indices,
squeezed_dims,
gmem_transform,
collective,
partitioned,
)
)
assert smem_ref is not None # For type checkers.
smem_strides, _ = ir.MemRefType(smem_ref.type).get_strides_and_offset()
if any(
s != cs and d != 1 # Strides don't matter for dims of size 1.
for s, cs, d in zip(
smem_strides,
utils.get_contiguous_strides(smem_ref_ty.shape),
smem_ref_ty.shape,
)
):
raise ValueError(
"async_copy needs the SMEM reference to be contiguous, but got"
f" strides {smem_strides} for shape {smem_ref_ty.shape}"
)
collective_size = math.prod(self.cluster_size[d] for d in collective)
assert math.prod(slice_shape) * element_bitwidth * collective_size % 8 == 0
transfer_bytes = c(
math.prod(slice_shape) * element_bitwidth * collective_size // 8, i32
)
if gather_indices is not None:
import builtins
zips = functools.partial(builtins.zip, strict=True)
# The gather TMA instruction is limited to 2D GMEM references. That means
# that we can't apply the transforms to the GMEM reference and have the
# TMA engine deal with permuting the data, like we do for non-gather TMA.
# Instead, we have to break up the transfer into multiple 2D gathers
# ourselves, which requires us to do more complicated stride math etc.
#
# The minor transformed dim should be a contiguous transfer dim.
# The second minor should be a gather dim of size divisible by 4.
# The rest can be anything, and we will unroll the transfers over them.
if smem_ref is src_ref:
raise NotImplementedError("Scatter unsupported for the TMA implementation")
assert barrier is not None # for pytype
barrier_ptr = barrier.get_ptr()
if squeezed_dims:
raise NotImplementedError("Gather/scatter unsupported when using integer indexing")
if reduction_op is not None:
raise ValueError("Gather/scatter TMA can't perform reductions")
if not isinstance(predicate, _DefaultPredicate):
raise ValueError("Gather/scatter TMA can't use a predicate")
if gather_indices.layout != fa.TMA_GATHER_INDICES_LAYOUT:
raise ValueError(f"Unsupported gather indices layout: {gather_indices.layout}")
ROWS_PER_INSTR = 4
# Make sure we'll always be accessing SMEM with sufficient alignment.
single_tma_bits = ROWS_PER_INSTR * slice_shape[-1] * element_bitwidth
if single_tma_bits % 1024:
raise ValueError(
"Gather/scatter TMA would require breaking it up into transfers of"
f" {single_tma_bits // 8} bytes, but need a multiple of 128 bytes"
)
if arrive:
arrive_predicate = utils.single_thread_predicate(utils.ThreadSubset.WARPGROUP)
nvvm.mbarrier_arrive_expect_tx(
barrier_ptr,
transfer_bytes,
predicate=arrive_predicate,
)
gmem_strides, _ = gmem_ref_ty.get_strides_and_offset()
assert len(gmem_strides) == 2
_, gmem_cols = gmem_ref_ty.shape
slice_gather_strides: tuple[int, ...] = (1, 0) # Each row gets a new index, column has no effect.
for t in gmem_transform:
gmem_strides = t.transform_strides(gmem_strides)
slice_gather_strides = t.transform_strides(slice_gather_strides)
is_gather_dim = [bool(s) for s in slice_gather_strides]
tma_desc = self._get_tma_desc(
gmem_ref, (), gmem_peer_id, (1, slice_shape[-1]), swizzle, reduction_op,
)
# Indices are split over 4 warps, and replicated within each warp.
assert fa.TMA_GATHER_INDICES_LAYOUT.vector_length == ROWS_PER_INSTR
# Index 00 01 02 03 04 05 06 07 08 09 10 11 12 13 14 15 16 17 ...
# Warp <--- 0 ---> <--- 1 ---> <--- 2 ---> <--- 3 ---> <--- 0 --
warp_idx = arith.remui(
utils.warp_idx(sync=True),
arith.constant(i32, utils.WARPS_IN_WARPGROUP),
)
gather_linear_idx_warp = arith.muli(warp_idx, c(ROWS_PER_INSTR, i32))
# Since the TMA instruction is limited to 2D gathers, we flatten all
# non-gather dims into the column index.
max_non_gather_linear_index = sum(
(d - 1) * s
for g, d, s in zip(is_gather_dim[:-1], slice_shape[:-1], gmem_strides[:-1])
if not g
)
# If we ever exceed this then we need to change the size of the GMEM ref,
# to prevent the TMA engine from clipping our indices.
if max_non_gather_linear_index > gmem_cols:
raise NotImplementedError("Non-gather dims don't fit into the columns")
col_base_offset = functools.reduce(
arith.addi,
(
arith.muli(idx, arith.constant(index, stride))
for g, idx, stride in zips(
is_gather_dim, dyn_base_indices, gmem_strides
)
if not g
),
arith.constant(index, 0),
)
col_base_offset = arith.index_cast(i32, col_base_offset)
# TMA instructions are uniform, so we can't use multiple lanes.
predicate = utils.single_thread_predicate(utils.ThreadSubset.WARP)
# We need to unroll over all non-gather dimensions other than the last one
non_gather_slice_shape = tuple(
1 if g else d for d, g in zips(slice_shape[:-1], is_gather_dim[:-1])
)
# First, iterate over gather index registers we have available.
for i, reg in enumerate(gather_indices.registers.flat):
if utils.bitwidth(gather_indices.mlir_dtype) != 32:
reg = arith.extui(ir.VectorType.get((4,), i32), reg)
# Compute which rows within the 2D slice we'll be gathering.
gather_linear_idx_reg = i * ROWS_PER_INSTR * utils.WARPS_IN_WARPGROUP
gather_linear_idx = arith.addi(
gather_linear_idx_warp, arith.constant(i32, gather_linear_idx_reg)
)
# Transform row indices to align with the transformed SMEM shape.
gather_slice_idx = [
arith.remui(arith.divui(gather_linear_idx, c(s, i32)), c(d, i32))
for g, d, s in zip(is_gather_dim, slice_shape, slice_gather_strides)
if g
]
gather_slice_idx = [arith.index_cast(index, i) for i in gather_slice_idx]
gather_rows = [
llvm.extractelement(reg, c(i, i32)) for i in range(ROWS_PER_INSTR)
]
# Second, step over non-gather slice indices.
for non_gather_idxs in np.ndindex(non_gather_slice_shape):
gather_slice_idx_it = iter(gather_slice_idx)
smem_indices = tuple(
next(gather_slice_idx_it) if g else i
for g, i in zip(is_gather_dim[:-1], non_gather_idxs)
)
# We should really take a slice here, but it doesn't matter. We're
# just going to take the base pointer anyway.
transfer_smem_ref = utils.memref_slice(smem_ref, smem_indices)
smem_ptr = utils.memref_ptr(transfer_smem_ref, memory_space=3)
# The slice index needs to be folded into the gather col index.
col_slice_offset = sum(
idx * stride
for g, idx, stride in zips(
is_gather_dim[:-1], non_gather_idxs, gmem_strides[:-1]
)
if not g
)
col_offset = arith.addi(col_base_offset, arith.constant(i32, col_slice_offset))
llvm.inline_asm(
ir.Type.parse("!llvm.void"),
[predicate, smem_ptr, tma_desc, barrier_ptr, col_offset, *gather_rows],
"@$0 cp.async.bulk.tensor.2d.shared::cta.global.tile::gather4.mbarrier::complete_tx::bytes [$1], [$2, {$4, $5, $6, $7, $8}], [$3];",
"b,r,l,r" + ",r" * (ROWS_PER_INSTR + 1),
has_side_effects=True,
)
return
assert gather_indices is None # Only tiled TMA handled below.
tma_desc = self._get_tma_desc(
gmem_ref, gmem_transform, gmem_peer_id,
tuple(slice_shape), swizzle, reduction_op,
)
# We construct TMA descriptors in column-major order.
rev_dyn_base_indices = [
arith.index_cast(i32, idx) for idx in reversed(dyn_base_indices)
]
if isinstance(predicate, _DefaultPredicate):
predicate = utils.single_thread_predicate(utils.ThreadSubset.WARPGROUP)
if predicate is None:
predicate = c(1, ir.IntegerType.get_signless(1))
smem_ptr = utils.memref_ptr(smem_ref, memory_space=3)
if gmem_ref is src_ref:
assert barrier is not None # for pytype
barrier_ptr = barrier.get_ptr()
assert reduction_op is None
if collective_size > 1 and partitioned is not None:
assert collective_size == 2
if arrive:
first_block = arith.cmpi(
arith.CmpIPredicate.eq, self.cluster_idx(collective), c(0, index),
)
arrive_predicate = arith.andi(predicate, first_block)
nvvm.mbarrier_arrive_expect_tx(
barrier_ptr, transfer_bytes, predicate=arrive_predicate
)
rank = len(slice_shape)
idx_operands = ",".join(f"${i}" for i in range(4, 4 + rank))
llvm.inline_asm(
ir.Type.parse("!llvm.void"),
[predicate, smem_ptr, tma_desc, barrier_ptr, *rev_dyn_base_indices],
f"""
{{
.reg .b32 mapped_addr;
@$0 mapa.shared::cluster.u32 mapped_addr, $3, 0;
@$0 cp.async.bulk.tensor.{rank}d.shared::cta.global.tile.mbarrier::complete_tx::bytes.cta_group::2
[$1], [$2, {{{idx_operands}}}], [mapped_addr];
}}
""",
"b,r,l,r" + ",r" * rank,
has_side_effects=True,
)
else:
if arrive:
nvvm.mbarrier_arrive_expect_tx(
barrier_ptr, transfer_bytes, predicate=predicate
)
if collective_size > 1:
multicast_mask = arith.trunci(
i16, utils.cluster_collective_mask(self.cluster_size, collective)
)
else:
multicast_mask = None
nvvm.cp_async_bulk_tensor_shared_cluster_global(
smem_ptr, tma_desc, rev_dyn_base_indices, barrier_ptr, [],
multicast_mask=multicast_mask, predicate=predicate
)
else:
if reduction_op is not None:
rank = len(slice_shape)
idx_operands = ",".join(f"${i}" for i in range(3, 3 + rank))
llvm.inline_asm(
ir.Type.parse("!llvm.void"),
[predicate,smem_ptr,tma_desc,*rev_dyn_base_indices],
f"@$0 cp.reduce.async.bulk.tensor.{rank}d.global.shared::cta.{reduction_op}.tile.bulk_group [$2,{{{idx_operands}}}], [$1];",
"b,r,l" + ",r" * rank,
has_side_effects=True,
)
if arrive:
nvvm.cp_async_bulk_commit_group()
else:
nvvm.cp_async_bulk_tensor_global_shared_cta(
tma_desc, smem_ptr, rev_dyn_base_indices, predicate=predicate
)
if arrive:
nvvm.cp_async_bulk_commit_group()
def async_prefetch(
self,
*,
gmem_ref: ir.Value,
gmem_slice: Any = (),
gmem_transform: MemRefTransform | tuple[MemRefTransform, ...] = (),
gmem_peer_id: int | ir.Value | None = None,
swizzle: int | None = None,
collective: Sequence[gpu.Dimension] | gpu.Dimension | None = None,
partitioned: int | None = None,
# Should select 0 or 1 threads from the WG.
predicate: ir.Value | None | _DefaultPredicate = _DefaultPredicate(),
):
i32 = ir.IntegerType.get_signless(32)
if isinstance(collective, gpu.Dimension):
collective = (collective,)
elif collective is None:
collective = ()
if not isinstance(gmem_transform, tuple):
gmem_transform = (gmem_transform,)
if not isinstance(gmem_slice, tuple):
gmem_slice = (gmem_slice,)
impl = AsyncCopyImplementation.TMA
(
slice_shape,
dyn_base_indices,
squeezed_dims,
gather_indices,
gmem_transform,
) = self._prepare_async_copy(
gmem_ref, gmem_slice, gmem_transform, collective, partitioned, impl
)
del gmem_slice # Use slice_shape, dyn_base_indices and squeezed_dims instead.
(_, slice_shape, dyn_base_indices, gmem_transform) = (
self._prepare_tma(
gmem_ref,
None,
swizzle,
slice_shape,
dyn_base_indices,
gather_indices,
squeezed_dims,
gmem_transform,
collective,
partitioned,
)
)
if gather_indices is not None:
raise NotImplementedError("Gather/scatter prefetch not implemented yet")
tma_desc = self._get_tma_desc(
gmem_ref, gmem_transform, gmem_peer_id,
tuple(slice_shape), swizzle, reduction_op=None,
)
# We construct TMA descriptors in column-major order.
rev_dyn_base_indices = [
arith.index_cast(i32, idx) for idx in reversed(dyn_base_indices)
]
if isinstance(predicate, _DefaultPredicate):
predicate = utils.single_thread_predicate(utils.ThreadSubset.WARPGROUP)
if predicate is None:
predicate = c(1, ir.IntegerType.get_signless(1))
rank = len(slice_shape)
idx_operands = ",".join(f"${i}" for i in range(2, 2 + rank))
llvm.inline_asm(
ir.Type.parse("!llvm.void"),
[predicate, tma_desc, *rev_dyn_base_indices],
f"@$0 cp.async.bulk.prefetch.tensor.{rank}d.L2.global.tile [$1, {{{idx_operands}}}];",
"b,l" + ",r" * rank,
has_side_effects=True,
)
def await_async_copy(
self, allow_groups: int, await_read_only: bool = False,
scope: utils.ThreadSubset = utils.ThreadSubset.WARPGROUP,
):
nvvm.cp_async_bulk_wait_group(allow_groups, read=await_read_only)
if scope == utils.ThreadSubset.WARPGROUP:
utils.warpgroup_barrier()
elif scope == utils.ThreadSubset.WARP:
utils.warp_barrier()
else:
raise ValueError(f"Unsupported scope: {scope}")
def await_cp_async_copy(self, allow_groups: int):
nvvm.cp_async_wait_group(allow_groups)
utils.warpgroup_barrier()
def _ensure_nvshmem_decls(self):
if self.is_device_collective:
return
self.is_device_collective = True
with ir.InsertionPoint(self.module.body):
nvshmem_my_pe_type = ir.TypeAttr.get(ir.Type.parse("!llvm.func<i32()>"))
llvm.LLVMFuncOp(
"nvshmem_my_pe", nvshmem_my_pe_type, sym_visibility="private"
)
nvshmem_ptr_type = ir.TypeAttr.get(
ir.Type.parse("!llvm.func<!llvm.ptr(!llvm.ptr,i32)>")
)
llvm.LLVMFuncOp("nvshmem_ptr", nvshmem_ptr_type, sym_visibility="private")
nvshmemx_mc_ptr_type = ir.TypeAttr.get(
ir.Type.parse("!llvm.func<!llvm.ptr(i32,!llvm.ptr)>")
)
llvm.LLVMFuncOp(
"nvshmemx_mc_ptr", nvshmemx_mc_ptr_type, sym_visibility="private"
)
def to_remote(self, ref: ir.Value, peer: ir.Value):
self._ensure_nvshmem_decls()
if ir.MemRefType.isinstance(ref.type):
# We replace the offset in the ref type by 0, because memref_ptr always
# folds the offset into the pointer.
ref_ty = ir.MemRefType(ref.type)
strides, _ = ref_ty.get_strides_and_offset()
result_type = ir.MemRefType.get(
ref_ty.shape,
ref_ty.element_type,
ir.StridedLayoutAttr.get(0, strides),
ref_ty.memory_space,
)
return utils.ptr_as_memref(
self.to_remote(utils.memref_ptr(ref), peer), result_type
)
if ref.type != ir.Type.parse("!llvm.ptr"):
raise ValueError(f"Unsupported type for to_remote: {ref.type}")
if peer.type != ir.IntegerType.get_signless(32):
raise ValueError(f"peer index must be an i32, got {peer.type}")
return llvm.call(ref.type, [ref, peer], [], [], callee="nvshmem_ptr")
def to_remote_multicast(self, ref: ir.Value):
i32 = ir.IntegerType.get_signless(32)
self._ensure_nvshmem_decls()
if not ir.MemRefType.isinstance(ref.type):
raise ValueError(f"Unsupported type for to_remote_multicast: {ref.type}")
# We replace the offset in the ref type by 0, because memref_ptr always
# folds the offset into the pointer.
ref_ty = ir.MemRefType(ref.type)
strides, _ = ref_ty.get_strides_and_offset()
result_type = ir.MemRefType.get(
ref_ty.shape,
ref_ty.element_type,
ir.StridedLayoutAttr.get(0, strides),
ref_ty.memory_space,
)
world_team = arith.constant(i32, 0)
ptr = utils.memref_ptr(ref)
mc_ptr = llvm.call(
ptr.type, [world_team, ptr], [], [], callee="nvshmemx_mc_ptr",
)
return utils.MultimemRef(utils.ptr_as_memref(mc_ptr, result_type))
def device_id(self) -> ir.Value:
self._ensure_nvshmem_decls()
i32 = ir.IntegerType.get_signless(32)
return llvm.call(i32, [], [], [], callee="nvshmem_my_pe")
| LaunchContext |
python | walkccc__LeetCode | solutions/527. Word Abbreviation/527-2.py | {
"start": 47,
"end": 93
} | class ____:
word: str
index: int
| IndexedWord |
python | Farama-Foundation__Gymnasium | tests/test_core.py | {
"start": 3904,
"end": 4106
} | class ____(RewardWrapper):
"""Example reward wrapper for testing."""
def reward(self, reward: SupportsFloat) -> SupportsFloat:
"""Reward function."""
return 1
| ExampleRewardWrapper |
python | mlflow__mlflow | mlflow/cli/genai_eval_utils.py | {
"start": 417,
"end": 824
} | class ____:
"""
Structured assessment data for a trace evaluation.
"""
name: str | None
"""The name of the assessment"""
result: Any | None = None
"""The result value from the assessment"""
rationale: str | None = None
"""The rationale text explaining the assessment"""
error: str | None = None
"""Error message if the assessment failed"""
@dataclass
| Assessment |
python | huggingface__transformers | src/transformers/models/beit/modeling_beit.py | {
"start": 12800,
"end": 15409
} | class ____(BeitSelfAttention):
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
relative_position_bias: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
resolution: Optional[tuple[int]] = None,
) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
if output_attentions:
logger.warning_once(
f"{self.__class__.__name__} does not support `output_attentions=True`. The returned attention weights will "
"be `None`. If you want to get attention weights, please set `attn_implementation='eager'` when loading the model."
)
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
attn_bias = None
if self.has_relative_position_bias:
height, width = resolution
window_size = (height // self.config.patch_size, width // self.config.patch_size)
attn_bias = self.relative_position_bias(
window_size, interpolate_pos_encoding, dim_size=hidden_states.shape[1]
)
# Add shared relative position bias if provided.
if relative_position_bias is not None:
if attn_bias is None:
attn_bias = relative_position_bias
else:
attn_bias += relative_position_bias
scaling = 1 / math.sqrt(self.attention_head_size)
context_layer = torch.nn.functional.scaled_dot_product_attention(
query_layer,
key_layer,
value_layer,
attn_mask=attn_bias,
dropout_p=self.config.attention_probs_dropout_prob if self.training else 0.0,
is_causal=False,
scale=scaling,
)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, None
| BeitSdpaSelfAttention |
python | PyCQA__pylint | tests/functional/s/super/super_init_not_called.py | {
"start": 2277,
"end": 2393
} | class ____(UnknownParent): # [undefined-variable]
def __init__(self) -> None:
print("Called")
| DerivedFrom |
python | django__django | tests/auth_tests/test_checks.py | {
"start": 14648,
"end": 14754
} | class ____(LoginRequiredMiddleware):
redirect_field_name = "redirect_to"
| LoginRequiredMiddlewareSubclass |
python | django__django | tests/utils_tests/test_autoreload.py | {
"start": 10801,
"end": 11412
} | class ____(SimpleTestCase):
def test_is_django_module(self):
for module, expected in ((zoneinfo, False), (sys, False), (autoreload, True)):
with self.subTest(module=module):
self.assertIs(autoreload.is_django_module(module), expected)
def test_is_django_path(self):
for module, expected in (
(zoneinfo.__file__, False),
(contextlib.__file__, False),
(autoreload.__file__, True),
):
with self.subTest(module=module):
self.assertIs(autoreload.is_django_path(module), expected)
| TestUtilities |
python | allegroai__clearml | clearml/backend_api/services/v2_9/events.py | {
"start": 60270,
"end": 61273
} | class ____(Response):
"""
Response of events.get_scalar_metrics_and_variants endpoint.
:param metrics:
:type metrics: dict
"""
_service = "events"
_action = "get_scalar_metrics_and_variants"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {"metrics": {"additionalProperties": True, "type": ["object", "null"]}},
"type": "object",
}
def __init__(self, metrics: Optional[dict] = None, **kwargs: Any) -> None:
super(GetScalarMetricsAndVariantsResponse, self).__init__(**kwargs)
self.metrics = metrics
@schema_property("metrics")
def metrics(self) -> Optional[dict]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[dict]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (dict,))
self._property_metrics = value
| GetScalarMetricsAndVariantsResponse |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/exclusions.py | {
"start": 5726,
"end": 7846
} | class ____:
@classmethod
def as_predicate(cls, predicate, description=None):
if isinstance(predicate, compound):
return cls.as_predicate(predicate.enabled_for_config, description)
elif isinstance(predicate, Predicate):
if description and predicate.description is None:
predicate.description = description
return predicate
elif isinstance(predicate, (list, set)):
return OrPredicate(
[cls.as_predicate(pred) for pred in predicate], description
)
elif isinstance(predicate, tuple):
return SpecPredicate(*predicate)
elif isinstance(predicate, str):
tokens = re.match(
r"([\+\w]+)\s*(?:(>=|==|!=|<=|<|>)\s*([\d\.]+))?", predicate
)
if not tokens:
raise ValueError(
"Couldn't locate DB name in predicate: %r" % predicate
)
db = tokens.group(1)
op = tokens.group(2)
spec = (
tuple(int(d) for d in tokens.group(3).split("."))
if tokens.group(3)
else None
)
return SpecPredicate(db, op, spec, description=description)
elif callable(predicate):
return LambdaPredicate(predicate, description)
else:
assert False, "unknown predicate type: %s" % predicate
def _format_description(self, config, negate=False):
bool_ = self(config)
if negate:
bool_ = not negate
return self.description % {
"driver": (
config.db.url.get_driver_name() if config else "<no driver>"
),
"database": (
config.db.url.get_backend_name() if config else "<no database>"
),
"doesnt_support": "doesn't support" if bool_ else "does support",
"does_support": "does support" if bool_ else "doesn't support",
}
def _as_string(self, config=None, negate=False):
raise NotImplementedError()
| Predicate |
python | pytorch__pytorch | torch/_higher_order_ops/base_hop.py | {
"start": 728,
"end": 8808
} | class ____(HigherOrderOperator, abc.ABC):
"""
This is the "Base" HOP implementation for a HOP that looks like:
call_subgraph_hop(subgraph, *operands, **kwargs)
That is:
1) the HOP stays alive until Inductor
2) the HOP's semantics are subgraph(*operands)
3) kwargs may be some config options but aren't passed directly to the subgraph.
To use this, please subclass this class and override methods as necessary:
```
class InvokeQuant(BaseHOP):
def __init__(self):
return super().__init__("invoke_quant")
invoke_quant = InvokeQuant()
def g(x):
return x.sin().cos()
@torch.compile(backend="aot_eager")
def f(x):
return invoke_quant(g, x, scheme="nf4")
```
NOTE: don't subclass BaseHOP out of tree! That is not allowed. All
usages must be in tree.
"""
def __init__(self, hop_name) -> None:
super().__init__(hop_name)
# Set up the registrations
# If you want to override any of these, override them in your subclass.
self.py_autograd_impl(self._call_Autograd)
self.py_functionalize_impl(self._call_Functionalize)
self.py_impl(ProxyTorchDispatchMode)(self._call_ProxyTorchDispatchMode)
self.py_impl(FakeTensorMode)(self._call_FakeTensorMode)
self.py_impl(DispatchKey.CompositeExplicitAutograd)(
self._call_CompositeExplicitAutograd
)
def __call__(self, subgraph, *operands, **kwargs):
if not isinstance(
subgraph,
(
torch.fx.GraphModule,
FunctionWithNoFreeVars,
FunctionalCallableWithEpilogue,
),
):
raise RuntimeError(
f"{self._name}: when calling this API without torch.compile, "
f"we require that the subgraph be a torch.fx.GraphModule (or "
f"a function we know doesn't have free variables)."
)
return super().__call__(subgraph, *operands, **kwargs)
def _call_Autograd(self, subgraph, *operands, **kwargs):
if isinstance(subgraph, torch.fx.GraphModule):
pass
# We assume the subgraph doesn't mutate inputs and there is no aliasing.
# In the PT2 stack, this is Dynamo's responsibility to figure out.
return BaseHOPFunction.apply(self, subgraph, kwargs, *operands)
def _call_CompositeExplicitAutograd(self, subgraph, *operands, **kwargs):
from torch.utils._python_dispatch import _get_current_dispatch_mode
mode = _get_current_dispatch_mode()
assert mode is None, "Mode should never be enabled for CPU/CUDA key"
return subgraph(*operands)
def _call_ProxyTorchDispatchMode(self, proxy_mode, subgraph, *operands, **kwargs):
traced_graph = reenter_make_fx(subgraph)(*operands)
assert isinstance(proxy_mode.tracer, torch.fx.Tracer)
qualname = proxy_mode.tracer.get_fresh_qualname("subgraph")
proxy_mode.tracer.root.register_module(qualname, traced_graph)
node_args = (traced_graph, *operands)
proxy_args = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, node_args) # type: ignore[attr-defined]
proxy_kwargs = pytree.tree_map(proxy_mode.tracer.unwrap_proxy, kwargs) # type: ignore[attr-defined]
out_proxy = proxy_mode.tracer.create_proxy(
"call_function", self, proxy_args, proxy_kwargs
)
out = self(subgraph, *operands, **kwargs)
return track_tensor_tree(
out,
out_proxy,
constant=None,
tracer=proxy_mode.tracer, # type: ignore[arg-type]
)
def _call_FakeTensorMode(self, mode, subgraph, *operands, **kwargs):
# TODO: this should probably route through FakeTensorMode to reuse caching
with mode:
return subgraph(*operands)
# NOTE [Support input mutation of hops]
# To support input mutation, hop's subgraph must be functionalized because many inductor passes are
# applied to subgraph recursively and only work on functional graph. However, we could inline an
# epilogue graph (i.e. the copy_) into the subgraph because this is how input mutation
# is implemented in the top-level graph when no hop is presented. All passes must have been and will be
# aware of the epilogue graph.
#
# Since we've supported input mutation for custom op with auto_functionalized, we share the infra for hops
# The plan is:
# 1. In hop's Functionalization key, it calls do_auto_functionalize_v2 if subgraph mutates input
# 2. In do_auto_functionalize_v2:
# a. we functionalize the callables in hop's argument. This is to make the subgraphs functional so we
# could recursively run passes on them. Also the epilogue graph is inlined at the end.
# b. we call auto_functionalized_v2 and pass in an additional schema in order to properly invoke
# the hop with normalized kwargs.
# 3. In inductor, we decompose the auto_functionalized hop by callilng into the dense implementation, which
# copies the mutated inputs to the hop if necessary and call the hop.
# After these steps, the rest of the inductor stack knows how to fuse the copy_ in subgraph with other ops.
def _call_Functionalize(self, ctx, subgraph, *operands, **kwargs):
from torch._higher_order_ops.auto_functionalize import (
can_auto_functionalize,
do_auto_functionalize_v2,
)
# invoke_quant has non-proxable argument of type InvokeQuant that
# we cannot generate schema for.
if self is not torch.ops.higher_order.invoke_quant_packed:
hop_instance = HopInstance.create(self, subgraph, *operands, **kwargs)
if can_auto_functionalize(hop_instance):
return do_auto_functionalize_v2(
ctx.mode, hop_instance, (subgraph, *operands), kwargs
)
unwrapped_operands = ctx.unwrap_tensors(operands)
with ctx.redispatch_to_next():
# We assume the subgraph doesn't mutate inputs and there is no aliasing.
# In the PT2 stack, this is Dynamo's responsibility to figure out.
functionalized_subgraph = FunctionWithNoFreeVars(
ctx.functionalize(subgraph)
)
out = self(functionalized_subgraph, *unwrapped_operands, **kwargs)
return ctx.wrap_tensors(out)
# pyrefly: ignore [bad-override]
def gen_schema(self, subgraph, *operands, **kwargs):
from .schema import HopSchemaGenerator
subgraph = materialize_as_graph(subgraph, operands)
(
inp_inp_alias,
inp_out_alias,
out_out_alias,
mutated_inp_idx,
output,
) = check_input_alias_and_mutation_return_outputs(subgraph)
if not (
len(inp_inp_alias) == 0
and len(inp_out_alias) == 0
and len(out_out_alias) == 0
):
# TODO: turn this into an error.
# test_foreach_map_backward_binary_foreach_map_addrecip_op fails the alias test.
import warnings
warnings.warn(
"Aliasing is not supported for HOP subgraph.\n"
f"{subgraph.print_readable(print_output=False)}\n"
f"Alias info: inp-inp alias: {inp_inp_alias}, inp-out alias: {inp_out_alias}, out-out alias{out_out_alias}"
f"This may lead to silent incorrectness.",
stacklevel=2,
)
schema_gen = HopSchemaGenerator(self)
schema_gen.add_arg("subgraph", subgraph)
for idx, arg in enumerate(operands):
schema_gen.add_arg(f"arg{idx}", arg, is_mutated=idx in mutated_inp_idx)
for name, arg in kwargs.items():
schema_gen.add_arg(name, arg, default_value=arg, kw_only=True)
for out in output:
schema_gen.add_output(out)
return schema_gen.gen_schema()
| BaseHOP |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 20296,
"end": 22370
} | class ____(unittest.TestCase):
"""Tests person in the en_US locale"""
def setUp(self):
self.fake = Faker("en_US")
Faker.seed(0)
def test_first_names(self):
# General first name
name = self.fake.first_name()
self.assertIsInstance(name, str)
assert name in EnUSProvider.first_names
# Female first name
name = self.fake.first_name_female()
self.assertIsInstance(name, str)
assert name in EnUSProvider.first_names
assert name in EnUSProvider.first_names_female
# Male first name
name = self.fake.first_name_male()
self.assertIsInstance(name, str)
assert name in EnUSProvider.first_names
assert name in EnUSProvider.first_names_male
# Nonbinary first name
name = self.fake.first_name_nonbinary()
self.assertIsInstance(name, str)
assert name in EnUSProvider.first_names
assert name in EnUSProvider.first_names_nonbinary
def test_last_names(self):
# General last name
name = self.fake.last_name()
self.assertIsInstance(name, str)
assert name in EnUSProvider.last_names
# Female last name
name = self.fake.last_name_female()
self.assertIsInstance(name, str)
assert name in EnUSProvider.last_names
# Male last name
name = self.fake.last_name_male()
self.assertIsInstance(name, str)
assert name in EnUSProvider.last_names
# Nonbinary last name
name = self.fake.last_name_nonbinary()
self.assertIsInstance(name, str)
assert name in EnUSProvider.last_names
def test_prefix(self):
# Nonbinary prefix
prefix = self.fake.prefix_nonbinary()
self.assertIsInstance(prefix, str)
assert prefix in EnUSProvider.prefixes_nonbinary
def test_suffix(self):
# Nonbinary suffix
suffix = self.fake.suffix_nonbinary()
self.assertIsInstance(suffix, str)
assert suffix in EnUSProvider.suffixes_nonbinary
| TestEnUS |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 50930,
"end": 53861
} | class ____(DashboardComponent):
"""Bar chart showing time spend in action by key prefix"""
@log_errors
def __init__(self, scheduler, **kwargs):
self.last = 0
self.scheduler = scheduler
if TaskStreamPlugin.name not in self.scheduler.plugins:
self.scheduler.add_plugin(TaskStreamPlugin(self.scheduler))
action_data = {
"times": [0.2, 0.1],
"formatted_time": ["0.2 ms", "2.8 us"],
"color": [ts_color_lookup["transfer"], ts_color_lookup["compute"]],
"names": ["transfer", "compute"],
}
self.action_source = ColumnDataSource(data=action_data)
self.root = figure(
title="Aggregate Per Action",
tools="",
name="aggregate_per_action",
x_range=["a", "b"],
**kwargs,
)
rect = self.root.vbar(
source=self.action_source,
x="names",
top="times",
width=0.7,
color="color",
)
self.root.y_range.start = 0
self.root.yaxis[0].formatter = NumeralTickFormatter(format="0")
self.root.yaxis.axis_label = "Time (s)"
self.root.yaxis.ticker = AdaptiveTicker(**TICKS_1024)
self.root.xaxis.major_label_orientation = XLABEL_ORIENTATION
self.root.xaxis.major_label_text_font_size = "16px"
rect.nonselection_glyph = None
self.root.xaxis.minor_tick_line_alpha = 0
self.root.xgrid.visible = False
self.root.toolbar_location = None
hover = HoverTool()
hover.tooltips = """
<div>
<p><b>Name:</b> @names</p>
<p><b>Time:</b> @formatted_time</p>
</div>
"""
hover.point_policy = "follow_mouse"
self.root.add_tools(hover)
@without_property_validation
@log_errors
def update(self):
agg_times = defaultdict(float)
for ts in self.scheduler.task_prefixes.values():
for action, t in ts.all_durations.items():
agg_times[action] += t
# order by largest time first
agg_times = sorted(agg_times.items(), key=lambda x: x[1], reverse=True)
agg_colors = list()
agg_names = list()
agg_time = list()
for action, t in agg_times:
agg_names.append(action)
if action == "compute":
agg_colors.append("purple")
else:
agg_colors.append(ts_color_lookup[action])
agg_time.append(t)
self.root.x_range.factors = agg_names
self.root.title.text = "Aggregate Time Per Action"
action_result = dict(
times=agg_time,
color=agg_colors,
names=agg_names,
formatted_time=[format_time(t) for t in agg_time],
)
update(self.action_source, action_result)
| AggregateAction |
python | django__django | tests/admin_views/models.py | {
"start": 28290,
"end": 28572
} | class ____(models.Model):
title = models.CharField(max_length=100)
next_box = models.ForeignKey(
"self", null=True, on_delete=models.SET_NULL, blank=True
)
next_box = models.ForeignKey(
"self", null=True, on_delete=models.SET_NULL, blank=True
)
| Box |
python | huggingface__transformers | src/transformers/models/gpt2/modeling_gpt2.py | {
"start": 56032,
"end": 60080
} | class ____(GPT2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = GPT2Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, QuestionAnsweringModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1).to(start_logits.device)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1).to(end_logits.device)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"GPT2DoubleHeadsModel",
"GPT2ForQuestionAnswering",
"GPT2ForSequenceClassification",
"GPT2ForTokenClassification",
"GPT2LMHeadModel",
"GPT2Model",
"GPT2PreTrainedModel",
]
| GPT2ForQuestionAnswering |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/random/stateless_random_ops_test.py | {
"start": 9544,
"end": 25042
} | class ____(test.TestCase, parameterized.TestCase):
def _test_match(self, case, seed):
# Stateless ops should be the same as stateful ops on the first call
# after seed scrambling.
key = 0x3ec8f720, 0x02461e29
preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
preseed = preseed[::2] | preseed[1::2] << 32
with ops.device(get_device().name):
_, stateless_op, stateful_op = case
random_seed.set_random_seed(seed[0])
stateful = stateful_op(seed=seed[1])
pure = stateless_op(seed=preseed)
self.assertAllEqual(stateful, pure)
def _test_match_stateless_cpu_gpu(self, case, seed):
# Stateless ops should produce the same result on CPUs and GPUs.
_, stateless_op, _ = case
with ops.device('CPU'):
result_cpu = stateless_op(seed=seed)
with ops.device(get_device().name):
result_gpu = stateless_op(seed=seed)
self.assertAllClose(result_cpu, result_gpu)
def _test_old_and_new_stateless_match(self, case, seed):
"""Tests that the new stateless ops match the old stateless ones."""
with ops.device(get_device().name):
_, stateless_op, _ = case
with compat.forward_compatibility_horizon(*BEFORE_EXPIRE):
old = stateless_op(seed=seed)
with compat.forward_compatibility_horizon(*AFTER_EXPIRE):
new = stateless_op(seed=seed)
self.assertAllClose(old, new)
def _test_explicit_alg(self, case, seed):
"""Tests that alg=philox and alg=None are the same (on CPU/GPU)."""
with ops.device(get_device().name):
_, stateless_op, _ = case
implicit_alg = stateless_op(seed=seed)
# All device types allowed in this test will result in Philox
explicit_alg = stateless_op(seed=seed, alg='philox')
self.assertAllClose(implicit_alg, explicit_alg)
def _test_determinism(self, case, seed_type):
# Stateless values should be equal iff the seeds are equal (roughly)
seeds = [(x, y) for x in range(5) for y in range(5)] * 3 # pylint: disable=g-complex-comprehension
with self.test_session(), ops.device(get_device().name):
_, stateless_op, _ = case
if context.executing_eagerly():
values = [
(seed, stateless_op(seed=constant_op.constant(seed, seed_type)))
for seed in seeds]
else:
# Have this branch because the above branch is too slow in graph
# mode
seed_t = array_ops.placeholder(seed_type, shape=[2])
pure = stateless_op(seed=seed_t)
values = [
(seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds
]
for s0, v0 in values:
for s1, v1 in values:
if dtypes.as_dtype(v0.dtype) != dtypes.bfloat16:
self.assertEqual(s0 == s1, np.all(v0 == v1))
elif s0 == s1:
# Skip the s0 != s1 case because v0 and v1 can be either equal or
# unequal in that case due to bfloat16's low precision
self.assertAllEqual(v0, v1)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(float_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchFloat(self, case, seed):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Skip on XLA because XLA kernels do not support int64 '
'seeds needed by this test.')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(int_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchInt(self, case, seed):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Skip on XLA because XLA kernels do not support int64 '
'seeds needed by this test.')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(multinomial_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchMultinomial(self, case, seed):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(gamma_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchGamma(self, case, seed):
if get_device().device_type == 'GPU':
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking GPU kernel')
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(gamma_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testStatelessGammaCpuGpuMatch(self, case, seed):
if get_device().device_type != 'GPU':
# This test compares the numbers produced by the CPU and GPU kernel for
# stateless_random_gamma.
self.skipTest('This test requires GPU')
self._test_match_stateless_cpu_gpu(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(poisson_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testMatchPoisson(self, case, seed):
if get_device().device_type == 'GPU':
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking GPU kernel')
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension,undefined-variable
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(shuffle_cases()))
def testMatchShuffle(self, case, seed):
if get_device().device_type == 'GPU':
self.skipTest('Lacking GPU kernel')
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
self.skipTest('Lacking XLA kernel')
self._test_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(float_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testOldAndNewStatelessMatchFloat(self, case, seed):
self._test_old_and_new_stateless_match(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], case_id, seed_id), case, seed) # pylint: disable=g-complex-comprehension
for seed_id, seed in enumerate(SEEDS)
for case_id, case in enumerate(
int_cases(minval_maxval=((2, 11111), (None, None)))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testOldAndNewStatelessMatchInt(self, case, seed):
self._test_old_and_new_stateless_match(case, seed)
@parameterized.named_parameters(
('_%s_%s' % (case[0], case_id), case)
for case_id, case in enumerate(float_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testExplicitAlgFloat(self, case):
seed = (7, 17)
self._test_explicit_alg(case, seed)
@parameterized.named_parameters(
('_%s_%s' % (case[0], case_id), case)
for case_id, case in enumerate(
int_cases(minval_maxval=((2, 11111), (None, None)))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testExplicitAlgInt(self, case):
seed = (7, 17)
self._test_explicit_alg(case, seed)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(
float_cases(shape_dtypes=(dtypes.int32, dtypes.int64))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismFloat(self, case, seed_type):
if seed_type == dtypes.int64 and get_device().device_type in ('XLA_GPU',
'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest(
'Skip on XLA because XLA kernels do not support int64 seeds.')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(
int_cases(shape_dtypes=(dtypes.int32, dtypes.int64))))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismInt(self, case, seed_type):
if seed_type == dtypes.int64 and get_device().device_type in ('XLA_GPU',
'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest(
'Skip on XLA because XLA kernels do not support int64 seeds.')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(multinomial_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismMultinomial(self, case, seed_type):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(gamma_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismGamma(self, case, seed_type):
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_determinism(case, seed_type)
@parameterized.named_parameters(
('_%s_%s_%s' % (case[0], seed_type.name, case_id), case, seed_type) # pylint: disable=g-complex-comprehension
for seed_type in SEED_TYPES
for case_id, case in enumerate(poisson_cases()))
@test_util.disable_tfrt('tensorflow::DirectSession::Run crashes. b/156187396')
def testDeterminismPoisson(self, case, seed_type):
if get_device().device_type == 'GPU':
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking GPU kernel')
if get_device().device_type in ('XLA_GPU', 'XLA_CPU'):
# This test was passing before because soft placement silently picked the
# CPU kernels.
self.skipTest('Lacking XLA kernel')
self._test_determinism(case, seed_type)
@test_util.run_v2_only
def testGetKeyCounterAlg(self):
seed = [1, 2]
key, counter = gen_stateless_random_ops_v2.stateless_random_get_key_counter(
seed)
self.assertAllEqual(key.shape, [1])
self.assertAllEqual(counter.shape, [2])
alg = gen_stateless_random_ops_v2.stateless_random_get_alg()
self.assertAllEqual(alg.shape, [])
def assertDTypeEqual(self, a, b):
self.assertEqual(dtypes.as_dtype(a), dtypes.as_dtype(b))
def assertNoEqualPair(self, ls):
for i in range(len(ls)):
for j in range(i + 1, len(ls)):
self.assertFalse(math_ops.reduce_all(ls[i] == ls[j]))
@parameterized.parameters(['int32', 'int64'])
@test_util.run_v2_only
def testSplit(self, dtype):
"""Test for `split`."""
seed = constant_op.constant([1, 2], dtype=dtype)
new_seed = stateless.split(seed, 3)
self.assertEqual(new_seed.shape, [3, 2])
self.assertDTypeEqual(new_seed.dtype, dtype)
self.assertNoEqualPair([seed] + array_ops_stack.unstack(new_seed))
@parameterized.parameters(['int32', 'int64'])
@test_util.run_v2_only
def testFoldIn(self, dtype):
"""Test for `fold_in`."""
orig_seed = constant_op.constant([1, 2], dtype='int32')
seed = stateless.fold_in(orig_seed, constant_op.constant(3, dtype=dtype))
new_seeds = []
new_seeds.append(seed)
seed = stateless.fold_in(seed, constant_op.constant(4, dtype=dtype))
new_seeds.append(seed)
for s in new_seeds:
self.assertEqual(s.shape, [2])
self.assertDTypeEqual(s.dtype, dtype)
self.assertNoEqualPair([math_ops.cast(orig_seed, dtype)] + new_seeds)
@test_util.run_v2_only
def testErrors(self):
"""Tests that proper errors are raised.
"""
shape = [2, 3]
with self.assertRaisesWithPredicateMatch(
ValueError,
'minval must be a scalar; got a tensor of shape '):
@def_function.function
def f():
stateless.stateless_random_uniform(
shape=shape, seed=[1, 2], minval=array_ops.zeros(shape, 'int32'),
maxval=100, dtype='int32')
f()
with self.assertRaisesWithPredicateMatch(
ValueError,
'maxval must be a scalar; got a tensor of shape '):
@def_function.function
def f2():
stateless.stateless_random_uniform(
shape=shape, seed=[1, 2], minval=0,
maxval=array_ops.ones(shape, 'int32') * 100,
dtype='int32')
f2()
if __name__ == '__main__':
config.set_soft_device_placement(False)
context.context().enable_xla_devices()
test.main()
| StatelessOpsTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/ddl.py | {
"start": 38342,
"end": 38504
} | class ____(_CreateDropBase["Column[Any]"]):
"""Represent a COMMENT ON COLUMN IS NULL statement."""
__visit_name__ = "drop_column_comment"
| DropColumnComment |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/widgets/base.py | {
"start": 32714,
"end": 33342
} | class ____(CheckboxList[str]):
"""Backward compatibility util: creates a 1-sized CheckboxList
:param text: the text
"""
show_scrollbar = False
def __init__(self, text: AnyFormattedText = "", checked: bool = False) -> None:
values = [("value", text)]
super().__init__(values=values)
self.checked = checked
@property
def checked(self) -> bool:
return "value" in self.current_values
@checked.setter
def checked(self, value: bool) -> None:
if value:
self.current_values = ["value"]
else:
self.current_values = []
| Checkbox |
python | sphinx-doc__sphinx | sphinx/_cli/__init__.py | {
"start": 2203,
"end": 9930
} | class ____(argparse.ArgumentParser):
def format_help(self) -> str:
help_fragments: list[str] = [
bold(underline(__('Usage:'))),
' ',
__('{0} [OPTIONS] <COMMAND> [<ARGS>]').format(bold(self.prog)),
'\n',
'\n',
__(' The Sphinx documentation generator.'),
'\n',
]
if commands := list(_load_subcommand_descriptions()):
command_lengths = map(len, next(zip(*commands, strict=True), ()))
command_max_length = min(max(command_lengths), 22)
help_fragments += [
'\n',
bold(underline(__('Commands:'))),
'\n',
]
help_fragments += [
f' {command_name: <{command_max_length}} {command_desc}'
for command_name, command_desc in commands
]
help_fragments.append('\n')
# self._action_groups[1] is self._optionals
# Uppercase the title of the Optionals group
self._optionals.title = __('Options')
for argument_group in self._action_groups[1:]:
if arguments := [
action
for action in argument_group._group_actions
if action.help != argparse.SUPPRESS
]:
help_fragments += self._format_optional_arguments(
arguments,
argument_group.title or '',
)
help_fragments += [
'\n',
__(
'For more information, visit https://www.sphinx-doc.org/en/master/man/.'
),
'\n',
]
return ''.join(help_fragments)
def _format_optional_arguments(
self,
actions: Iterable[argparse.Action],
title: str,
) -> Iterator[str]:
yield '\n'
yield bold(underline(title + ':'))
yield '\n'
for action in actions:
prefix = ' ' * all(o[1] == '-' for o in action.option_strings)
opt = prefix + ' ' + ', '.join(map(bold, action.option_strings))
if action.nargs != 0:
opt += ' ' + self._format_metavar(
action.nargs, action.metavar, action.choices, action.dest
)
yield opt
yield '\n'
if action_help := (action.help or '').strip():
yield from (f' {line}\n' for line in action_help.splitlines())
@staticmethod
def _format_metavar(
nargs: int | str | None,
metavar: str | tuple[str, ...] | None,
choices: Iterable[str] | None,
dest: str,
) -> str:
if metavar is None:
if choices is not None:
metavar = '{' + ', '.join(sorted(choices)) + '}'
else:
metavar = dest.upper()
if nargs is None:
return f'{metavar}'
elif nargs == argparse.OPTIONAL:
return f'[{metavar}]'
elif nargs == argparse.ZERO_OR_MORE:
if len(metavar) == 2:
return f'[{metavar[0]} [{metavar[1]} ...]]'
else:
return f'[{metavar} ...]'
elif nargs == argparse.ONE_OR_MORE:
return f'{metavar} [{metavar} ...]'
elif nargs == argparse.REMAINDER:
return '...'
elif nargs == argparse.PARSER:
return f'{metavar} ...'
msg = 'invalid nargs value'
raise ValueError(msg)
def error(self, message: str) -> NoReturn:
msg = __("{0}: error: {1}\nRun '{0} --help' for information")
sys.stderr.write(msg.format(self.prog, message))
raise SystemExit(2)
def _create_parser() -> _RootArgumentParser:
parser = _RootArgumentParser(
prog='sphinx',
description=__(' Manage documentation with Sphinx.'),
epilog=__(
'For more information, visit https://www.sphinx-doc.org/en/master/man/.'
),
add_help=False,
allow_abbrev=False,
)
parser.add_argument(
'-V',
'--version',
action='store_true',
default=argparse.SUPPRESS,
help=__('Show the version and exit.'),
)
parser.add_argument(
'-h',
'-?',
'--help',
action='store_true',
default=argparse.SUPPRESS,
help=__('Show this message and exit.'),
)
# logging control
log_control = parser.add_argument_group(__('Logging'))
log_control.add_argument(
'-v',
'--verbose',
action='count',
dest='verbosity',
default=0,
help=__('Increase verbosity (can be repeated)'),
)
log_control.add_argument(
'-q',
'--quiet',
action='store_const',
dest='verbosity',
const=-1,
help=__('Only print errors and warnings.'),
)
log_control.add_argument(
'--silent',
action='store_const',
dest='verbosity',
const=-2,
help=__('No output at all'),
)
parser.add_argument(
'COMMAND',
nargs=argparse.REMAINDER,
metavar=__('<command>'),
)
return parser
def _parse_command(argv: Sequence[str] = ()) -> tuple[str, Sequence[str]]:
parser = _create_parser()
args = parser.parse_args(argv)
command_name, *command_argv = args.COMMAND or ('help',)
command_name = command_name.lower()
if terminal_supports_colour():
enable_colour()
else:
disable_colour()
# Handle '--version' or '-V' passed to the main command or any subcommand
if 'version' in args or {'-V', '--version'}.intersection(command_argv):
from sphinx import __display_version__
sys.stderr.write(f'sphinx {__display_version__}\n')
raise SystemExit(0)
# Handle '--help' or '-h' passed to the main command (subcommands may have
# their own help text)
if 'help' in args or command_name == 'help':
sys.stderr.write(parser.format_help())
raise SystemExit(0)
if command_name not in _COMMANDS:
sys.stderr.write(
__(
f'sphinx: {command_name!r} is not a sphinx command. '
"See 'sphinx --help'.\n"
)
)
raise SystemExit(2)
return command_name, command_argv
def _load_subcommand(command_name: str) -> tuple[str, _PARSER_SETUP, _RUNNER]:
try:
module: _SubcommandModule = importlib.import_module(_COMMANDS[command_name])
except KeyError:
msg = f'invalid command name {command_name!r}.'
raise ValueError(msg) from None
return module.parser_description, module.set_up_parser, module.run
def _create_sub_parser(
command_name: str,
description: str,
parser_setup: _PARSER_SETUP,
) -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
prog=f'sphinx {command_name}',
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter,
allow_abbrev=False,
)
return parser_setup(parser)
def run(argv: Sequence[str] = (), /) -> int:
locale.setlocale(locale.LC_ALL, '')
init_console()
argv = argv or sys.argv[1:]
try:
cmd_name, cmd_argv = _parse_command(argv)
cmd_description, set_up_parser, runner = _load_subcommand(cmd_name)
cmd_parser = _create_sub_parser(cmd_name, cmd_description, set_up_parser)
cmd_args = cmd_parser.parse_args(cmd_argv)
return runner(cmd_args)
except SystemExit as exc:
return exc.code # type: ignore[return-value]
except (Exception, KeyboardInterrupt):
return 2
if __name__ == '__main__':
raise SystemExit(run())
| _RootArgumentParser |
python | pytorch__pytorch | test/test_ops.py | {
"start": 99943,
"end": 100491
} | class ____(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if isinstance(args[0], torch.Tensor):
old_size = args[0].size()
old_stride = args[0].stride()
rs = func(*args, **kwargs)
check_inplace_view(func, args[0], rs, old_size, old_stride)
else:
rs = func(*args, **kwargs)
return rs
# Test to verify the correctness for tags in `tags.yaml`, also available for access through `torch.Tags`
@unMarkDynamoStrictTest
| _TestTagsMode |
python | doocs__leetcode | solution/0400-0499/0455.Assign Cookies/Solution.py | {
"start": 0,
"end": 331
} | class ____:
def findContentChildren(self, g: List[int], s: List[int]) -> int:
g.sort()
s.sort()
j = 0
for i, x in enumerate(g):
while j < len(s) and s[j] < g[i]:
j += 1
if j >= len(s):
return i
j += 1
return len(g)
| Solution |
python | django__django | tests/staticfiles_tests/storage.py | {
"start": 2443,
"end": 2561
} | class ____(ManifestStaticFilesStorage):
def file_hash(self, name, content=None):
return None
| NoneHashStorage |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 84470,
"end": 84802
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
AutoModelForTableQuestionAnswering = auto_class_update(
AutoModelForTableQuestionAnswering,
head_doc="table question answering",
checkpoint_for_example="google/tapas-base-finetuned-wtq",
)
| AutoModelForTableQuestionAnswering |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_doc_integrations.py | {
"start": 519,
"end": 1416
} | class ____(APITestCase):
endpoint = "sentry-api-0-doc-integrations"
def setUp(self) -> None:
self.user = self.create_user(email="jinx@lol.com")
self.superuser = self.create_user(email="vi@lol.com", is_superuser=True)
self.staff_user = self.create_user(is_staff=True)
self.doc_1 = self.create_doc_integration(name="test_1", is_draft=False, has_avatar=True)
self.doc_2 = self.create_doc_integration(name="test_2", is_draft=True, has_avatar=True)
self.doc_3 = self.create_doc_integration(
name="test_3",
is_draft=False,
metadata={"resources": [{"title": "Documentation", "url": "https://docs.sentry.io/"}]},
features=[2, 3, 4],
)
def get_avatars(self, response: Response) -> list[Any]:
return [doc.get("avatar") for doc in response.data]
@control_silo_test
| DocIntegrationsTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/reduce_join_op_test.py | {
"start": 2089,
"end": 2333
} | class ____(test.TestCase):
"""Test case with Python3-compatible string comparator."""
def assertAllEqualUnicode(self, truth, actual):
self.assertAllEqual(
np.array(truth).astype("U"), np.array(actual).astype("U"))
| UnicodeTestCase |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vision.py | {
"start": 18692,
"end": 19318
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
op = CloudVisionDetectImageLabelsOperator(image=DETECT_TEST_IMAGE, task_id="id")
op.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.label_detection.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=DEFAULT, timeout=None, additional_properties=None
)
| TestCloudVisionDetectImageLabelsOperator |
python | celery__celery | celery/canvas.py | {
"start": 7653,
"end": 35886
} | class ____(dict):
"""Task Signature.
Class that wraps the arguments and execution options
for a single task invocation.
Used as the parts in a :class:`group` and other constructs,
or to pass tasks around as callbacks while being compatible
with serializers with a strict type subset.
Signatures can also be created from tasks:
- Using the ``.signature()`` method that has the same signature
as ``Task.apply_async``:
.. code-block:: pycon
>>> add.signature(args=(1,), kwargs={'kw': 2}, options={})
- or the ``.s()`` shortcut that works for star arguments:
.. code-block:: pycon
>>> add.s(1, kw=2)
- the ``.s()`` shortcut does not allow you to specify execution options
but there's a chaining `.set` method that returns the signature:
.. code-block:: pycon
>>> add.s(2, 2).set(countdown=10).set(expires=30).delay()
Note:
You should use :func:`~celery.signature` to create new signatures.
The ``Signature`` class is the type returned by that function and
should be used for ``isinstance`` checks for signatures.
See Also:
:ref:`guide-canvas` for the complete guide.
Arguments:
task (Union[Type[celery.app.task.Task], str]): Either a task
class/instance, or the name of a task.
args (Tuple): Positional arguments to apply.
kwargs (Dict): Keyword arguments to apply.
options (Dict): Additional options to :meth:`Task.apply_async`.
Note:
If the first argument is a :class:`dict`, the other
arguments will be ignored and the values in the dict will be used
instead::
>>> s = signature('tasks.add', args=(2, 2))
>>> signature(s)
{'task': 'tasks.add', args=(2, 2), kwargs={}, options={}}
"""
TYPES = {}
_app = _type = None
# The following fields must not be changed during freezing/merging because
# to do so would disrupt completion of parent tasks
_IMMUTABLE_OPTIONS = {"group_id", "stamped_headers"}
@classmethod
def register_type(cls, name=None):
"""Register a new type of signature.
Used as a class decorator, for example:
>>> @Signature.register_type()
>>> class mysig(Signature):
>>> pass
"""
def _inner(subclass):
cls.TYPES[name or subclass.__name__] = subclass
return subclass
return _inner
@classmethod
def from_dict(cls, d, app=None):
"""Create a new signature from a dict.
Subclasses can override this method to customize how are
they created from a dict.
"""
typ = d.get('subtask_type')
if typ:
target_cls = cls.TYPES[typ]
if target_cls is not cls:
return target_cls.from_dict(d, app=app)
return Signature(d, app=app)
def __init__(self, task=None, args=None, kwargs=None, options=None,
type=None, subtask_type=None, immutable=False,
app=None, **ex):
self._app = app
if isinstance(task, dict):
super().__init__(task) # works like dict(d)
else:
# Also supports using task class/instance instead of string name.
try:
task_name = task.name
except AttributeError:
task_name = task
else:
self._type = task
super().__init__(
task=task_name, args=tuple(args or ()),
kwargs=kwargs or {},
options=dict(options or {}, **ex),
subtask_type=subtask_type,
immutable=immutable,
)
def __call__(self, *partial_args, **partial_kwargs):
"""Call the task directly (in the current process)."""
args, kwargs, _ = self._merge(partial_args, partial_kwargs, None)
return self.type(*args, **kwargs)
def delay(self, *partial_args, **partial_kwargs):
"""Shortcut to :meth:`apply_async` using star arguments."""
return self.apply_async(partial_args, partial_kwargs)
def apply(self, args=None, kwargs=None, **options):
"""Call task locally.
Same as :meth:`apply_async` but executed the task inline instead
of sending a task message.
"""
args = args if args else ()
kwargs = kwargs if kwargs else {}
# Extra options set to None are dismissed
options = {k: v for k, v in options.items() if v is not None}
# For callbacks: extra args are prepended to the stored args.
args, kwargs, options = self._merge(args, kwargs, options)
return self.type.apply(args, kwargs, **options)
def apply_async(self, args=None, kwargs=None, route_name=None, **options):
"""Apply this task asynchronously.
Arguments:
args (Tuple): Partial args to be prepended to the existing args.
kwargs (Dict): Partial kwargs to be merged with existing kwargs.
options (Dict): Partial options to be merged
with existing options.
Returns:
~@AsyncResult: promise of future evaluation.
See also:
:meth:`~@Task.apply_async` and the :ref:`guide-calling` guide.
"""
args = args if args else ()
kwargs = kwargs if kwargs else {}
# Extra options set to None are dismissed
options = {k: v for k, v in options.items() if v is not None}
try:
_apply = self._apply_async
except IndexError: # pragma: no cover
# no tasks for chain, etc to find type
return
# For callbacks: extra args are prepended to the stored args.
if args or kwargs or options:
args, kwargs, options = self._merge(args, kwargs, options)
else:
args, kwargs, options = self.args, self.kwargs, self.options
# pylint: disable=too-many-function-args
# Works on this, as it's a property
return _apply(args, kwargs, **options)
def _merge(self, args=None, kwargs=None, options=None, force=False):
"""Merge partial args/kwargs/options with existing ones.
If the signature is immutable and ``force`` is False, the existing
args/kwargs will be returned as-is and only the options will be merged.
Stamped headers are considered immutable and will not be merged regardless.
Arguments:
args (Tuple): Partial args to be prepended to the existing args.
kwargs (Dict): Partial kwargs to be merged with existing kwargs.
options (Dict): Partial options to be merged with existing options.
force (bool): If True, the args/kwargs will be merged even if the signature is
immutable. The stamped headers are not affected by this option and will not
be merged regardless.
Returns:
Tuple: (args, kwargs, options)
"""
args = args if args else ()
kwargs = kwargs if kwargs else {}
if options is not None:
# We build a new options dictionary where values in `options`
# override values in `self.options` except for keys which are
# noted as being immutable (unrelated to signature immutability)
# implying that allowing their value to change would stall tasks
immutable_options = self._IMMUTABLE_OPTIONS
if "stamped_headers" in self.options:
immutable_options = self._IMMUTABLE_OPTIONS.union(set(self.options.get("stamped_headers", [])))
# merge self.options with options without overriding stamped headers from self.options
new_options = {**self.options, **{
k: v for k, v in options.items()
if k not in immutable_options or k not in self.options
}}
else:
new_options = self.options
if self.immutable and not force:
return (self.args, self.kwargs, new_options)
return (tuple(args) + tuple(self.args) if args else self.args,
dict(self.kwargs, **kwargs) if kwargs else self.kwargs,
new_options)
def clone(self, args=None, kwargs=None, **opts):
"""Create a copy of this signature.
Arguments:
args (Tuple): Partial args to be prepended to the existing args.
kwargs (Dict): Partial kwargs to be merged with existing kwargs.
options (Dict): Partial options to be merged with
existing options.
"""
args = args if args else ()
kwargs = kwargs if kwargs else {}
# need to deepcopy options so origins links etc. is not modified.
if args or kwargs or opts:
args, kwargs, opts = self._merge(args, kwargs, opts)
else:
args, kwargs, opts = self.args, self.kwargs, self.options
signature = Signature.from_dict({'task': self.task,
'args': tuple(args),
'kwargs': kwargs,
'options': deepcopy(opts),
'subtask_type': self.subtask_type,
'immutable': self.immutable},
app=self._app)
signature._type = self._type
return signature
partial = clone
def freeze(self, _id=None, group_id=None, chord=None,
root_id=None, parent_id=None, group_index=None):
"""Finalize the signature by adding a concrete task id.
The task won't be called and you shouldn't call the signature
twice after freezing it as that'll result in two task messages
using the same task id.
The arguments are used to override the signature's headers during
freezing.
Arguments:
_id (str): Task id to use if it didn't already have one.
New UUID is generated if not provided.
group_id (str): Group id to use if it didn't already have one.
chord (Signature): Chord body when freezing a chord header.
root_id (str): Root id to use.
parent_id (str): Parent id to use.
group_index (int): Group index to use.
Returns:
~@AsyncResult: promise of future evaluation.
"""
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
opts = self.options
try:
# if there is already an id for this task, return it
tid = opts['task_id']
except KeyError:
# otherwise, use the _id sent to this function, falling back on a generated UUID
tid = opts['task_id'] = _id or uuid()
if root_id:
opts['root_id'] = root_id
if parent_id:
opts['parent_id'] = parent_id
if 'reply_to' not in opts:
# fall back on unique ID for this thread in the app
opts['reply_to'] = self.app.thread_oid
if group_id and "group_id" not in opts:
opts['group_id'] = group_id
if chord:
opts['chord'] = chord
if group_index is not None:
opts['group_index'] = group_index
# pylint: disable=too-many-function-args
# Works on this, as it's a property.
return self.AsyncResult(tid)
_freeze = freeze
def replace(self, args=None, kwargs=None, options=None):
"""Replace the args, kwargs or options set for this signature.
These are only replaced if the argument for the section is
not :const:`None`.
"""
signature = self.clone()
if args is not None:
signature.args = args
if kwargs is not None:
signature.kwargs = kwargs
if options is not None:
signature.options = options
return signature
def set(self, immutable=None, **options):
"""Set arbitrary execution options (same as ``.options.update(…)``).
Returns:
Signature: This is a chaining method call
(i.e., it will return ``self``).
"""
if immutable is not None:
self.set_immutable(immutable)
self.options.update(options)
return self
def set_immutable(self, immutable):
self.immutable = immutable
def _stamp_headers(self, visitor_headers=None, append_stamps=False, self_headers=True, **headers):
"""Collect all stamps from visitor, headers and self,
and return an idempotent dictionary of stamps.
.. versionadded:: 5.3
Arguments:
visitor_headers (Dict): Stamps from a visitor method.
append_stamps (bool):
If True, duplicated stamps will be appended to a list.
If False, duplicated stamps will be replaced by the last stamp.
self_headers (bool):
If True, stamps from self.options will be added.
If False, stamps from self.options will be ignored.
headers (Dict): Stamps that should be added to headers.
Returns:
Dict: Merged stamps.
"""
# Use append_stamps=False to prioritize visitor_headers over headers in case of duplicated stamps.
# This will lose duplicated headers from the headers argument, but that is the best effort solution
# to avoid implicitly casting the duplicated stamp into a list of both stamps from headers and
# visitor_headers of the same key.
# Example:
# headers = {"foo": "bar1"}
# visitor_headers = {"foo": "bar2"}
# _merge_dictionaries(headers, visitor_headers, aggregate_duplicates=True)
# headers["foo"] == ["bar1", "bar2"] -> The stamp is now a list
# _merge_dictionaries(headers, visitor_headers, aggregate_duplicates=False)
# headers["foo"] == "bar2" -> "bar1" is lost, but the stamp is according to the visitor
headers = headers.copy()
if "stamped_headers" not in headers:
headers["stamped_headers"] = list(headers.keys())
# Merge headers with visitor headers
if visitor_headers is not None:
visitor_headers = visitor_headers or {}
if "stamped_headers" not in visitor_headers:
visitor_headers["stamped_headers"] = list(visitor_headers.keys())
# Sync from visitor
_merge_dictionaries(headers, visitor_headers, aggregate_duplicates=append_stamps)
headers["stamped_headers"] = list(set(headers["stamped_headers"]))
# Merge headers with self.options
if self_headers:
stamped_headers = set(headers.get("stamped_headers", []))
stamped_headers.update(self.options.get("stamped_headers", []))
headers["stamped_headers"] = list(stamped_headers)
# Only merge stamps that are in stamped_headers from self.options
redacted_options = {k: v for k, v in self.options.items() if k in headers["stamped_headers"]}
# Sync from self.options
_merge_dictionaries(headers, redacted_options, aggregate_duplicates=append_stamps)
headers["stamped_headers"] = list(set(headers["stamped_headers"]))
return headers
def stamp(self, visitor=None, append_stamps=False, **headers):
"""Stamp this signature with additional custom headers.
Using a visitor will pass on responsibility for the stamping
to the visitor.
.. versionadded:: 5.3
Arguments:
visitor (StampingVisitor): Visitor API object.
append_stamps (bool):
If True, duplicated stamps will be appended to a list.
If False, duplicated stamps will be replaced by the last stamp.
headers (Dict): Stamps that should be added to headers.
"""
self.stamp_links(visitor, append_stamps, **headers)
headers = headers.copy()
visitor_headers = None
if visitor is not None:
visitor_headers = visitor.on_signature(self, **headers) or {}
headers = self._stamp_headers(visitor_headers, append_stamps, **headers)
return self.set(**headers)
def stamp_links(self, visitor, append_stamps=False, **headers):
"""Stamp this signature links (callbacks and errbacks).
Using a visitor will pass on responsibility for the stamping
to the visitor.
Arguments:
visitor (StampingVisitor): Visitor API object.
append_stamps (bool):
If True, duplicated stamps will be appended to a list.
If False, duplicated stamps will be replaced by the last stamp.
headers (Dict): Stamps that should be added to headers.
"""
non_visitor_headers = headers.copy()
# When we are stamping links, we want to avoid adding stamps from the linked signature itself
# so we turn off self_headers to stamp the link only with the visitor and the headers.
# If it's enabled, the link copies the stamps of the linked signature, and we don't want that.
self_headers = False
# Stamp all of the callbacks of this signature
headers = deepcopy(non_visitor_headers)
for link in maybe_list(self.options.get('link')) or []:
link = maybe_signature(link, app=self.app)
visitor_headers = None
if visitor is not None:
visitor_headers = visitor.on_callback(link, **headers) or {}
headers = self._stamp_headers(
visitor_headers=visitor_headers,
append_stamps=append_stamps,
self_headers=self_headers,
**headers
)
link.stamp(visitor, append_stamps, **headers)
# Stamp all of the errbacks of this signature
headers = deepcopy(non_visitor_headers)
for link in maybe_list(self.options.get('link_error')) or []:
link = maybe_signature(link, app=self.app)
visitor_headers = None
if visitor is not None:
visitor_headers = visitor.on_errback(link, **headers) or {}
headers = self._stamp_headers(
visitor_headers=visitor_headers,
append_stamps=append_stamps,
self_headers=self_headers,
**headers
)
link.stamp(visitor, append_stamps, **headers)
def _with_list_option(self, key):
"""Gets the value at the given self.options[key] as a list.
If the value is not a list, it will be converted to one and saved in self.options.
If the key does not exist, an empty list will be set and returned instead.
Arguments:
key (str): The key to get the value for.
Returns:
List: The value at the given key as a list or an empty list if the key does not exist.
"""
items = self.options.setdefault(key, [])
if not isinstance(items, MutableSequence):
items = self.options[key] = [items]
return items
def append_to_list_option(self, key, value):
"""Appends the given value to the list at the given key in self.options."""
items = self._with_list_option(key)
if value not in items:
items.append(value)
return value
def extend_list_option(self, key, value):
"""Extends the list at the given key in self.options with the given value.
If the value is not a list, it will be converted to one.
"""
items = self._with_list_option(key)
items.extend(maybe_list(value))
def link(self, callback):
"""Add callback task to be applied if this task succeeds.
Returns:
Signature: the argument passed, for chaining
or use with :func:`~functools.reduce`.
"""
return self.append_to_list_option('link', callback)
def link_error(self, errback):
"""Add callback task to be applied on error in task execution.
Returns:
Signature: the argument passed, for chaining
or use with :func:`~functools.reduce`.
"""
return self.append_to_list_option('link_error', errback)
def on_error(self, errback):
"""Version of :meth:`link_error` that supports chaining.
on_error chains the original signature, not the errback so::
>>> add.s(2, 2).on_error(errback.s()).delay()
calls the ``add`` task, not the ``errback`` task, but the
reverse is true for :meth:`link_error`.
"""
self.link_error(errback)
return self
def flatten_links(self):
"""Return a recursive list of dependencies.
"unchain" if you will, but with links intact.
"""
return list(itertools.chain.from_iterable(itertools.chain(
[[self]],
(link.flatten_links()
for link in maybe_list(self.options.get('link')) or [])
)))
def __or__(self, other):
"""Chaining operator.
Example:
>>> add.s(2, 2) | add.s(4) | add.s(8)
Returns:
chain: Constructs a :class:`~celery.canvas.chain` of the given signatures.
"""
if isinstance(other, _chain):
# task | chain -> chain
return _chain(seq_concat_seq(
(self,), other.unchain_tasks()), app=self._app)
elif isinstance(other, group):
# unroll group with one member
other = maybe_unroll_group(other)
# task | group() -> chain
return _chain(self, other, app=self.app)
elif isinstance(other, Signature):
# task | task -> chain
return _chain(self, other, app=self._app)
return NotImplemented
def __ior__(self, other):
# Python 3.9 introduces | as the merge operator for dicts.
# We override the in-place version of that operator
# so that canvases continue to work as they did before.
return self.__or__(other)
def election(self):
type = self.type
app = type.app
tid = self.options.get('task_id') or uuid()
with app.producer_or_acquire(None) as producer:
props = type.backend.on_task_call(producer, tid)
app.control.election(tid, 'task',
self.clone(task_id=tid, **props),
connection=producer.connection)
return type.AsyncResult(tid)
def reprcall(self, *args, **kwargs):
"""Return a string representation of the signature.
Merges the given arguments with the signature's arguments
only for the purpose of generating the string representation.
The signature itself is not modified.
Example:
>>> add.s(2, 2).reprcall()
'add(2, 2)'
"""
args, kwargs, _ = self._merge(args, kwargs, {}, force=True)
return reprcall(self['task'], args, kwargs)
def __deepcopy__(self, memo):
memo[id(self)] = self
return dict(self) # TODO: Potential bug of being a shallow copy
def __invert__(self):
return self.apply_async().get()
def __reduce__(self):
# for serialization, the task type is lazily loaded,
# and not stored in the dict itself.
return signature, (dict(self),)
def __json__(self):
return dict(self)
def __repr__(self):
return self.reprcall()
def items(self):
for k, v in super().items():
yield k.decode() if isinstance(k, bytes) else k, v
@property
def name(self):
# for duck typing compatibility with Task.name
return self.task
@cached_property
def type(self):
return self._type or self.app.tasks[self['task']]
@cached_property
def app(self):
return self._app or current_app
@cached_property
def AsyncResult(self):
try:
return self.type.AsyncResult
except KeyError: # task not registered
return self.app.AsyncResult
@cached_property
def _apply_async(self):
try:
return self.type.apply_async
except KeyError:
return _partial(self.app.send_task, self['task'])
id = getitem_property('options.task_id', 'Task UUID')
parent_id = getitem_property('options.parent_id', 'Task parent UUID.')
root_id = getitem_property('options.root_id', 'Task root UUID.')
task = getitem_property('task', 'Name of task.')
args = getitem_property('args', 'Positional arguments to task.')
kwargs = getitem_property('kwargs', 'Keyword arguments to task.')
options = getitem_property('options', 'Task execution options.')
subtask_type = getitem_property('subtask_type', 'Type of signature')
immutable = getitem_property(
'immutable', 'Flag set if no longer accepts new arguments')
def _prepare_chain_from_options(options, tasks, use_link):
# When we publish groups we reuse the same options dictionary for all of
# the tasks in the group. See:
# https://github.com/celery/celery/blob/fb37cb0b8/celery/canvas.py#L1022.
# Issue #5354 reported that the following type of canvases
# causes a Celery worker to hang:
# group(
# add.s(1, 1),
# add.s(1, 1)
# ) | tsum.s() | add.s(1) | group(add.s(1), add.s(1))
# The resolution of #5354 in PR #5681 was to only set the `chain` key
# in the options dictionary if it is not present.
# Otherwise we extend the existing list of tasks in the chain with the new
# tasks: options['chain'].extend(chain_).
# Before PR #5681 we overrode the `chain` key in each iteration
# of the loop which applies all the tasks in the group:
# options['chain'] = tasks if not use_link else None
# This caused Celery to execute chains correctly in most cases since
# in each iteration the `chain` key would reset itself to a new value
# and the side effect of mutating the key did not propagate
# to the next task in the group.
# Since we now mutated the `chain` key, a *list* which is passed
# by *reference*, the next task in the group will extend the list
# of tasks in the chain instead of setting a new one from the chain_
# variable above.
# This causes Celery to execute a chain, even though there might not be
# one to begin with. Alternatively, it causes Celery to execute more tasks
# that were previously present in the previous task in the group.
# The solution is to be careful and never mutate the options dictionary
# to begin with.
# Here is an example of a canvas which triggers this issue:
# add.s(5, 6) | group((add.s(1) | add.s(2), add.s(3))).
# The expected result is [14, 14]. However, when we extend the `chain`
# key the `add.s(3)` task erroneously has `add.s(2)` in its chain since
# it was previously applied to `add.s(1)`.
# Without being careful not to mutate the options dictionary, the result
# in this case is [16, 14].
# To avoid deep-copying the entire options dictionary every single time we
# run a chain we use a ChainMap and ensure that we never mutate
# the original `chain` key, hence we use list_a + list_b to create a new
# list.
if use_link:
return ChainMap({'chain': None}, options)
elif 'chain' not in options:
return ChainMap({'chain': tasks}, options)
elif tasks is not None:
# chain option may already be set, resulting in
# "multiple values for keyword argument 'chain'" error.
# Issue #3379.
# If a chain already exists, we need to extend it with the next
# tasks in the chain.
# Issue #5354.
# WARNING: Be careful not to mutate `options['chain']`.
return ChainMap({'chain': options['chain'] + tasks},
options)
@Signature.register_type(name='chain')
| Signature |
python | pypa__hatch | tests/python/test_core.py | {
"start": 1619,
"end": 3272
} | class ____:
def test_source_does_not_exist(self, temp_dir):
manager = PythonManager(temp_dir / "foo")
assert manager.get_installed() == {}
def test_not_a_directory(self, temp_dir):
manager = PythonManager(temp_dir)
dist = get_distribution("3.10")
path = temp_dir / dist.name
path.touch()
assert manager.get_installed() == {}
def test_no_metadata_file(self, temp_dir):
manager = PythonManager(temp_dir)
dist = get_distribution("3.10")
path = temp_dir / dist.name
path.mkdir()
assert manager.get_installed() == {}
def test_no_python_path(self, temp_dir):
manager = PythonManager(temp_dir)
dist = get_distribution("3.10")
path = temp_dir / dist.name
path.mkdir()
metadata_file = path / InstalledDistribution.metadata_filename()
metadata_file.write_text(json.dumps({"source": dist.source}))
assert manager.get_installed() == {}
def test_order(self, temp_dir, compatible_python_distributions):
manager = PythonManager(temp_dir)
for name in compatible_python_distributions:
dist = get_distribution(name)
path = temp_dir / dist.name
path.mkdir()
metadata_file = path / InstalledDistribution.metadata_filename()
metadata_file.write_text(json.dumps({"source": dist.source}))
python_path = path / dist.python_path
python_path.parent.ensure_dir_exists()
python_path.touch()
assert tuple(manager.get_installed()) == compatible_python_distributions
| TestGetInstalled |
python | SmileyChris__easy-thumbnails | easy_thumbnails/exceptions.py | {
"start": 49,
"end": 268
} | class ____(EasyThumbnailsError):
pass
# Make this error silent when it crops up in a template (most likely via
# Thumbnailer.__getitem__).
InvalidImageFormatError.silent_variable_failure = True
| InvalidImageFormatError |
python | pyca__cryptography | tests/hazmat/primitives/decrepit/test_algorithms.py | {
"start": 7107,
"end": 7393
} | class ____:
test_cfb = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "CAST5"),
["cast5-cfb.txt"],
lambda key, **kwargs: CAST5(binascii.unhexlify(key)),
lambda iv, **kwargs: CFB(binascii.unhexlify(iv)),
)
| TestCAST5ModeCFB |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_utils_test.py | {
"start": 1367,
"end": 13431
} | class ____(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._sess = session.Session()
with cls._sess:
cls._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
cls._b_init_val = np.array([[2.0], [-1.0]])
cls._c_val = np.array([[-4.0], [np.nan]])
cls._a_init = constant_op.constant(
cls._a_init_val, shape=[2, 2], name="a1_init")
cls._b_init = constant_op.constant(
cls._b_init_val, shape=[2, 1], name="b_init")
cls._a = variable_v1.VariableV1(cls._a_init, name="a1")
cls._b = variable_v1.VariableV1(cls._b_init, name="b")
cls._c = constant_op.constant(cls._c_val, shape=[2, 1], name="c")
# Matrix product of a and b.
cls._p = math_ops.matmul(cls._a, cls._b, name="p1")
# Sum of two vectors.
cls._s = math_ops.add(cls._p, cls._c, name="s")
cls._graph = cls._sess.graph
# These are all the expected nodes in the graph:
# - Two variables (a, b), each with four nodes (Variable, init, Assign,
# read).
# - One constant (c).
# - One add operation and one matmul operation.
# - One wildcard node name ("*") that covers nodes created internally
# by TensorFlow itself (e.g., Grappler).
cls._expected_num_nodes = 4 * 2 + 1 + 1 + 1 + 1
def setUp(self):
self._run_options = config_pb2.RunOptions()
def _verify_watches(self, watch_opts, expected_output_slot,
expected_debug_ops, expected_debug_urls):
"""Verify a list of debug tensor watches.
This requires all watches in the watch list have exactly the same
output_slot, debug_ops and debug_urls.
Args:
watch_opts: Repeated protobuf field of DebugTensorWatch.
expected_output_slot: Expected output slot index, as an integer.
expected_debug_ops: Expected debug ops, as a list of strings.
expected_debug_urls: Expected debug URLs, as a list of strings.
Returns:
List of node names from the list of debug tensor watches.
"""
node_names = []
for watch in watch_opts:
node_names.append(watch.node_name)
if watch.node_name == "*":
self.assertEqual(-1, watch.output_slot)
self.assertEqual(expected_debug_ops, watch.debug_ops)
self.assertEqual(expected_debug_urls, watch.debug_urls)
else:
self.assertEqual(expected_output_slot, watch.output_slot)
self.assertEqual(expected_debug_ops, watch.debug_ops)
self.assertEqual(expected_debug_urls, watch.debug_urls)
return node_names
def testAddDebugTensorWatches_defaultDebugOp(self):
debug_utils.add_debug_tensor_watch(
self._run_options, "foo/node_a", 1, debug_urls="file:///tmp/tfdbg_1")
debug_utils.add_debug_tensor_watch(
self._run_options, "foo/node_b", 0, debug_urls="file:///tmp/tfdbg_2")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(2, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
watch_1 = debug_watch_opts[1]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(1, watch_0.output_slot)
self.assertEqual("foo/node_b", watch_1.node_name)
self.assertEqual(0, watch_1.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugIdentity"], watch_0.debug_ops)
self.assertEqual(["DebugIdentity"], watch_1.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
self.assertEqual(["file:///tmp/tfdbg_2"], watch_1.debug_urls)
def testAddDebugTensorWatches_explicitDebugOp(self):
debug_utils.add_debug_tensor_watch(
self._run_options,
"foo/node_a",
0,
debug_ops="DebugNanCount",
debug_urls="file:///tmp/tfdbg_1")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(1, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(0, watch_0.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugNanCount"], watch_0.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
def testAddDebugTensorWatches_multipleDebugOps(self):
debug_utils.add_debug_tensor_watch(
self._run_options,
"foo/node_a",
0,
debug_ops=["DebugNanCount", "DebugIdentity"],
debug_urls="file:///tmp/tfdbg_1")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(1, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(0, watch_0.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugNanCount", "DebugIdentity"], watch_0.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1"], watch_0.debug_urls)
def testAddDebugTensorWatches_multipleURLs(self):
debug_utils.add_debug_tensor_watch(
self._run_options,
"foo/node_a",
0,
debug_ops="DebugNanCount",
debug_urls=["file:///tmp/tfdbg_1", "file:///tmp/tfdbg_2"])
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(1, len(debug_watch_opts))
watch_0 = debug_watch_opts[0]
self.assertEqual("foo/node_a", watch_0.node_name)
self.assertEqual(0, watch_0.output_slot)
# Verify default debug op name.
self.assertEqual(["DebugNanCount"], watch_0.debug_ops)
# Verify debug URLs.
self.assertEqual(["file:///tmp/tfdbg_1", "file:///tmp/tfdbg_2"],
watch_0.debug_urls)
def testWatchGraph_allNodes(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_ops=["DebugIdentity", "DebugNanCount"],
debug_urls="file:///tmp/tfdbg_1")
debug_watch_opts = self._run_options.debug_options.debug_tensor_watch_opts
self.assertEqual(self._expected_num_nodes, len(debug_watch_opts))
# Verify that each of the nodes in the graph with output tensors in the
# graph have debug tensor watch.
node_names = self._verify_watches(debug_watch_opts, 0,
["DebugIdentity", "DebugNanCount"],
["file:///tmp/tfdbg_1"])
# Verify the node names.
self.assertIn("a1_init", node_names)
self.assertIn("a1", node_names)
self.assertIn("a1/Assign", node_names)
self.assertIn("a1/read", node_names)
self.assertIn("b_init", node_names)
self.assertIn("b", node_names)
self.assertIn("b/Assign", node_names)
self.assertIn("b/read", node_names)
self.assertIn("c", node_names)
self.assertIn("p1", node_names)
self.assertIn("s", node_names)
# Assert that the wildcard node name has been created.
self.assertIn("*", node_names)
def testWatchGraph_nodeNameAllowlist(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_allowlist="(a1$|a1_init$|a1/.*|p1$)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(
sorted(["a1_init", "a1", "a1/Assign", "a1/read", "p1"]),
sorted(node_names))
def testWatchGraph_opTypeAllowlist(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
op_type_regex_allowlist="(Variable|MatMul)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(sorted(["a1", "b", "p1"]), sorted(node_names))
def testWatchGraph_nodeNameAndOpTypeAllowlists(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_allowlist="([a-z]+1$)",
op_type_regex_allowlist="(MatMul)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(["p1"], node_names)
def testWatchGraph_tensorDTypeAllowlist(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
tensor_dtype_regex_allowlist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertItemsEqual(["a1", "a1/Assign", "b", "b/Assign"], node_names)
def testWatchGraph_nodeNameAndTensorDTypeAllowlists(self):
debug_utils.watch_graph(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_allowlist="^a.*",
tensor_dtype_regex_allowlist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertItemsEqual(["a1", "a1/Assign"], node_names)
def testWatchGraph_nodeNameDenylist(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_denylist="(a1$|a1_init$|a1/.*|p1$)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(
sorted(["b_init", "b", "b/Assign", "b/read", "c", "s"]),
sorted(node_names))
def testWatchGraph_opTypeDenylist(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
op_type_regex_denylist="(Variable|Identity|Assign|Const)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(sorted(["p1", "s"]), sorted(node_names))
def testWatchGraph_nodeNameAndOpTypeDenylists(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_denylist="p1$",
op_type_regex_denylist="(Variable|Identity|Assign|Const)")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertEqual(["s"], node_names)
def testWatchGraph_tensorDTypeDenylists(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
tensor_dtype_regex_denylist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertNotIn("a1", node_names)
self.assertNotIn("a1/Assign", node_names)
self.assertNotIn("b", node_names)
self.assertNotIn("b/Assign", node_names)
self.assertIn("s", node_names)
def testWatchGraph_nodeNameAndTensorDTypeDenylists(self):
debug_utils.watch_graph_with_denylists(
self._run_options,
self._graph,
debug_urls="file:///tmp/tfdbg_1",
node_name_regex_denylist="^s$",
tensor_dtype_regex_denylist=".*_ref")
node_names = self._verify_watches(
self._run_options.debug_options.debug_tensor_watch_opts, 0,
["DebugIdentity"], ["file:///tmp/tfdbg_1"])
self.assertNotIn("a1", node_names)
self.assertNotIn("a1/Assign", node_names)
self.assertNotIn("b", node_names)
self.assertNotIn("b/Assign", node_names)
self.assertNotIn("s", node_names)
if __name__ == "__main__":
googletest.main()
| DebugUtilsTest |
python | sqlalchemy__sqlalchemy | test/orm/test_dataclasses.py | {
"start": 28458,
"end": 31414
} | class ____(fixtures.TestBase):
def test_propagate_w_plain_mixin_col(self, run_test):
@dataclasses.dataclass
class BaseType:
__sa_dataclass_metadata_key__ = "sa"
__table_args__ = {"mysql_engine": "InnoDB"}
discriminator: str = Column("type", String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id: int = Column(Integer, primary_key=True)
value: int = Column(Integer())
timestamp: int = Column(Integer)
run_test(BaseType)
def test_propagate_w_field_mixin_col(self, run_test):
@dataclasses.dataclass
class BaseType:
__sa_dataclass_metadata_key__ = "sa"
__table_args__ = {"mysql_engine": "InnoDB"}
discriminator: str = Column("type", String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id: int = Column(Integer, primary_key=True)
value: int = Column(Integer())
timestamp: int = dataclasses.field(
init=False,
metadata={"sa": Column(Integer, nullable=False)},
)
run_test(BaseType)
def test_propagate_w_field_mixin_col_and_default(self, run_test):
@dataclasses.dataclass
class BaseType:
__sa_dataclass_metadata_key__ = "sa"
__table_args__ = {"mysql_engine": "InnoDB"}
discriminator: str = Column("type", String(50))
__mapper_args__ = dict(polymorphic_on=discriminator)
id: int = Column(Integer, primary_key=True)
value: int = Column(Integer())
timestamp: int = dataclasses.field(
init=False,
default=None,
metadata={"sa": Column(Integer, nullable=False)},
)
run_test(BaseType)
@testing.fixture()
def run_test(self):
def go(BaseType):
declarative = registry().mapped
@declarative
@dataclasses.dataclass
class Single(BaseType):
__tablename__ = "single"
__mapper_args__ = dict(polymorphic_identity="type1")
@declarative
@dataclasses.dataclass
class Joined(Single):
__tablename__ = "joined"
__mapper_args__ = dict(polymorphic_identity="type2")
id = Column(Integer, ForeignKey("single.id"), primary_key=True)
eq_(Single.__table__.name, "single")
eq_(
list(Single.__table__.c.keys()),
["type", "id", "value", "timestamp"],
)
eq_(Single.__table__.kwargs, {"mysql_engine": "InnoDB"})
eq_(Joined.__table__.name, "joined")
eq_(list(Joined.__table__.c.keys()), ["id"])
eq_(Joined.__table__.kwargs, {"mysql_engine": "InnoDB"})
yield go
clear_mappers()
| PropagationFromAbstractTest |
python | doocs__leetcode | lcci/16.21.Sum Swap/Solution.py | {
"start": 0,
"end": 336
} | class ____:
def findSwapValues(self, array1: List[int], array2: List[int]) -> List[int]:
diff = sum(array1) - sum(array2)
if diff & 1:
return []
diff >>= 1
s = set(array2)
for a in array1:
if (b := (a - diff)) in s:
return [a, b]
return []
| Solution |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py | {
"start": 6907,
"end": 14353
} | class ____(base_layer.Layer):
"""Abstract object representing an RNN cell.
Every `RNNCell` must have the properties below and implement `call` with
the signature `(output, next_state) = call(input, state)`. The optional
third input argument, `scope`, is allowed for backwards compatibility
purposes; but should be left off for new subclasses.
This definition of cell differs from the definition used in the literature.
In the literature, 'cell' refers to an object with a single scalar output.
This definition refers to a horizontal array of such units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
(possibly nested tuple of) TensorShape object(s), then it should return a
matching structure of Tensors having shape `[batch_size].concatenate(s)`
for each `s` in `self.batch_size`.
"""
def __init__(self, trainable=True, name=None, dtype=None, **kwargs):
super(RNNCell, self).__init__(
trainable=trainable, name=name, dtype=dtype, **kwargs)
# Attribute that indicates whether the cell is a TF RNN cell, due the slight
# difference between TF and Keras RNN cell. Notably the state is not wrapped
# in a list for TF cell where they are single tensor state, whereas keras
# cell will wrap the state into a list, and call() will have to unwrap them.
self._is_tf_rnn_cell = True
def __call__(self, inputs, state, scope=None):
"""Run this RNN cell on inputs, starting from the given state.
Args:
inputs: `2-D` tensor with shape `[batch_size, input_size]`.
state: if `self.state_size` is an integer, this should be a `2-D Tensor`
with shape `[batch_size, self.state_size]`. Otherwise, if
`self.state_size` is a tuple of integers, this should be a tuple with
shapes `[batch_size, s] for s in self.state_size`.
scope: VariableScope for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
- New state: Either a single `2-D` tensor, or a tuple of tensors matching
the arity and shapes of `state`.
"""
if scope is not None:
with vs.variable_scope(
scope, custom_getter=self._rnn_get_variable) as scope:
return super(RNNCell, self).__call__(inputs, state, scope=scope)
else:
scope_attrname = "rnncell_scope"
scope = getattr(self, scope_attrname, None)
if scope is None:
scope = vs.variable_scope(
vs.get_variable_scope(), custom_getter=self._rnn_get_variable)
setattr(self, scope_attrname, scope)
with scope:
return super(RNNCell, self).__call__(inputs, state)
def _rnn_get_variable(self, getter, *args, **kwargs):
variable = getter(*args, **kwargs)
if ops.executing_eagerly_outside_functions():
trainable = variable.trainable
else:
trainable = (
variable in tf_variables.trainable_variables() or
(base_layer_utils.is_split_variable(variable) and
list(variable)[0] in tf_variables.trainable_variables()))
if trainable and all(variable is not v for v in self._trainable_weights):
self._trainable_weights.append(variable)
elif not trainable and all(
variable is not v for v in self._non_trainable_weights):
self._non_trainable_weights.append(variable)
return variable
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of Integers
or TensorShapes.
"""
raise NotImplementedError("Abstract method")
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError("Abstract method")
def build(self, _):
# This tells the parent Layer object that it's OK to call
# self.add_variable() inside the call() method.
pass
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
if inputs is not None:
# Validate the given batch_size and dtype against inputs if provided.
inputs = tensor_conversion.convert_to_tensor_v2_with_dispatch(
inputs, name="inputs"
)
if batch_size is not None:
if tensor_util.is_tf_type(batch_size):
static_batch_size = tensor_util.constant_value(
batch_size, partial=True)
else:
static_batch_size = batch_size
if inputs.shape.dims[0].value != static_batch_size:
raise ValueError(
"batch size from input tensor is different from the "
"input param. Input tensor batch: {}, batch_size: {}".format(
inputs.shape.dims[0].value, batch_size))
if dtype is not None and inputs.dtype != dtype:
raise ValueError(
"dtype from input tensor is different from the "
"input param. Input tensor dtype: {}, dtype: {}".format(
inputs.dtype, dtype))
batch_size = inputs.shape.dims[0].value or array_ops.shape(inputs)[0]
dtype = inputs.dtype
if batch_size is None or dtype is None:
raise ValueError(
"batch_size and dtype cannot be None while constructing initial "
"state: batch_size={}, dtype={}".format(batch_size, dtype))
return self.zero_state(batch_size, dtype)
def zero_state(self, batch_size, dtype):
"""Return zero-filled state tensor(s).
Args:
batch_size: int, float, or unit Tensor representing the batch size.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int or TensorShape, then the return value is a
`N-D` tensor of shape `[batch_size, state_size]` filled with zeros.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size, s]` for each s in `state_size`.
"""
# Try to use the last cached zero_state. This is done to avoid recreating
# zeros, especially when eager execution is enabled.
state_size = self.state_size
is_eager = context.executing_eagerly()
if is_eager and _hasattr(self, "_last_zero_state"):
(last_state_size, last_batch_size, last_dtype,
last_output) = getattr(self, "_last_zero_state")
if (last_batch_size == batch_size and last_dtype == dtype and
last_state_size == state_size):
return last_output
with backend.name_scope(type(self).__name__ + "ZeroState"):
output = _zero_state_tensors(state_size, batch_size, dtype)
if is_eager:
self._last_zero_state = (state_size, batch_size, dtype, output)
return output
# TODO(b/134773139): Remove when contrib RNN cells implement `get_config`
def get_config(self): # pylint: disable=useless-super-delegation
return super(RNNCell, self).get_config()
@property
def _use_input_spec_as_call_signature(self):
# We do not store the shape information for the state argument in the call
# function for legacy RNN cells, so do not generate an input signature.
return False
| RNNCell |
python | apache__airflow | airflow-core/src/airflow/ti_deps/deps/dag_ti_slots_available_dep.py | {
"start": 930,
"end": 1524
} | class ____(BaseTIDep):
"""Determines whether a DAG maximum number of running tasks has been reached."""
NAME = "Task Instance Slots Available"
IGNORABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.dag.get_concurrency_reached(session):
yield self._failing_status(
reason=(
f"The maximum number of running tasks ({ti.task.dag.max_active_tasks}) for "
f"this task's DAG '{ti.dag_id}' has been reached."
)
)
| DagTISlotsAvailableDep |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/overload.py | {
"start": 667,
"end": 870
} | class ____:
"""docstring"""
@overload
def __new__(cls, x: int, y: int) -> Foo: ...
@overload
def __new__(cls, x: str, y: str) -> Foo: ...
def __new__(cls, x, y):
pass
| Foo |
python | walkccc__LeetCode | solutions/885. Spiral Matrix III/885.py | {
"start": 0,
"end": 444
} | class ____:
def spiralMatrixIII(self, rows: int, cols: int, rStart: int, cStart: int) -> list[list[int]]:
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]
ans = [[rStart, cStart]]
i = 0
while len(ans) < rows * cols:
for _ in range(i // 2 + 1):
rStart += dy[i % 4]
cStart += dx[i % 4]
if 0 <= rStart < rows and 0 <= cStart < cols:
ans.append([rStart, cStart])
i += 1
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 985552,
"end": 986086
} | class ____(sgqlc.types.Type):
"""Represents a starred repository."""
__schema__ = github_schema
__field_names__ = ("cursor", "node", "starred_at")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field(sgqlc.types.non_null("Repository"), graphql_name="node")
starred_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="starredAt")
"""Identifies when the item was starred."""
| StarredRepositoryEdge |
python | google__pytype | pytype/tests/test_builtins4.py | {
"start": 147,
"end": 2414
} | class ____(test_base.BaseTest):
"""Tests for builtin.map."""
def test_basic(self):
ty = self.Infer("""
v = map(int, ("0",))
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Iterator
v : Iterator[int]
""",
)
def test_lambda(self):
ty = self.Infer("""
class Foo:
pass
def f():
return map(lambda x: x, [Foo()])
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Iterator
class Foo:
pass
def f() -> Iterator: ...
""",
)
def test_join(self):
ty = self.Infer("""
def f(input_string, sub):
return ''.join(map(lambda ch: ch, input_string))
""")
self.assertTypesMatchPytd(ty, "def f(input_string, sub) -> str: ...")
def test_empty(self):
ty = self.Infer("""
lst1 = []
lst2 = [x for x in lst1]
lst3 = map(str, lst2)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, List, Iterator
lst1 : List[nothing]
lst2 : List[nothing]
lst3 : Iterator[nothing]
""",
)
def test_heterogeneous(self):
self.Check("""
from typing import Union
def func(a: Union[int, str, float, bool]) -> str:
return str(a)
map(func, [1, 'pi', 3.14, True])
""")
self.Check("""
from typing import Iterable, Union
def func(
first: Iterable[str], second: str, third: Union[int, bool, float]
) -> str:
return ' '.join(first) + second + str(third)
map(func,
[('one', 'two'), {'three', 'four'}, ['five', 'six']],
'abc',
[1, False, 3.14])
""")
def test_error_message(self):
errors = self.CheckWithErrors("""
def func(a: int) -> float:
return float(a)
map(func, ['str']) # wrong-arg-types[e]
""")
self.assertErrorSequences(
errors, {"e": ["Expected", "Iterable[int]", "Actual", "list[str]"]}
)
def test_abspath(self):
self.Check("""
import os.path
map(os.path.abspath, [''])
""")
def test_protocol(self):
self.Check("""
class Foo:
def __len__(self) -> int:
return 0
map(len, [Foo()])
""")
| MapTest |
python | kamyu104__LeetCode-Solutions | Python/sum-of-beauty-in-the-array.py | {
"start": 29,
"end": 583
} | class ____(object):
def sumOfBeauties(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
right = [nums[-1]]*len(nums)
for i in reversed(xrange(2, len(nums)-1)):
right[i] = min(right[i+1], nums[i])
result, left = 0, nums[0]
for i in xrange(1, len(nums)-1):
if left < nums[i] < right[i+1]:
result += 2
elif nums[i-1] < nums[i] < nums[i+1]:
result += 1
left = max(left, nums[i])
return result
| Solution |
python | sympy__sympy | sympy/physics/quantum/tests/test_innerproduct.py | {
"start": 1166,
"end": 1483
} | class ____(Bra, BarState):
@classmethod
def dual_class(self):
return BarKet
def test_doit():
f = FooKet('foo')
b = BarBra('bar')
assert InnerProduct(b, f).doit() == I
assert InnerProduct(Dagger(f), Dagger(b)).doit() == -I
assert InnerProduct(Dagger(f), f).doit() == Integer(1)
| BarBra |
python | encode__starlette | starlette/responses.py | {
"start": 7160,
"end": 9519
} | class ____(Response):
body_iterator: AsyncContentStream
def __init__(
self,
content: ContentStream,
status_code: int = 200,
headers: Mapping[str, str] | None = None,
media_type: str | None = None,
background: BackgroundTask | None = None,
) -> None:
if isinstance(content, AsyncIterable):
self.body_iterator = content
else:
self.body_iterator = iterate_in_threadpool(content)
self.status_code = status_code
self.media_type = self.media_type if media_type is None else media_type
self.background = background
self.init_headers(headers)
async def listen_for_disconnect(self, receive: Receive) -> None:
while True:
message = await receive()
if message["type"] == "http.disconnect":
break
async def stream_response(self, send: Send) -> None:
await send(
{
"type": "http.response.start",
"status": self.status_code,
"headers": self.raw_headers,
}
)
async for chunk in self.body_iterator:
if not isinstance(chunk, bytes | memoryview):
chunk = chunk.encode(self.charset)
await send({"type": "http.response.body", "body": chunk, "more_body": True})
await send({"type": "http.response.body", "body": b"", "more_body": False})
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
spec_version = tuple(map(int, scope.get("asgi", {}).get("spec_version", "2.0").split(".")))
if spec_version >= (2, 4):
try:
await self.stream_response(send)
except OSError:
raise ClientDisconnect()
else:
with collapse_excgroups():
async with anyio.create_task_group() as task_group:
async def wrap(func: Callable[[], Awaitable[None]]) -> None:
await func()
task_group.cancel_scope.cancel()
task_group.start_soon(wrap, partial(self.stream_response, send))
await wrap(partial(self.listen_for_disconnect, receive))
if self.background is not None:
await self.background()
| StreamingResponse |
python | ray-project__ray | python/ray/llm/_internal/serve/engines/vllm/kv_transfer/factory.py | {
"start": 786,
"end": 4723
} | class ____:
"""Factory for creating KV connector backend instances with lazy loading."""
@classmethod
def register_backend(
cls,
name: str,
backend_class_or_path: Union[Type["BaseConnectorBackend"], str],
) -> None:
"""Register a connector backend.
This enables the backend to be accessed on every Ray process in the cluster.
Args:
name: The name of the connector (e.g., "LMCacheConnectorV1")
backend_class_or_path: Either:
- The backend class object directly (preferred), or
- A string in the format "module_path:class_name" for lazy loading
Examples:
# Register with class directly (recommended):
KVConnectorBackendFactory.register_backend("MyConnector", MyConnectorClass)
# Register with module path string (for lazy loading):
KVConnectorBackendFactory.register_backend("MyConnector", "my.module:MyClass")
"""
_kv_backend_registry.register(name, backend_class_or_path)
@classmethod
def get_backend_class(cls, name: str) -> Type["BaseConnectorBackend"]:
"""Get the connector backend class by name.
For registered connectors, returns the registered backend class.
For unregistered connectors, returns BaseConnectorBackend which has
a no-op setup() method, allowing connectors that don't require
Ray Serve orchestration to work without registration.
Args:
name: The name of the connector backend
Returns:
The connector backend class
Raises:
ImportError: If a registered backend fails to load
"""
try:
return _kv_backend_registry.get(name)
except ValueError:
logger.warning(
f"Unsupported connector backend: {name}. "
f"Using default: {BaseConnectorBackend.__name__}."
)
return BaseConnectorBackend
except Exception as e:
raise ImportError(
f"Failed to load connector backend '{name}': {type(e).__name__}: {e}"
) from e
@classmethod
def create_backend(
cls, name: str, llm_config: "LLMConfig"
) -> "BaseConnectorBackend":
"""Create a connector backend instance.
Args:
name: The name of the connector backend
llm_config: The LLM configuration
Returns:
An instance of the connector backend
"""
return cls.get_backend_class(name)(llm_config)
@classmethod
def is_registered(cls, name: str) -> bool:
"""Check if a connector backend is registered."""
return _kv_backend_registry.contains(name)
@classmethod
def unregister_backend(cls, name: str) -> None:
"""Unregister a connector backend.
Removes the backend from the registry across all Ray processes.
Args:
name: The name of the connector backend to unregister
"""
_kv_backend_registry.unregister(name)
BUILTIN_BACKENDS = {
"LMCacheConnectorV1": "ray.llm._internal.serve.engines.vllm.kv_transfer.lmcache:LMCacheConnectorV1Backend",
"NixlConnector": "ray.llm._internal.serve.engines.vllm.kv_transfer.nixl:NixlConnectorBackend",
"MultiConnector": "ray.llm._internal.serve.engines.vllm.kv_transfer.multi_connector:MultiConnectorBackend",
}
def _initialize_registry() -> None:
"""Initialize the registry with built-in backends.
This function is called when the module is imported to ensure
built-in backends are registered.
"""
for name, backend_path in BUILTIN_BACKENDS.items():
if not KVConnectorBackendFactory.is_registered(name):
KVConnectorBackendFactory.register_backend(name, backend_path)
# Initialize registry when module is imported
_initialize_registry()
| KVConnectorBackendFactory |
python | davidhalter__jedi | jedi/inference/compiled/value.py | {
"start": 12294,
"end": 13462
} | class ____(ParamNameInterface, AbstractNameDefinition):
def __init__(self, compiled_value, signature_param):
self.parent_context = compiled_value.parent_context
self._signature_param = signature_param
@property
def string_name(self):
return self._signature_param.name
def to_string(self):
s = self._kind_string() + self.string_name
if self._signature_param.has_annotation:
s += ': ' + self._signature_param.annotation_string
if self._signature_param.has_default:
s += '=' + self._signature_param.default_string
return s
def get_kind(self):
return getattr(Parameter, self._signature_param.kind_name)
def infer(self):
p = self._signature_param
inference_state = self.parent_context.inference_state
values = NO_VALUES
if p.has_default:
values = ValueSet([create_from_access_path(inference_state, p.default)])
if p.has_annotation:
annotation = create_from_access_path(inference_state, p.annotation)
values |= annotation.execute_with_values()
return values
| SignatureParamName |
python | doocs__leetcode | solution/2100-2199/2194.Cells in a Range on an Excel Sheet/Solution.py | {
"start": 0,
"end": 228
} | class ____:
def cellsInRange(self, s: str) -> List[str]:
return [
chr(i) + str(j)
for i in range(ord(s[0]), ord(s[-2]) + 1)
for j in range(int(s[1]), int(s[-1]) + 1)
]
| Solution |
python | scipy__scipy | scipy/linalg/tests/test_basic.py | {
"start": 78095,
"end": 81352
} | class ____:
def test_simple_real(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_nonpositive(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv(a)
a_pinvh = pinvh(a)
assert_array_almost_equal(a_pinv, a_pinvh)
def test_simple_complex(self):
a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]],
dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]],
dtype=float))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_native_list_argument(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
a = np.dot(a, a.T)
a_pinv = pinvh(a.tolist())
assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_zero_eigenvalue(self):
# https://github.com/scipy/scipy/issues/12515
# the SYEVR eigh driver may give the zero eigenvalue > eps
a = np.array([[1, -1, 0], [-1, 2, -1], [0, -1, 1]])
p = pinvh(a)
assert_allclose(p @ a @ p, p, atol=1e-15)
assert_allclose(a @ p @ a, a, atol=1e-15)
def test_atol_rtol(self):
rng = np.random.default_rng(1234)
n = 12
# get a random ortho matrix for shuffling
q, _ = qr(rng.random((n, n)))
a = np.diag([4, 3, 2, 1, 0.99e-4, 0.99e-5] + [0.99e-6]*(n-6))
a = q.T @ a @ q
a_m = np.diag([4, 3, 2, 1, 0.99e-4, 0.] + [0.]*(n-6))
a_m = q.T @ a_m @ q
atol = 1e-5
rtol = (4.01e-4 - 4e-5)/4
# Just abs cutoff such that we arrive at a_modified
a_p = pinvh(a, atol=atol, rtol=0.)
adiff1 = a @ a_p @ a - a
adiff2 = a_m @ a_p @ a_m - a_m
# Now adiff1 should dance around atol value since truncation
# while adiff2 should be relatively tiny
assert_allclose(norm(adiff1), atol, rtol=0.1)
assert_allclose(norm(adiff2), 1e-12, atol=1e-11)
# Now do the same but through rtol cancelling atol value
a_p = pinvh(a, atol=atol, rtol=rtol)
adiff1 = a @ a_p @ a - a
adiff2 = a_m @ a_p @ a_m - a_m
# adiff1 and adiff2 should be elevated to ~1e-4 due to mismatch
assert_allclose(norm(adiff1), 1e-4, rtol=0.1)
assert_allclose(norm(adiff2), 1e-4, rtol=0.1)
@pytest.mark.parametrize('dt', [float, np.float32, complex, np.complex64])
def test_empty(self, dt):
a = np.empty((0, 0), dtype=dt)
a_pinv = pinvh(a)
assert a_pinv.size == 0
assert a_pinv.dtype == pinv(np.eye(2, dtype=dt)).dtype
@pytest.mark.parametrize('scale', (1e-20, 1., 1e20))
@pytest.mark.parametrize('pinv_', (pinv, pinvh))
def test_auto_rcond(scale, pinv_):
x = np.array([[1, 0], [0, 1e-10]]) * scale
expected = np.diag(1. / np.diag(x))
x_inv = pinv_(x)
assert_allclose(x_inv, expected)
| TestPinvSymmetric |
python | mlflow__mlflow | mlflow/utils/rest_utils.py | {
"start": 24220,
"end": 28737
} | class ____:
"""
Provides a hostname and optional authentication for talking to an MLflow tracking server.
Args:
host: Hostname (e.g., http://localhost:5000) to MLflow server. Required.
username: Username to use with Basic authentication when talking to server.
If this is specified, password must also be specified.
password: Password to use with Basic authentication when talking to server.
If this is specified, username must also be specified.
token: Token to use with Bearer authentication when talking to server.
If provided, user/password authentication will be ignored.
aws_sigv4: If true, we will create a signature V4 to be added for any outgoing request.
Keys for signing the request can be passed via ENV variables,
or will be fetched via boto3 session.
auth: If set, the auth will be added for any outgoing request.
Keys for signing the request can be passed via ENV variables,
ignore_tls_verification: If true, we will not verify the server's hostname or TLS
certificate. This is useful for certain testing situations, but should never be
true in production.
If this is set to true ``server_cert_path`` must not be set.
client_cert_path: Path to ssl client cert file (.pem).
Sets the cert param of the ``requests.request``
function (see https://requests.readthedocs.io/en/master/api/).
server_cert_path: Path to a CA bundle to use.
Sets the verify param of the ``requests.request``
function (see https://requests.readthedocs.io/en/master/api/).
If this is set ``ignore_tls_verification`` must be false.
use_databricks_sdk: A boolean value represent whether using Databricks SDK for
authentication.
databricks_auth_profile: The name of the profile used by Databricks SDK for
authentication.
client_id: The client ID used by Databricks OAuth
client_secret: The client secret used by Databricks OAuth
"""
def __init__(
self,
host,
username=None,
password=None,
token=None,
aws_sigv4=False,
auth=None,
ignore_tls_verification=False,
client_cert_path=None,
server_cert_path=None,
use_databricks_sdk=False,
databricks_auth_profile=None,
client_id=None,
client_secret=None,
use_secret_scope_token=False,
):
if not host:
raise MlflowException(
message="host is a required parameter for MlflowHostCreds",
error_code=INVALID_PARAMETER_VALUE,
)
if ignore_tls_verification and (server_cert_path is not None):
raise MlflowException(
message=(
"When 'ignore_tls_verification' is true then 'server_cert_path' "
"must not be set! This error may have occurred because the "
"'MLFLOW_TRACKING_INSECURE_TLS' and 'MLFLOW_TRACKING_SERVER_CERT_PATH' "
"environment variables are both set - only one of these environment "
"variables may be set."
),
error_code=INVALID_PARAMETER_VALUE,
)
self.host = host
self.username = username
self.password = password
self.token = token
self.aws_sigv4 = aws_sigv4
self.auth = auth
self.ignore_tls_verification = ignore_tls_verification
self.client_cert_path = client_cert_path
self.server_cert_path = server_cert_path
self.use_databricks_sdk = use_databricks_sdk
self.databricks_auth_profile = databricks_auth_profile
self.client_id = client_id
self.client_secret = client_secret
self.use_secret_scope_token = use_secret_scope_token
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
def __hash__(self):
return hash(frozenset(self.__dict__.items()))
@property
def verify(self):
if self.use_databricks_sdk:
# Let databricks-sdk set HTTP request `verify` param.
return None
if self.server_cert_path is None:
return not self.ignore_tls_verification
else:
return self.server_cert_path
| MlflowHostCreds |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF005.py | {
"start": 323,
"end": 1140
} | class ____:
words = ("how", "fun!")
def yay(self):
return self.words
yay = Fun().yay
foo = [4, 5, 6]
bar = [1, 2, 3] + foo
zoob = tuple(bar)
quux = (7, 8, 9) + zoob
spam = quux + (10, 11, 12)
spom = list(spam)
eggs = spom + [13, 14, 15]
elatement = ("we all say",) + yay()
excitement = ("we all think",) + Fun().yay()
astonishment = ("we all feel",) + Fun.words
chain = ["a", "b", "c"] + eggs + list(("yes", "no", "pants") + zoob)
baz = () + zoob
[] + foo + [
]
pylint_call = [sys.executable, "-m", "pylint"] + args + [path]
pylint_call_tuple = (sys.executable, "-m", "pylint") + args + (path, path2)
b = a + [2, 3] + [4]
# Uses the non-preferred quote style, which should be retained.
f"{a() + ['b']}"
###
# Non-errors.
###
a = (1,) + [2]
a = [1, 2] + (3, 4)
a = ([1, 2, 3] + b) + (4, 5, 6)
| Fun |
python | tornadoweb__tornado | tornado/test/testing_test.py | {
"start": 642,
"end": 1864
} | class ____(AsyncTestCase):
def test_wait_timeout(self):
time = self.io_loop.time
# Accept default 5-second timeout, no error
self.io_loop.add_timeout(time() + 0.01, self.stop)
self.wait()
# Timeout passed to wait()
self.io_loop.add_timeout(time() + 1, self.stop)
with self.assertRaises(self.failureException):
self.wait(timeout=0.01)
# Timeout set with environment variable
self.io_loop.add_timeout(time() + 1, self.stop)
with set_environ("ASYNC_TEST_TIMEOUT", "0.01"):
with self.assertRaises(self.failureException):
self.wait()
def test_subsequent_wait_calls(self):
"""
This test makes sure that a second call to wait()
clears the first timeout.
"""
# The first wait ends with time left on the clock
self.io_loop.add_timeout(self.io_loop.time() + 0.00, self.stop)
self.wait(timeout=0.1)
# The second wait has enough time for itself but would fail if the
# first wait's deadline were still in effect.
self.io_loop.add_timeout(self.io_loop.time() + 0.2, self.stop)
self.wait(timeout=0.4)
| AsyncTestCaseTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/utility/test_xl_col_to_name.py | {
"start": 296,
"end": 1562
} | class ____(unittest.TestCase):
"""
Test xl_col_to_name() utility function.
"""
def test_xl_col_to_name(self):
"""Test xl_col_to_name()"""
tests = [
# col, col string
(0, "A"),
(1, "B"),
(2, "C"),
(9, "J"),
(24, "Y"),
(25, "Z"),
(26, "AA"),
(254, "IU"),
(255, "IV"),
(256, "IW"),
(16383, "XFD"),
(16384, "XFE"),
(-1, ""),
]
for col, string in tests:
exp = string
got = xl_col_to_name(col)
# Ignore the warnings for negative values.
warnings.filterwarnings("ignore")
self.assertEqual(exp, got)
def test_xl_col_to_name_abs(self):
"""Test xl_col_to_name() with absolute references"""
tests = [
# col, col_abs, col string
(0, True, "$A"),
(-1, True, ""),
]
for col, col_abs, string in tests:
exp = string
got = xl_col_to_name(col, col_abs)
# Ignore the warnings for negative values.
warnings.filterwarnings("ignore")
self.assertEqual(exp, got)
| TestUtility |
python | django__django | django/core/cache/backends/redis.py | {
"start": 5101,
"end": 8010
} | class ____(BaseCache):
def __init__(self, server, params):
super().__init__(params)
if isinstance(server, str):
self._servers = re.split("[;,]", server)
else:
self._servers = server
self._class = RedisCacheClient
self._options = params.get("OPTIONS", {})
@cached_property
def _cache(self):
return self._class(self._servers, **self._options)
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
# The key will be made persistent if None used as a timeout.
# Non-positive values will cause the key to be deleted.
return None if timeout is None else max(0, int(timeout))
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.add(key, value, self.get_backend_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.get(key, default)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
self._cache.set(key, value, self.get_backend_timeout(timeout))
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.touch(key, self.get_backend_timeout(timeout))
def delete(self, key, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.delete(key)
def get_many(self, keys, version=None):
key_map = {
self.make_and_validate_key(key, version=version): key for key in keys
}
ret = self._cache.get_many(key_map.keys())
return {key_map[k]: v for k, v in ret.items()}
def has_key(self, key, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.has_key(key)
def incr(self, key, delta=1, version=None):
key = self.make_and_validate_key(key, version=version)
return self._cache.incr(key, delta)
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
if not data:
return []
safe_data = {}
for key, value in data.items():
key = self.make_and_validate_key(key, version=version)
safe_data[key] = value
self._cache.set_many(safe_data, self.get_backend_timeout(timeout))
return []
def delete_many(self, keys, version=None):
if not keys:
return
safe_keys = [self.make_and_validate_key(key, version=version) for key in keys]
self._cache.delete_many(safe_keys)
def clear(self):
return self._cache.clear()
| RedisCache |
python | ray-project__ray | python/ray/serve/_private/benchmarks/locust_utils.py | {
"start": 762,
"end": 897
} | class ____:
request_id: str
status_code: int
exception: str
response_time_ms: float
start_time_s: float
| FailedRequest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/isinstance1.py | {
"start": 91,
"end": 214
} | class ____:
def bar(self):
a = isinstance(object(), self.__class__)
b = isinstance(object(), __class__)
| Foo |
python | django__django | django/db/models/lookups.py | {
"start": 16904,
"end": 16999
} | class ____(IntegerFieldOverflow, Exact):
pass
@IntegerField.register_lookup
| IntegerFieldExact |
python | PyCQA__pylint | tests/functional/i/inherit_non_class.py | {
"start": 1949,
"end": 1990
} | class ____(ParentGood[int]):
pass
| Child1 |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 203316,
"end": 203737
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "reaction", "subject")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
reaction = sgqlc.types.Field("Reaction", graphql_name="reaction")
subject = sgqlc.types.Field("Reactable", graphql_name="subject")
| AddReactionPayload |
python | huggingface__transformers | src/transformers/models/vaultgemma/modeling_vaultgemma.py | {
"start": 12622,
"end": 15650
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: VaultGemmaConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[VaultGemmaConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
| VaultGemmaRotaryEmbedding |
python | mozilla__bleach | bleach/_vendor/parse.py | {
"start": 7227,
"end": 10710
} | class ____(_NetlocResultMixinBase, _ResultMixinBytes):
__slots__ = ()
@property
def _userinfo(self):
netloc = self.netloc
userinfo, have_info, hostinfo = netloc.rpartition(b'@')
if have_info:
username, have_password, password = userinfo.partition(b':')
if not have_password:
password = None
else:
username = password = None
return username, password
@property
def _hostinfo(self):
netloc = self.netloc
_, _, hostinfo = netloc.rpartition(b'@')
_, have_open_br, bracketed = hostinfo.partition(b'[')
if have_open_br:
hostname, _, port = bracketed.partition(b']')
_, _, port = port.partition(b':')
else:
hostname, _, port = hostinfo.partition(b':')
if not port:
port = None
return hostname, port
from collections import namedtuple
_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple(
'SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple(
'ParseResult', 'scheme netloc path params query fragment')
_DefragResultBase.__doc__ = """
DefragResult(url, fragment)
A 2-tuple that contains the url without fragment identifier and the fragment
identifier as a separate argument.
"""
_DefragResultBase.url.__doc__ = """The URL with no fragment identifier."""
_DefragResultBase.fragment.__doc__ = """
Fragment identifier separated from URL, that allows indirect identification of a
secondary resource by reference to a primary resource and additional identifying
information.
"""
_SplitResultBase.__doc__ = """
SplitResult(scheme, netloc, path, query, fragment)
A 5-tuple that contains the different components of a URL. Similar to
ParseResult, but does not split params.
"""
_SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request."""
_SplitResultBase.netloc.__doc__ = """
Network location where the request is made to.
"""
_SplitResultBase.path.__doc__ = """
The hierarchical path, such as the path to a file to download.
"""
_SplitResultBase.query.__doc__ = """
The query component, that contains non-hierarchical data, that along with data
in path component, identifies a resource in the scope of URI's scheme and
network location.
"""
_SplitResultBase.fragment.__doc__ = """
Fragment identifier, that allows indirect identification of a secondary resource
by reference to a primary resource and additional identifying information.
"""
_ParseResultBase.__doc__ = """
ParseResult(scheme, netloc, path, params, query, fragment)
A 6-tuple that contains components of a parsed URL.
"""
_ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__
_ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__
_ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__
_ParseResultBase.params.__doc__ = """
Parameters for last path element used to dereference the URI in order to provide
access to perform some operation on the resource.
"""
_ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__
_ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__
# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr
# Structured result objects for string data
| _NetlocResultMixinBytes |
python | django__django | tests/model_forms/tests.py | {
"start": 116075,
"end": 117625
} | class ____(SimpleTestCase):
def test_custom_error_messages(self):
data = {"name1": "@#$!!**@#$", "name2": "@#$!!**@#$"}
errors = CustomErrorMessageForm(data).errors
self.assertHTMLEqual(
str(errors["name1"]),
'<ul class="errorlist" id="id_name1_error">'
"<li>Form custom error message.</li></ul>",
)
self.assertHTMLEqual(
str(errors["name2"]),
'<ul class="errorlist" id="id_name2_error">'
"<li>Model custom error message.</li></ul>",
)
def test_model_clean_error_messages(self):
data = {"name1": "FORBIDDEN_VALUE", "name2": "ABC"}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertHTMLEqual(
str(form.errors["name1"]),
'<ul class="errorlist" id="id_name1_error">'
"<li>Model.clean() error messages.</li></ul>",
)
data = {"name1": "FORBIDDEN_VALUE2", "name2": "ABC"}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertHTMLEqual(
str(form.errors["name1"]),
'<ul class="errorlist" id="id_name1_error">'
"<li>Model.clean() error messages (simpler syntax).</li></ul>",
)
data = {"name1": "GLOBAL_ERROR", "name2": "ABC"}
form = CustomErrorMessageForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors["__all__"], ["Global error message."])
| ModelFormCustomErrorTests |
python | pypa__warehouse | warehouse/packaging/services.py | {
"start": 7564,
"end": 9315
} | class ____(GenericBlobStorage):
def get(self, path: str):
# Note: this is not actually used to serve files, instead our CDN is
# configured to connect directly to our storage bucket. See:
# https://github.com/python/pypi-infra/blob/master/terraform/file-hosting/vcl/main.vcl
try:
return self.bucket.Object(self._get_path(path)).get()["Body"]
except botocore.exceptions.ClientError as exc:
if exc.response["Error"]["Code"] != "NoSuchKey":
raise
raise FileNotFoundError(f"No such key: {path!r}") from None
def get_metadata(self, path: str):
try:
return self.bucket.Object(self._get_path(path)).metadata
except botocore.exceptions.ClientError as exc:
if exc.response["Error"]["Code"] != "NoSuchKey":
raise
raise FileNotFoundError(f"No such key: {path!r}") from None
def get_checksum(self, path: str):
try:
return (
self.bucket.Object(self._get_path(path)).e_tag.rstrip('"').lstrip('"')
)
except botocore.exceptions.ClientError as exc:
if exc.response["ResponseMetadata"]["HTTPStatusCode"] != 404:
# https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_RequestBody
raise
raise FileNotFoundError(f"No such key: {path!r}") from None
def store(self, path: str, file_path, *, meta=None):
extra_args = {}
if meta is not None:
extra_args["Metadata"] = meta
path = self._get_path(path)
self.bucket.upload_file(file_path, path, ExtraArgs=extra_args)
@implementer(IFileStorage)
| GenericS3BlobStorage |
python | walkccc__LeetCode | solutions/3217. Delete Nodes From Linked List Present in Array/3217.py | {
"start": 0,
"end": 347
} | class ____:
def modifiedList(
self,
nums: list[int],
head: ListNode | None,
) -> ListNode | None:
dummy = ListNode(0, head)
numsSet = set(nums)
curr = dummy
while curr.next:
if curr.next.val in numsSet:
curr.next = curr.next.next
else:
curr = curr.next
return dummy.next
| Solution |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 12371,
"end": 13352
} | class ____(FileStrategy):
"""
This is a OpenBSD family Hostname manipulation strategy class - it edits
the /etc/myname file for the permanent hostname and executes hostname
command for the current hostname.
"""
FILE = '/etc/myname'
COMMAND = "hostname"
def __init__(self, module):
super(OpenBSDStrategy, self).__init__(module)
self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
| OpenBSDStrategy |
python | getsentry__sentry | tests/sentry/integrations/slack/webhooks/commands/test_link_user.py | {
"start": 3905,
"end": 4720
} | class ____(SlackCommandsTest):
"""Slash commands results are generated on Region Silo"""
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_unlink_command(self, mock_record: MagicMock) -> None:
self.link_user()
data = self.send_slack_message("unlink")
assert "to unlink your identity" in get_response_text(data)
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_unlink_command_already_unlinked(self, mock_record: MagicMock) -> None:
data = self.send_slack_message("unlink")
assert NOT_LINKED_MESSAGE in get_response_text(data)
assert_slo_metric(mock_record, EventLifecycleOutcome.SUCCESS)
| SlackCommandsUnlinkUserTest |
python | paramiko__paramiko | paramiko/kex_ecdh_nist.py | {
"start": 4782,
"end": 4898
} | class ____(KexNistp256):
name = "ecdh-sha2-nistp384"
hash_algo = sha384
curve = ec.SECP384R1()
| KexNistp384 |
python | google__jax | jax/experimental/array_serialization/serialization_test.py | {
"start": 25900,
"end": 26566
} | class ____(jtu.JaxTestCase):
@jtu.skip_on_devices('cpu')
def test_transfer_shard_to_host(self):
np_inp = np.arange(16).reshape((4, 4))
sharding = SingleDeviceSharding(jax.devices()[0], memory_kind='device')
arr = jax.device_put(np_inp, sharding)
shard = arr.addressable_shards[0]
np_out = asyncio.run(ts_impl._transfer_shard_to_host(shard))
self.assertArraysEqual(np_out, np_inp)
def _remove_from_serialization_registry(t: Any):
if t in node_serialization_registry:
serialized_name = node_serialization_registry[t][0]
del node_serialization_registry[t]
del node_deserialization_registry[serialized_name]
| TransferShardTest |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/scroll_visible.py | {
"start": 193,
"end": 359
} | class ____(Static):
def compose(self) -> ComposeResult:
yield Label(("|\n" * 100)[:-1])
yield Label("SHOULD BE VISIBLE", id="target")
| MyCustomWidget |
python | scrapy__scrapy | tests/test_utils_deprecate.py | {
"start": 357,
"end": 8782
} | class ____:
def _mywarnings(
self, w: list[WarningMessage], category: type[Warning] = MyWarning
) -> list[WarningMessage]:
return [x for x in w if x.category is MyWarning]
def test_no_warning_on_definition(self):
with warnings.catch_warnings(record=True) as w:
create_deprecated_class("Deprecated", NewName)
w = self._mywarnings(w)
assert w == []
def test_subclassing_warning_message(self):
Deprecated = create_deprecated_class(
"Deprecated", NewName, warn_category=MyWarning
)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
w = self._mywarnings(w)
assert len(w) == 1
assert (
str(w[0].message) == "tests.test_utils_deprecate.UserClass inherits from "
"deprecated class tests.test_utils_deprecate.Deprecated, "
"please inherit from tests.test_utils_deprecate.NewName."
" (warning only on first subclass, there may be others)"
)
assert w[0].lineno == inspect.getsourcelines(UserClass)[1]
def test_custom_class_paths(self):
Deprecated = create_deprecated_class(
"Deprecated",
NewName,
new_class_path="foo.NewClass",
old_class_path="bar.OldClass",
warn_category=MyWarning,
)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
_ = Deprecated()
w = self._mywarnings(w)
assert len(w) == 2
assert "foo.NewClass" in str(w[0].message)
assert "bar.OldClass" in str(w[0].message)
assert "foo.NewClass" in str(w[1].message)
assert "bar.OldClass" in str(w[1].message)
def test_subclassing_warns_only_on_direct_children(self):
Deprecated = create_deprecated_class(
"Deprecated", NewName, warn_once=False, warn_category=MyWarning
)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
class NoWarnOnMe(UserClass):
pass
w = self._mywarnings(w)
assert len(w) == 1
assert "UserClass" in str(w[0].message)
def test_subclassing_warns_once_by_default(self):
Deprecated = create_deprecated_class(
"Deprecated", NewName, warn_category=MyWarning
)
with warnings.catch_warnings(record=True) as w:
class UserClass(Deprecated):
pass
class FooClass(Deprecated):
pass
class BarClass(Deprecated):
pass
w = self._mywarnings(w)
assert len(w) == 1
assert "UserClass" in str(w[0].message)
def test_warning_on_instance(self):
Deprecated = create_deprecated_class(
"Deprecated", NewName, warn_category=MyWarning
)
# ignore subclassing warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", MyWarning)
class UserClass(Deprecated):
pass
with warnings.catch_warnings(record=True) as w:
_, lineno = Deprecated(), inspect.getlineno(inspect.currentframe())
_ = UserClass() # subclass instances don't warn
w = self._mywarnings(w)
assert len(w) == 1
assert (
str(w[0].message) == "tests.test_utils_deprecate.Deprecated is deprecated, "
"instantiate tests.test_utils_deprecate.NewName instead."
)
assert w[0].lineno == lineno
def test_warning_auto_message(self):
with warnings.catch_warnings(record=True) as w:
Deprecated = create_deprecated_class("Deprecated", NewName)
class UserClass2(Deprecated):
pass
msg = str(w[0].message)
assert "tests.test_utils_deprecate.NewName" in msg
assert "tests.test_utils_deprecate.Deprecated" in msg
def test_issubclass(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ScrapyDeprecationWarning)
DeprecatedName = create_deprecated_class("DeprecatedName", NewName)
class UpdatedUserClass1(NewName):
pass
class UpdatedUserClass1a(NewName):
pass
class OutdatedUserClass1(DeprecatedName):
pass
class OutdatedUserClass1a(DeprecatedName):
pass
class UnrelatedClass:
pass
assert issubclass(UpdatedUserClass1, NewName)
assert issubclass(UpdatedUserClass1a, NewName)
assert issubclass(UpdatedUserClass1, DeprecatedName)
assert issubclass(UpdatedUserClass1a, DeprecatedName)
assert issubclass(OutdatedUserClass1, DeprecatedName)
assert not issubclass(UnrelatedClass, DeprecatedName)
assert not issubclass(OutdatedUserClass1, OutdatedUserClass1a)
assert not issubclass(OutdatedUserClass1a, OutdatedUserClass1)
with pytest.raises(TypeError):
issubclass(object(), DeprecatedName)
def test_isinstance(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ScrapyDeprecationWarning)
DeprecatedName = create_deprecated_class("DeprecatedName", NewName)
class UpdatedUserClass2(NewName):
pass
class UpdatedUserClass2a(NewName):
pass
class OutdatedUserClass2(DeprecatedName):
pass
class OutdatedUserClass2a(DeprecatedName):
pass
class UnrelatedClass:
pass
assert isinstance(UpdatedUserClass2(), NewName)
assert isinstance(UpdatedUserClass2a(), NewName)
assert isinstance(UpdatedUserClass2(), DeprecatedName)
assert isinstance(UpdatedUserClass2a(), DeprecatedName)
assert isinstance(OutdatedUserClass2(), DeprecatedName)
assert isinstance(OutdatedUserClass2a(), DeprecatedName)
assert not isinstance(OutdatedUserClass2a(), OutdatedUserClass2)
assert not isinstance(OutdatedUserClass2(), OutdatedUserClass2a)
assert not isinstance(UnrelatedClass(), DeprecatedName)
def test_clsdict(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ScrapyDeprecationWarning)
Deprecated = create_deprecated_class("Deprecated", NewName, {"foo": "bar"})
assert Deprecated.foo == "bar"
def test_deprecate_a_class_with_custom_metaclass(self):
Meta1 = type("Meta1", (type,), {})
New = Meta1("New", (), {})
create_deprecated_class("Deprecated", New)
def test_deprecate_subclass_of_deprecated_class(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
Deprecated = create_deprecated_class(
"Deprecated", NewName, warn_category=MyWarning
)
AlsoDeprecated = create_deprecated_class(
"AlsoDeprecated",
Deprecated,
new_class_path="foo.Bar",
warn_category=MyWarning,
)
w = self._mywarnings(w)
assert len(w) == 0, [str(warning) for warning in w]
with warnings.catch_warnings(record=True) as w:
AlsoDeprecated()
class UserClass(AlsoDeprecated):
pass
w = self._mywarnings(w)
assert len(w) == 2
assert "AlsoDeprecated" in str(w[0].message)
assert "foo.Bar" in str(w[0].message)
assert "AlsoDeprecated" in str(w[1].message)
assert "foo.Bar" in str(w[1].message)
def test_inspect_stack(self):
with (
mock.patch("inspect.stack", side_effect=IndexError),
warnings.catch_warnings(record=True) as w,
):
DeprecatedName = create_deprecated_class("DeprecatedName", NewName)
class SubClass(DeprecatedName):
pass
assert "Error detecting parent module" in str(w[0].message)
@mock.patch(
"scrapy.utils.deprecate.DEPRECATION_RULES",
[
("scrapy.contrib.pipeline.", "scrapy.pipelines."),
("scrapy.contrib.", "scrapy.extensions."),
],
)
| TestWarnWhenSubclassed |
python | pandas-dev__pandas | pandas/tests/indexes/test_indexing.py | {
"start": 8478,
"end": 9104
} | class ____:
def test_convert_almost_null_slice(self, index):
# slice with None at both ends, but not step
key = slice(None, None, "foo")
if isinstance(index, IntervalIndex):
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
index._convert_slice_indexer(key, "loc")
else:
msg = "'>=' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
index._convert_slice_indexer(key, "loc")
| TestConvertSliceIndexer |
python | walkccc__LeetCode | solutions/2612. Minimum Reverse Operations/2612.py | {
"start": 42,
"end": 881
} | class ____:
def minReverseOperations(
self,
n: int,
p: int,
banned: list[int],
k: int,
) -> list[int]:
bannedSet = set(banned)
ans = [-1] * n
# unseen[i] := the unseen numbers that % 2 == i
unseen = [SortedList(), SortedList()]
for num in range(n):
if num != p and num not in bannedSet:
unseen[num % 2].add(num)
# Perform BFS from `p`.
q = collections.deque([p])
ans[p] = 0
while q:
u = q.popleft()
lo = max(u - k + 1, k - 1 - u)
hi = min(u + k - 1, n - 1 - (u - (n - k)))
# Choose the correct set of numbers.
nums = unseen[lo % 2]
i = nums.bisect_left(lo)
while i < len(nums) and nums[i] <= hi:
num = nums[i]
ans[num] = ans[u] + 1
q.append(num)
nums.pop(i)
return ans
| Solution |
python | catalyst-team__catalyst | catalyst/callbacks/scheduler.py | {
"start": 480,
"end": 6318
} | class ____(ISchedulerCallback):
"""Scheduler callback, abstraction over scheduler step.
Args:
scheduler_key: scheduler name, if ``None``,
default is ``None``.
mode: scheduler mode, should be one of
``"epoch"`` or ``"batch"``, default is ``None``.
If ``None`` and object is instance of ``BatchScheduler``
or ``OneCycleLRWithWarmup`` then will be used ``"batch"``
otherwise - ``"epoch"``.
loader_key: loader name to look after for ReduceLROnPlateau scheduler
metric_key: metric name to forward to scheduler
object, if ``None`` then will be used main metric
specified in experiment.
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
scheduler_key: str = None,
mode: str = None,
loader_key: str = None,
metric_key: str = None,
):
"""Init."""
super().__init__()
if loader_key is not None or metric_key is not None:
assert loader_key is not None and metric_key is not None, (
"For metric reduction `SchedulerCallback` "
"requires both `loader_key` and `metric_key` specified."
)
self._use_metric_reduction = True
else:
self._use_metric_reduction = False
assert mode in ("batch", "epoch", None)
self.scheduler_key = scheduler_key
self.mode = mode
self.loader_key = loader_key
self.metric_key = metric_key
self.scheduler = None
@staticmethod
def _scheduler_step(scheduler, reduced_metric=None):
if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
scheduler.step(reduced_metric)
else:
scheduler.step()
lr_list = [param_group["lr"] for param_group in scheduler.optimizer.param_groups]
momentum_list = get_optimizer_momentum_list(scheduler.optimizer)
return lr_list, momentum_list
def _update_lr_and_momentum_in_metrics_dict(
self,
metrics_dict: dict,
lr_list: List[float],
momentum_list: List[Union[float, None]],
):
"""Update learning rate and momentum in metrics_dict
(consider only 0-th param group)
Args:
metrics_dict: batch_metrics or epoch_metrics
lr_list: lr for each param group
momentum_list: momentum for each param group
"""
lr = lr_list[0]
momentum = momentum_list[0]
lr_key = f"lr/{self.scheduler_key}" if self.scheduler_key is not None else "lr"
metrics_dict[lr_key] = lr
if momentum is not None:
momentum_key = (
f"momentum/{self.scheduler_key}"
if self.scheduler_key is not None
else "momentum"
)
metrics_dict[momentum_key] = momentum
def make_batch_step(self, runner: "IRunner") -> None:
"""Perform scheduler step and update batch metrics
Args:
runner: current runner
"""
lr_list, momentum_list = self._scheduler_step(scheduler=self.scheduler)
self._update_lr_and_momentum_in_metrics_dict(
runner.batch_metrics, lr_list, momentum_list
)
def make_epoch_step(self, runner: "IRunner") -> None:
"""Perform scheduler step and update epoch metrics
Args:
runner: current runner
"""
if self._use_metric_reduction:
reduced_metric = runner.epoch_metrics[self.loader_key][self.metric_key]
else:
reduced_metric = None
lr_list, momentum_list = self._scheduler_step(
scheduler=self.scheduler, reduced_metric=reduced_metric
)
# @TODO: remove trick to save pure epoch-based metrics, like lr/momentum
self._update_lr_and_momentum_in_metrics_dict(
runner.epoch_metrics["_epoch_"], lr_list, momentum_list
)
def on_experiment_start(self, runner: "IRunner") -> None:
"""Event handler."""
self.scheduler = get_attr(runner, key="scheduler", inner_key=self.scheduler_key)
assert self.scheduler is not None
if isinstance(self.scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
assert self.loader_key is not None and self.metric_key is not None, (
"For `ReduceLROnPlateau` scheduler `SchedulerCallback` "
"required both `loader_key` and `metric_key` specified"
)
if self.mode is None:
if isinstance(self.scheduler, BatchScheduler):
self.mode = "batch"
else:
self.mode = "epoch"
if isinstance(self.scheduler, OneCycleLRWithWarmup) and self.mode == "batch":
self.scheduler.reset()
assert self.mode is not None
def on_loader_start(self, runner: "IRunner") -> None:
"""Event handler."""
if (
runner.is_train_loader
and isinstance(self.scheduler, OneCycleLRWithWarmup)
and self.mode == "batch"
):
self.scheduler.recalculate(
loader_batch_len=runner.loader_batch_len,
current_batch_step=runner.batch_step,
)
def on_batch_end(self, runner: "IRunner") -> None:
"""Event handler."""
if runner.is_train_loader and self.mode == "batch":
self.make_batch_step(runner=runner)
def on_epoch_end(self, runner: "IRunner") -> None:
"""Event handler."""
if self.mode == "epoch":
self.make_epoch_step(runner=runner)
| SchedulerCallback |
python | redis__redis-py | redis/commands/policies.py | {
"start": 10668,
"end": 11267
} | class ____(AsyncBasePolicyResolver):
"""
Async version of StaticPolicyResolver.
"""
def __init__(self, fallback: Optional[AsyncPolicyResolver] = None) -> None:
"""
Parameters:
fallback (Optional[AsyncPolicyResolver]): An optional fallback policy resolver
used for resolving policies if static policies are inadequate.
"""
super().__init__(STATIC_POLICIES, fallback)
def with_fallback(self, fallback: "AsyncPolicyResolver") -> "AsyncPolicyResolver":
return AsyncStaticPolicyResolver(fallback)
| AsyncStaticPolicyResolver |
python | dagster-io__dagster | python_modules/libraries/dagster-snowflake/dagster_snowflake_tests/test_snowflake_io_manager.py | {
"start": 459,
"end": 754
} | class ____(DbTypeHandler[int]):
def handle_output(self, *args, **kwargs):
return None
def load_input(self, *args, **kwargs): # pyright: ignore[reportIncompatibleMethodOverride]
return None
@property
def supported_types(self):
return [int]
| PassTypeHandler |
python | python-pillow__Pillow | src/PIL/BlpImagePlugin.py | {
"start": 1416,
"end": 1454
} | class ____(IntEnum):
JPEG = 0
| Format |
python | walkccc__LeetCode | solutions/318. Maximum Product of Word Lengths/318.py | {
"start": 0,
"end": 421
} | class ____:
def maxProduct(self, words: list[str]) -> int:
ans = 0
def getMask(word: str) -> int:
mask = 0
for c in word:
mask |= 1 << ord(c) - ord('a')
return mask
masks = [getMask(word) for word in words]
for i in range(len(words)):
for j in range(i):
if not (masks[i] & masks[j]):
ans = max(ans, len(words[i]) * len(words[j]))
return ans
| Solution |
python | pandas-dev__pandas | pandas/tests/frame/test_constructors.py | {
"start": 102338,
"end": 105464
} | class ____:
def test_frame_from_dict_of_series_overlapping_monthly_period_indexes(self):
rng1 = pd.period_range("1/1/1999", "1/1/2012", freq="M")
s1 = Series(np.random.default_rng(2).standard_normal(len(rng1)), rng1)
rng2 = pd.period_range("1/1/1980", "12/1/2001", freq="M")
s2 = Series(np.random.default_rng(2).standard_normal(len(rng2)), rng2)
df = DataFrame({"s1": s1, "s2": s2})
exp = pd.period_range("1/1/1980", "1/1/2012", freq="M")
tm.assert_index_equal(df.index, exp)
def test_frame_from_dict_with_mixed_tzaware_indexes(self):
# GH#44091
dti = date_range("2016-01-01", periods=3)
ser1 = Series(range(3), index=dti)
ser2 = Series(range(3), index=dti.tz_localize("UTC"))
ser3 = Series(range(3), index=dti.tz_localize("US/Central"))
ser4 = Series(range(3))
# no tz-naive, but we do have mixed tzs and a non-DTI
df1 = DataFrame({"A": ser2, "B": ser3, "C": ser4})
exp_index = Index(
list(ser2.index) + list(ser3.index) + list(ser4.index), dtype=object
)
tm.assert_index_equal(df1.index, exp_index)
df2 = DataFrame({"A": ser2, "C": ser4, "B": ser3})
exp_index3 = Index(
list(ser2.index) + list(ser4.index) + list(ser3.index), dtype=object
)
tm.assert_index_equal(df2.index, exp_index3)
df3 = DataFrame({"B": ser3, "A": ser2, "C": ser4})
exp_index3 = Index(
list(ser3.index) + list(ser2.index) + list(ser4.index), dtype=object
)
tm.assert_index_equal(df3.index, exp_index3)
df4 = DataFrame({"C": ser4, "B": ser3, "A": ser2})
exp_index4 = Index(
list(ser4.index) + list(ser3.index) + list(ser2.index), dtype=object
)
tm.assert_index_equal(df4.index, exp_index4)
# TODO: not clear if these raising is desired (no extant tests),
# but this is de facto behavior 2021-12-22
msg = "Cannot join tz-naive with tz-aware DatetimeIndex"
with pytest.raises(TypeError, match=msg):
DataFrame({"A": ser2, "B": ser3, "C": ser4, "D": ser1})
with pytest.raises(TypeError, match=msg):
DataFrame({"A": ser2, "B": ser3, "D": ser1})
with pytest.raises(TypeError, match=msg):
DataFrame({"D": ser1, "A": ser2, "B": ser3})
@pytest.mark.parametrize(
"key_val, col_vals, col_type",
[
["3", ["3", "4"], "utf8"],
[3, [3, 4], "int8"],
],
)
def test_dict_data_arrow_column_expansion(self, key_val, col_vals, col_type):
# GH 53617
pa = pytest.importorskip("pyarrow")
cols = pd.arrays.ArrowExtensionArray(
pa.array(col_vals, type=pa.dictionary(pa.int8(), getattr(pa, col_type)()))
)
result = DataFrame({key_val: [1, 2]}, columns=cols)
expected = DataFrame([[1, np.nan], [2, np.nan]], columns=cols)
expected.isetitem(1, expected.iloc[:, 1].astype(object))
tm.assert_frame_equal(result, expected)
| TestDataFrameConstructorIndexInference |
python | pytorch__pytorch | test/inductor/test_perf.py | {
"start": 39544,
"end": 40932
} | class ____:
def test_horizontal(self):
def f(a):
b = a.sum(dim=0)
c = a.cos()
return b, c
inp = (T(10, 10),)
self.assertExpectedInline(count_numel(f, *inp), """210""")
# TODO: We aren't fusing outer dim softmaxes
def test_softmax_outer(self):
def f(a):
return torch.softmax(a, dim=0)
inp = (T(10, 10),)
self.assertExpectedInline(count_numel(f, *inp), """200""")
# TODO: The greedy fusion strategy results in suboptimal grouping
@patch.object(config, "realize_opcount_threshold", 0)
def test_fusion_choice4(self):
def f(a, b, b2):
c = a + b
d = torch.mm(c, c)
e = c + b + b2
f = d + e + b2
return f, e
inp = (T(10, 10), T(10, 10, dtype=torch.float16), T(10, 10))
self.assertExpectedInline(count_numel(f, *inp), """1000""")
# TODO: We materialize the intermediate if we don't unroll the reduction
def test_neighbor(self):
def f(a, b):
return ((a - b) ** 2).sum(dim=-1).amax(dim=1)
inp = (T(10, 1, 8), T(1, 10, 8))
self.assertExpectedInline(count_numel(f, *inp), """170""")
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if HAS_GPU_AND_TRITON:
run_tests(needs="filelock")
| WouldBeNiceIfItWorked |
python | doocs__leetcode | lcof/面试题39. 数组中出现次数超过一半的数字/Solution.py | {
"start": 0,
"end": 247
} | class ____:
def majorityElement(self, nums: List[int]) -> int:
cnt = m = 0
for v in nums:
if cnt == 0:
m, cnt = v, 1
else:
cnt += 1 if m == v else -1
return m
| Solution |
python | spyder-ide__spyder | spyder/utils/snippets/nodes.py | {
"start": 14031,
"end": 14686
} | class ____(SimpleFormatNode):
"""
Choose a string if a regex group was found.
This node represents the expression ${group :+ value_if_exists}, where
value_if_exists is evaluated if $group is present on the regex match.
"""
KIND = FormatKind.IF
def __init__(self, group_number, positive_match):
SimpleFormatNode.__init__(self, group_number)
self.positive_match = positive_match
def transform_regex(self, regex_result):
result = ''
if regex_result.group(self.group_number) is not None:
result = self.positive_match.transform_regex(regex_result)
return result
| IfFormatNode |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.