language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 3681,
"end": 3890
} | class ____(models.Model):
parent = models.ForeignKey(Issue3674ParentModel, related_name='children', on_delete=models.CASCADE)
value = models.CharField(primary_key=True, max_length=64)
| Issue3674ChildModel |
python | SmileyChris__easy-thumbnails | easy_thumbnails/apps.py | {
"start": 36,
"end": 159
} | class ____(AppConfig):
name = 'easy_thumbnails'
default_auto_field = 'django.db.models.AutoField'
| EasyThumbnailsConfig |
python | networkx__networkx | networkx/algorithms/tests/test_chordal.py | {
"start": 39,
"end": 4438
} | class ____:
@classmethod
def setup_class(cls):
# simple graph
connected_chordal_G = nx.Graph()
connected_chordal_G.add_edges_from(
[
(1, 2),
(1, 3),
(2, 3),
(2, 4),
(3, 4),
(3, 5),
(3, 6),
(4, 5),
(4, 6),
(5, 6),
]
)
cls.connected_chordal_G = connected_chordal_G
chordal_G = nx.Graph()
chordal_G.add_edges_from(
[
(1, 2),
(1, 3),
(2, 3),
(2, 4),
(3, 4),
(3, 5),
(3, 6),
(4, 5),
(4, 6),
(5, 6),
(7, 8),
]
)
chordal_G.add_node(9)
cls.chordal_G = chordal_G
non_chordal_G = nx.Graph()
non_chordal_G.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5), (3, 4), (3, 5)])
cls.non_chordal_G = non_chordal_G
self_loop_G = nx.Graph()
self_loop_G.add_edges_from([(1, 1)])
cls.self_loop_G = self_loop_G
@pytest.mark.parametrize("G", (nx.DiGraph(), nx.MultiGraph(), nx.MultiDiGraph()))
def test_is_chordal_not_implemented(self, G):
with pytest.raises(nx.NetworkXNotImplemented):
nx.is_chordal(G)
def test_is_chordal(self):
assert not nx.is_chordal(self.non_chordal_G)
assert nx.is_chordal(self.chordal_G)
assert nx.is_chordal(self.connected_chordal_G)
assert nx.is_chordal(nx.Graph())
assert nx.is_chordal(nx.complete_graph(3))
assert nx.is_chordal(nx.cycle_graph(3))
assert not nx.is_chordal(nx.cycle_graph(5))
assert nx.is_chordal(self.self_loop_G)
def test_induced_nodes(self):
G = nx.generators.classic.path_graph(10)
Induced_nodes = nx.find_induced_nodes(G, 1, 9, 2)
assert Induced_nodes == {1, 2, 3, 4, 5, 6, 7, 8, 9}
pytest.raises(
nx.NetworkXTreewidthBoundExceeded, nx.find_induced_nodes, G, 1, 9, 1
)
Induced_nodes = nx.find_induced_nodes(self.chordal_G, 1, 6)
assert Induced_nodes == {1, 2, 4, 6}
pytest.raises(nx.NetworkXError, nx.find_induced_nodes, self.non_chordal_G, 1, 5)
def test_graph_treewidth(self):
with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"):
nx.chordal_graph_treewidth(self.non_chordal_G)
def test_chordal_find_cliques(self):
cliques = {
frozenset([9]),
frozenset([7, 8]),
frozenset([1, 2, 3]),
frozenset([2, 3, 4]),
frozenset([3, 4, 5, 6]),
}
assert set(nx.chordal_graph_cliques(self.chordal_G)) == cliques
with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"):
set(nx.chordal_graph_cliques(self.non_chordal_G))
with pytest.raises(nx.NetworkXError, match="Input graph is not chordal"):
set(nx.chordal_graph_cliques(self.self_loop_G))
def test_chordal_find_cliques_path(self):
G = nx.path_graph(10)
cliqueset = nx.chordal_graph_cliques(G)
for u, v in G.edges():
assert frozenset([u, v]) in cliqueset or frozenset([v, u]) in cliqueset
def test_chordal_find_cliquesCC(self):
cliques = {frozenset([1, 2, 3]), frozenset([2, 3, 4]), frozenset([3, 4, 5, 6])}
cgc = nx.chordal_graph_cliques
assert set(cgc(self.connected_chordal_G)) == cliques
def test_complete_to_chordal_graph(self):
fgrg = nx.fast_gnp_random_graph
test_graphs = [
nx.barbell_graph(6, 2),
nx.cycle_graph(15),
nx.wheel_graph(20),
nx.grid_graph([10, 4]),
nx.ladder_graph(15),
nx.star_graph(5),
nx.bull_graph(),
fgrg(20, 0.3, seed=1),
]
for G in test_graphs:
H, a = nx.complete_to_chordal_graph(G)
assert nx.is_chordal(H)
assert len(a) == H.number_of_nodes()
if nx.is_chordal(G):
assert G.number_of_edges() == H.number_of_edges()
assert set(a.values()) == {0}
else:
assert len(set(a.values())) == H.number_of_nodes()
| TestMCS |
python | joke2k__faker | faker/providers/automotive/en_GB/__init__.py | {
"start": 48,
"end": 322
} | class ____(AutomotiveProvider):
"""Implement automotive provider for ``en_GB`` locale.
Sources:
- https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_the_United_Kingdom
"""
license_formats = (
"??## ???",
"??##???",
)
| Provider |
python | django__django | tests/delete_regress/models.py | {
"start": 2893,
"end": 2978
} | class ____(models.Model):
my_file = models.ForeignKey(File, models.CASCADE)
| FooFile |
python | Textualize__textual | tests/option_list/test_option_prompt_replacement.py | {
"start": 223,
"end": 3245
} | class ____(App[None]):
"""Test option list application."""
def compose(self) -> ComposeResult:
yield OptionList(
Option("0", id="0"),
Option("line1\nline2"),
)
async def test_replace_option_prompt_with_invalid_id() -> None:
"""Attempting to replace the prompt of an option ID that doesn't exist should raise an exception."""
async with OptionListApp().run_test() as pilot:
with pytest.raises(OptionDoesNotExist):
pilot.app.query_one(OptionList).replace_option_prompt("does-not-exist", "new-prompt")
async def test_replace_option_prompt_with_invalid_index() -> None:
"""Attempting to replace the prompt of an option index that doesn't exist should raise an exception."""
async with OptionListApp().run_test() as pilot:
with pytest.raises(OptionDoesNotExist):
pilot.app.query_one(OptionList).replace_option_prompt_at_index(23, "new-prompt")
async def test_replace_option_prompt_with_valid_id() -> None:
"""It should be possible to replace the prompt of an option ID that does exist."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.replace_option_prompt("0", "new-prompt")
assert option_list.get_option("0").prompt == "new-prompt"
async def test_replace_option_prompt_with_valid_index() -> None:
"""It should be possible to replace the prompt of an option index that does exist."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList).replace_option_prompt_at_index(1, "new-prompt")
assert option_list.get_option_at_index(1).prompt == "new-prompt"
async def test_replace_single_line_option_prompt_with_multiple() -> None:
"""It should be possible to replace single line prompt with multiple lines """
new_prompt = "new-prompt\nsecond line"
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.replace_option_prompt("0", new_prompt)
assert option_list.get_option("0").prompt == new_prompt
async def test_replace_multiple_line_option_prompt_with_single() -> None:
"""It should be possible to replace multiple line prompt with a single line"""
new_prompt = "new-prompt"
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.replace_option_prompt("0", new_prompt)
assert option_list.get_option("0").prompt == new_prompt
async def test_replace_multiple_line_option_prompt_with_multiple() -> None:
"""It should be possible to replace multiple line prompt with multiple lines"""
new_prompt = "new-prompt\nsecond line"
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.replace_option_prompt_at_index(1, new_prompt)
assert option_list.get_option_at_index(1).prompt == new_prompt
| OptionListApp |
python | joke2k__faker | tests/providers/test_internet.py | {
"start": 34547,
"end": 34856
} | class ____:
"""Test th_TH internet provider methods"""
def test_tld(self, faker):
tld = faker.tld()
assert tld in ThThInternetProvider.tlds
def test_slug(self, faker):
num_of_samples = 100
for _ in range(num_of_samples):
assert faker.slug() != ""
| TestThTh |
python | scikit-learn__scikit-learn | sklearn/linear_model/_ridge.py | {
"start": 99442,
"end": 107366
} | class ____(_RidgeClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs Leave-One-Out Cross-Validation. Currently,
only the n_features > n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : array-like of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
If using Leave-One-Out cross-validation, alphas must be strictly positive.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
scoring : str, callable, default=None
The scoring method to use for cross-validation. Options:
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: negative :ref:`mean squared error <mean_squared_error>` if cv is
None (i.e. when using leave-one-out cross-validation), or
:ref:`accuracy <accuracy_score>` otherwise.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
store_cv_results : bool, default=False
Flag indicating if the cross-validation results corresponding to
each alpha should be stored in the ``cv_results_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
.. versionchanged:: 1.5
Parameter name changed from `store_cv_values` to `store_cv_results`.
Attributes
----------
cv_results_ : ndarray of shape (n_samples, n_targets, n_alphas), optional
Cross-validation results for each alpha (only if ``store_cv_results=True`` and
``cv=None``). After ``fit()`` has been called, this attribute will
contain the mean squared errors if `scoring is None` otherwise it
will contain standardized per point prediction values.
.. versionchanged:: 1.5
`cv_values_` changed to `cv_results_`.
coef_ : ndarray of shape (1, n_features) or (n_targets, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
best_score_ : float
Score of base estimator with best alpha.
.. versionadded:: 0.23
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifierCV
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.9630...
"""
_parameter_constraints: dict = {
**_BaseRidgeCV._parameter_constraints,
"class_weight": [dict, StrOptions({"balanced"}), None],
}
for param in ("gcv_mode", "alpha_per_target"):
_parameter_constraints.pop(param)
def __init__(
self,
alphas=(0.1, 1.0, 10.0),
*,
fit_intercept=True,
scoring=None,
cv=None,
class_weight=None,
store_cv_results=False,
):
super().__init__(
alphas=alphas,
fit_intercept=fit_intercept,
scoring=scoring,
cv=cv,
store_cv_results=store_cv_results,
)
self.class_weight = class_weight
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None, **params):
"""Fit Ridge classifier with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features. When using GCV,
will be cast to float64 if necessary.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
**params : dict, default=None
Parameters to be passed to the underlying scorer.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Fitted estimator.
"""
# `RidgeClassifier` does not accept "sag" or "saga" solver and thus support
# csr, csc, and coo sparse matrices. By using solver="eigen" we force to accept
# all sparse format.
X, y, sample_weight, Y = self._prepare_data(X, y, sample_weight, solver="eigen")
# If cv is None, gcv mode will be used and we used the binarized Y
# since y will not be binarized in _RidgeGCV estimator.
# If cv is not None, a GridSearchCV with some RidgeClassifier
# estimators are used where y will be binarized. Thus, we pass y
# instead of the binarized Y.
target = Y if self.cv is None else y
super().fit(X, target, sample_weight=sample_weight, **params)
return self
| RidgeClassifierCV |
python | skorch-dev__skorch | skorch/tests/test_doctor.py | {
"start": 148,
"end": 22074
} | class ____: # pylint: disable=too-many-public-methods
"""Test functionality of SkorchDoctor using a simple model"""
@pytest.fixture(scope='module')
def module_cls(self):
"""Return a simple module class with predictable parameters"""
class MyModule(nn.Module):
"""Module with predictable parameters"""
def __init__(self):
super().__init__()
self.lin0 = nn.Linear(20, 20)
nn.init.eye_(self.lin0.weight)
nn.init.zeros_(self.lin0.bias)
self.lin1 = nn.Linear(20, 2)
nn.init.zeros_(self.lin1.weight)
nn.init.ones_(self.lin1.bias)
self.softmax = nn.Softmax(dim=-1)
def forward(self, X):
X = self.lin0(X)
X = self.lin1(X)
return self.softmax(X)
return MyModule
@pytest.fixture(scope='module')
def custom_split(self):
"""Split train/valid deterministically so that we know all training
samples"""
class Split():
"""Deterministically split train/valid into 80%/20%"""
def __call__(self, dataset, y=None, groups=None):
n = int(len(dataset) * 0.8)
dataset_train = torch.utils.data.Subset(dataset, np.arange(n))
dataset_valid = torch.utils.data.Subset(
dataset, np.arange(n, len(dataset)))
return dataset_train, dataset_valid
return Split()
@pytest.fixture(scope='module')
def net_cls(self):
from skorch import NeuralNetClassifier
return NeuralNetClassifier
@pytest.fixture(scope='module')
def doctor_cls(self):
from skorch.helper import SkorchDoctor
return SkorchDoctor
@pytest.fixture(scope='module')
def data(self, classifier_data):
X, y = classifier_data
# a small amount of data is enough
return X[:50], y[:50]
@pytest.fixture(scope='module')
def doctor(self, module_cls, net_cls, doctor_cls, data, custom_split):
net = net_cls(module_cls, max_epochs=3, batch_size=32, train_split=custom_split)
doctor = doctor_cls(net)
doctor.fit(*data)
return doctor
def test_activation_recs_general_content(self, doctor):
recs = doctor.activation_recs_
assert set(recs.keys()) == {'module', 'criterion'}
# nothing recorded for criterion
assert recs['criterion'] == []
recs_module = recs['module']
# 3 epochs, 2 batches per epoch
assert len(recs_module) == 6
# each batch has layers lin0, lin1, softmax
for batch in recs_module:
assert set(batch.keys()) == {'lin0', 'lin1', 'softmax'}
def test_activation_recs_values(self, doctor, data):
recs_module = doctor.activation_recs_['module']
for key in ('lin0', 'lin1', 'softmax'):
# 80% of 50 samples is 40, batch size 32 => 32 + 8 samples per batch
batch_sizes = [len(batch[key]) for batch in recs_module]
assert batch_sizes == [32, 8, 32, 8, 32, 8]
X, _ = data
# for the very first batch, before any update, we actually know the values
batch0 = recs_module[0]
lin0_0 = batch0['lin0']
# since it is the identity function, batches should equal the data
np.testing.assert_array_almost_equal(lin0_0, X[:32])
lin1_0 = batch0['lin1']
# since weights are 0 and bias is 1, all values should be 1
np.testing.assert_array_almost_equal(lin1_0, 1.0)
softmax_0 = batch0['softmax']
# since all inputs are equal, probabilities should be uniform
np.testing.assert_array_almost_equal(softmax_0, 0.5)
def test_activation_recs_not_all_identical(self, doctor):
# make sure that values are not just all identical, using a large
# tolerance to exclude small deviations
recs = doctor.activation_recs_['module']
recs_lin0 = [rec['lin0'] for rec in recs]
for act0, act1 in itertools.combinations(recs_lin0, r=2):
if act0.shape == act1.shape:
assert not np.allclose(act0, act1, rtol=1e-3)
recs_lin1 = [rec['lin1'] for rec in recs]
for act0, act1 in itertools.combinations(recs_lin1, r=2):
if act0.shape == act1.shape:
assert not np.allclose(act0, act1, rtol=1e-3)
softmax = [rec['softmax'] for rec in recs]
for act0, act1 in itertools.combinations(softmax, r=2):
if act0.shape == act1.shape:
assert not np.allclose(act0, act1, rtol=1e-3)
def test_gradient_recs_general_content(self, doctor):
recs = doctor.gradient_recs_
assert set(recs.keys()) == {'module', 'criterion'}
# nothing recorded for criterion, 3 epochs
assert recs['criterion'] == []
recs_module = recs['module']
# 3 epochs, 2 batches per epoch
assert len(recs_module) == 6
# each batch has weights and biases for lin0 & lin1
expected = {'lin0.weight', 'lin0.bias', 'lin1.weight', 'lin1.bias'}
for batch in recs_module:
assert set(batch.keys()) == expected
def test_gradient_recs_values(self, doctor):
recs_module = doctor.gradient_recs_['module']
expected_shapes = {
'lin0.weight': (20, 20),
'lin0.bias': (20,),
'lin1.weight': (2, 20),
'lin1.bias': (2,),
}
for key in ('lin0.weight', 'lin0.bias', 'lin1.weight', 'lin1.bias'):
grad_shapes = [batch[key].shape for batch in recs_module]
expected_shape = expected_shapes[key]
# 2 batches, 3 epochs
assert grad_shapes == [expected_shape] * 6
# There is not really much we know about the gradient values, we just
# rely on the gradient hooks doing what they're supposed to do. The only
# gradients we actually know are for the first layer in the first batch:
# They have to be zero because in the second layer, we have weights of 0.
batch0 = recs_module[0]
grad_weight = batch0['lin0.weight']
grad_bias = batch0['lin0.bias']
assert np.allclose(grad_weight, 0.0)
assert np.allclose(grad_bias, 0.0)
def test_gradient_recs_not_all_identical(self, doctor):
# make sure that values are not just all identical, using a large
# tolerance to exclude small deviations
recs = doctor.gradient_recs_['module']
recs_lin0_weight = [rec['lin0.weight'] for rec in recs]
for grad0, grad1 in itertools.combinations(recs_lin0_weight, r=2):
assert not np.allclose(grad0, grad1, rtol=1e-3)
recs_lin0_bias = [rec['lin0.bias'] for rec in recs]
for grad0, grad1 in itertools.combinations(recs_lin0_bias, r=2):
assert not np.allclose(grad0, grad1, rtol=1e-3)
recs_lin1_weight = [rec['lin1.weight'] for rec in recs]
for grad0, grad1 in itertools.combinations(recs_lin1_weight, r=2):
assert not np.allclose(grad0, grad1, rtol=1e-3)
recs_lin1_bias = [rec['lin1.bias'] for rec in recs]
for grad0, grad1 in itertools.combinations(recs_lin1_bias, r=2):
assert not np.allclose(grad0, grad1, rtol=1e-3)
def test_param_update_recs_general_content(self, doctor):
recs = doctor.param_update_recs_
assert set(recs.keys()) == {'module', 'criterion'}
# nothing recorded for criterion
assert recs['criterion'] == []
recs_module = recs['module']
# 3 epochs, 2 batches per epoch
assert len(recs_module) == 6
# each batch has weights and biases for lin0 & lin1
expected = {'lin0.weight', 'lin0.bias', 'lin1.weight', 'lin1.bias'}
for batch in recs_module:
assert set(batch.keys()) == expected
def test_param_update_recs_values(self, doctor):
recs= doctor.param_update_recs_['module']
assert all(np.isscalar(val) for d in recs for val in d.values())
# for the very first batch, before any update, we actually know that the
# updates must be 0 because the gradients are 0.
batch0 = recs[0]
assert np.isclose(batch0['lin0.weight'], 0)
assert np.isclose(batch0['lin0.bias'], 0)
def test_param_update_recs_not_all_identical(self, doctor):
# make sure that values are not just all identical, using a large
# tolerance to exclude small deviations
recs = doctor.param_update_recs_['module']
recs_lin0_weight = [rec['lin0.weight'] for rec in recs]
for upd0, upd1 in itertools.combinations(recs_lin0_weight, r=2):
assert not np.isclose(upd0, upd1, rtol=1e-3)
recs_lin0_bias = [rec['lin0.bias'] for rec in recs]
for upd0, upd1 in itertools.combinations(recs_lin0_bias, r=2):
assert not np.isclose(upd0, upd1, rtol=1e-3)
recs_lin1_weight = [rec['lin1.weight'] for rec in recs]
for upd0, upd1 in itertools.combinations(recs_lin1_weight, r=2):
assert not np.isclose(upd0, upd1, rtol=1e-3)
recs_lin1_bias = [rec['lin1.bias'] for rec in recs]
for upd0, upd1 in itertools.combinations(recs_lin1_bias, r=2):
assert not np.isclose(upd0, upd1, rtol=1e-3)
def test_hooks_cleaned_up_after_fit(self, doctor, data):
# make sure that the hooks are cleaned up by checking that no more recs
# are written when continuing to fit the net
num_activation_recs_before = len(doctor.activation_recs_['module'])
num_gradient_recs_before = len(doctor.gradient_recs_['module'])
net = doctor.net
net.partial_fit(*data)
num_activation_recs_after = len(doctor.activation_recs_['module'])
num_gradient_recs_after = len(doctor.gradient_recs_['module'])
assert num_activation_recs_before == num_activation_recs_after
assert num_gradient_recs_before == num_gradient_recs_after
def test_callbacks_cleaned_up_after_fit_with_initial_callbacks(
self, doctor_cls, net_cls, module_cls, data
):
# make sure that the callbacks are the same before and after, this is
# important because SkorchDoctor will temporarily add a callback
from skorch.callbacks import EpochScoring, GradientNormClipping
net = net_cls(
module_cls,
callbacks=[EpochScoring('f1'), GradientNormClipping(1.0)],
).initialize()
callbacks_without_doctor = net.callbacks_[:]
doctor = doctor_cls(net).fit(*data)
callbacks_with_doctor = doctor.net.callbacks_
assert len(callbacks_without_doctor) == len(callbacks_with_doctor)
for (name0, cb0), (name1, cb1) in zip(
callbacks_without_doctor, callbacks_with_doctor
):
assert name0 == name1
# pylint: disable=unidiomatic-typecheck
assert type(cb0) == type(cb1)
def test_get_layer_names(self, doctor):
layer_names = doctor.get_layer_names()
expected = {
'criterion': [],
'module': ['lin0', 'lin1', 'softmax']
}
assert layer_names == expected
def test_get_parameter_names(self, doctor):
param_names = doctor.get_param_names()
expected = {
'criterion': [],
'module': ['lin0.weight', 'lin0.bias', 'lin1.weight', 'lin1.bias'],
}
assert param_names == expected
def test_predict(self, doctor, data):
X, _ = data
y_pred_doctor = doctor.predict(X)
y_pred_net = doctor.net.predict(X)
np.testing.assert_allclose(y_pred_doctor, y_pred_net)
def test_predict_proba(self, doctor, data):
X, _ = data
y_proba_doctor = doctor.predict_proba(X)
y_proba_net = doctor.net.predict_proba(X)
np.testing.assert_allclose(y_proba_doctor, y_proba_net)
def test_score(self, doctor, data):
X, y = data
score_doctor = doctor.score(X, y)
score_net = doctor.net.score(X, y)
np.testing.assert_allclose(score_doctor, score_net)
def test_recs_with_filter(self, module_cls, net_cls, doctor_cls, data, custom_split):
# when initializing SkorchDoctor with a match_fn, only records whose
# keys match should be kept
def match_fn(name):
return "lin0" in name
net = net_cls(module_cls, max_epochs=3, batch_size=32, train_split=custom_split)
doctor = doctor_cls(net, match_fn=match_fn)
doctor.fit(*data)
# check trivial case of empty lists
assert doctor.activation_recs_['module']
assert doctor.gradient_recs_['module']
assert doctor.param_update_recs_['module']
for rec in doctor.activation_recs_['module']:
for key in rec.keys():
assert match_fn(key)
for rec in doctor.gradient_recs_['module']:
for key in rec.keys():
assert match_fn(key)
for rec in doctor.param_update_recs_['module']:
for key in rec.keys():
assert match_fn(key)
def test_recs_with_filter_no_match(
self, module_cls, net_cls, doctor_cls, data, custom_split
):
# raise a helpful error if the match function filters away everything
def match_fn(name):
return "this-substring-does-not-exist" in name
net = net_cls(module_cls, max_epochs=3, batch_size=32, train_split=custom_split)
doctor = doctor_cls(net, match_fn=match_fn)
msg = (
"No activations, gradients, or updates are being recorded, "
"please check the match_fn"
)
with pytest.raises(ValueError, match=msg):
doctor.fit(*data)
############
# PLOTTING #
############
# Just do very basic plotting tests, not exact content, just that it works
@pytest.fixture
def mock_matplotlib_not_installed(self):
# fixture to make it seem like matplotlib was not installed
orig_import = __import__
def import_mock(name, *args):
if name == 'matplotlib':
# pretend that matplotlib is not installed
raise ModuleNotFoundError("no module named 'matplotlib'")
return orig_import(name, *args)
with mock.patch('builtins.__import__', side_effect=import_mock):
yield import_mock
# pylint: disable=unused-argument
def test_matplotlib_not_installed(self, mock_matplotlib_not_installed, doctor):
# Note: Unfortunately, the order of tests matters here: This test should
# run before the ones below that use matplotlib, otherwise the import
# mock doesn't work correctly.
msg = (
r"This feature requires matplotlib to be installed; "
r"please install it first, e.g. using "
r"\'python -m pip install matplotlib\'"
)
with pytest.raises(ImportError, match=msg):
doctor.plot_loss()
@pytest.fixture(scope='module')
def plt(self):
matplotlib = pytest.importorskip('matplotlib')
matplotlib.use("agg")
import matplotlib.pyplot as plt
# not sure why closing is important but sklearn does it:
# https://github.com/scikit-learn/scikit-learn/blob/964189df31dd2aa037c5bc58c96f88193f61253b/sklearn/conftest.py#L193
plt.close("all")
yield plt
plt.close("all")
# pylint: disable=unused-argument
def test_plot_not_fitted_raises(self, plt, doctor_cls, net_cls, module_cls):
# testing only one of the plotting functions, but all support it
from skorch.exceptions import NotInitializedError
doctor = doctor_cls(net_cls(module_cls))
msg = (
r"SkorchDoctor is not initialized yet. Call 'fit\(X, y\) before using this "
"method."
)
with pytest.raises(NotInitializedError, match=msg):
doctor.plot_loss()
def test_plot_loss_default(self, plt, doctor):
ax = doctor.plot_loss()
assert isinstance(ax, plt.Subplot)
def test_plot_loss_non_default(self, plt, doctor):
_, ax = plt.subplots()
ax_after = doctor.plot_loss(ax=ax, figsize=(1, 2), lw=5)
assert isinstance(ax, plt.Subplot)
assert ax_after is ax
def test_plot_activations_default(self, plt, doctor):
axes = doctor.plot_activations()
assert isinstance(axes, np.ndarray)
assert axes.shape == (1, 1)
assert isinstance(axes[0, 0], plt.Subplot)
def test_plot_activations_non_default(self, plt, doctor):
_, axes = plt.subplots(1, 1, squeeze=False)
axes_after = doctor.plot_activations(
axes=axes,
step=0,
match_fn=lambda name: 'lin' in name,
histtype='bar',
lw=3,
bins=np.arange(10),
density=False,
figsize=(1, 2),
align='left',
)
assert axes_after is axes
def test_plot_activations_passed_axes_2d(self, plt, doctor):
_, axes = plt.subplots(2, 2)
axes_after = doctor.plot_activations(
axes=axes,
step=0,
match_fn=lambda name: 'lin' in name,
histtype='bar',
lw=3,
bins=np.arange(10),
density=False,
figsize=(1, 2),
align='left',
)
assert axes_after is axes
# pylint: disable=unused-argument
def test_plot_activations_no_match(self, plt, doctor):
msg = (
r"No layer found matching the specification of match_fn. "
r"Use doctor.get_layer_names\(\) to check all layers."
)
with pytest.raises(ValueError, match=msg):
doctor.plot_activations(match_fn=lambda name: 'foo' in name)
def test_plot_gradients_default(self, plt, doctor):
axes = doctor.plot_gradients()
assert isinstance(axes, np.ndarray)
assert axes.shape == (1, 1)
assert isinstance(axes[0, 0], plt.Subplot)
def test_plot_gradients_non_default(self, plt, doctor):
_, axes = plt.subplots(1, 1, squeeze=False)
axes_after = doctor.plot_gradients(
axes=axes,
step=0,
match_fn=lambda name: 'lin' in name,
histtype='bar',
lw=3,
bins=np.arange(10),
density=False,
figsize=(1, 2),
align='left',
)
assert axes_after is axes
# pylint: disable=unused-argument
def test_plot_gradients_no_match(self, plt, doctor):
msg = (
r"No parameter found matching the specification of match_fn. "
r"Use doctor.get_param_names\(\) to check all parameters."
)
with pytest.raises(ValueError, match=msg):
doctor.plot_gradients(match_fn=lambda name: 'foo' in name)
def test_plot_param_updates_default(self, plt, doctor):
axes = doctor.plot_param_updates()
assert isinstance(axes, np.ndarray)
assert axes.shape == (1, 1)
assert isinstance(axes[0, 0], plt.Subplot)
def test_plot_param_updates_non_default(self, plt, doctor):
_, axes = plt.subplots(1, 1, squeeze=False)
axes_after = doctor.plot_param_updates(
axes=axes,
match_fn=lambda name: 'lin' in name,
lw=3,
figsize=(1, 2),
)
assert isinstance(axes, np.ndarray)
assert axes_after is axes
# pylint: disable=unused-argument
def test_plot_param_updates_no_match(self, plt, doctor):
msg = (
r"No parameter found matching the specification of match_fn. "
r"Use doctor.get_param_names\(\) to check all parameters."
)
with pytest.raises(ValueError, match=msg):
doctor.plot_param_updates(match_fn=lambda name: 'foo' in name)
def test_plot_activations_over_time_default(self, plt, doctor):
ax = doctor.plot_activations_over_time(layer_name='lin0')
assert isinstance(ax, plt.Subplot)
def test_plot_activations_over_time_non_default(self, plt, doctor):
_, ax = plt.subplots()
ax_after = doctor.plot_activations_over_time(
layer_name='softmax',
ax=ax,
lw=3,
bins=np.arange(10),
color='r',
figsize=(1, 2),
interpolate=True,
)
assert ax_after is ax
# pylint: disable=unused-argument
def test_plot_activations_over_time_no_match(self, plt, doctor):
msg = (
r"No layer named 'foo' could be found. "
r"Use doctor.get_layer_names\(\) to check all layers."
)
with pytest.raises(ValueError, match=msg):
doctor.plot_activations_over_time(layer_name='foo')
def test_plot_gradient_over_time_default(self, plt, doctor):
ax = doctor.plot_gradient_over_time(param_name='lin1.weight')
assert isinstance(ax, plt.Subplot)
def test_plot_gradient_over_time_non_default(self, plt, doctor):
_, ax = plt.subplots()
ax_after = doctor.plot_gradient_over_time(
param_name='lin0.bias',
ax=ax,
lw=3,
bins=np.arange(10),
color='r',
figsize=(1, 2),
interpolate=True,
)
assert ax_after is ax
# pylint: disable=unused-argument
def test_plot_gradient_over_time_no_match(self, plt, doctor):
msg = (
r"No parameter named 'foo' could be found. "
r"Use doctor.get_param_names\(\) to check all parameters."
)
with pytest.raises(ValueError, match=msg):
doctor.plot_gradient_over_time(param_name='foo')
| TestSkorchDoctorSimple |
python | huggingface__transformers | src/transformers/models/dbrx/configuration_dbrx.py | {
"start": 2083,
"end": 4689
} | class ____(PreTrainedConfig):
"""Configuration class for Dbrx FFN.
[`DbrxFFN`] class. It is used to instantiate feedforward layers according to
the specified arguments, defining the layers architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
ffn_act_fn (`dict`, *optional*, defaults to `None`): A dict specifying activation function for the FFN.
The dict should have a key 'name' with the value being the name of the activation function along with
any additional keyword arguments. If `None`, then set to `{"name": "silu"}`.
ffn_hidden_size (`int`, *optional*, defaults to 3584): The hidden size of the feedforward network.
moe_num_experts (`int`, *optional*, defaults to 4): The number of experts in the mixture of experts layer.
moe_top_k (`int`, *optional*, defaults to 1): The number of experts to use in the mixture of experts layer.
moe_jitter_eps (`float`, *optional*, defaults to `None`): If not `None`, the jitter epsilon for the mixture of experts layer.
moe_loss_weight (`float`, *optional*, defaults to 0.01): The loss weight for the mixture of experts layer.
moe_normalize_expert_weights (`float`, *optional*, defaults to 1.0): The normalization factor for the expert weights.
"""
base_config_key = "ffn_config"
def __init__(
self,
hidden_size=6144,
ffn_act_fn: Optional[dict] = None,
ffn_hidden_size: int = 3584,
moe_num_experts: int = 4,
moe_top_k: int = 1,
moe_jitter_eps: Optional[float] = None,
moe_loss_weight: float = 0.01,
moe_normalize_expert_weights: Optional[float] = 1.0,
**kwargs: Any,
):
super().__init__()
if ffn_act_fn is None:
ffn_act_fn = {"name": "silu"}
self.hidden_size = hidden_size
self.ffn_act_fn = ffn_act_fn
self.ffn_hidden_size = ffn_hidden_size
self.moe_num_experts = moe_num_experts
self.moe_top_k = moe_top_k
self.moe_jitter_eps = moe_jitter_eps
self.moe_loss_weight = moe_loss_weight
self.moe_normalize_expert_weights = moe_normalize_expert_weights
for k in ["model_type", "attn_implementation", "transformers_version", "_commit_hash", "torch_dtype", "dtype"]:
if k in kwargs:
kwargs.pop(k)
if len(kwargs) != 0:
raise ValueError(f"Found unknown {kwargs=}")
| DbrxFFNConfig |
python | bokeh__bokeh | src/bokeh/sphinxext/_internal/bokeh_autodoc.py | {
"start": 1960,
"end": 2418
} | class ____(ModuleLevelDocumenter):
directivetype = "bokeh-color"
objtype = ""
priority = 20
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return isinstance(member, Color)
# We don't need/want anything from the actual NamedColor class
def add_content(self, more_content, no_docstring=False):
pass
def get_object_members(self, want_all):
return False, []
| ColorDocumenter |
python | allegroai__clearml | clearml/backend_api/services/v2_9/events.py | {
"start": 40908,
"end": 42865
} | class ____(Response):
"""
Response of events.debug_images endpoint.
:param metrics: Debug image events grouped by task metrics and iterations
:type metrics: Sequence[dict]
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
"""
_service = "events"
_action = "debug_images"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"metrics": {
"description": "Debug image events grouped by task metrics and iterations",
"items": {"type": "object"},
"type": ["array", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, metrics: Optional[List[dict]] = None, scroll_id: Optional[str] = None, **kwargs: Any) -> None:
super(DebugImagesResponse, self).__init__(**kwargs)
self.metrics = metrics
self.scroll_id = scroll_id
@schema_property("metrics")
def metrics(self) -> Optional[List[dict]]:
return self._property_metrics
@metrics.setter
def metrics(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_metrics = None
return
self.assert_isinstance(value, "metrics", (list, tuple))
self.assert_isinstance(value, "metrics", (dict,), is_array=True)
self._property_metrics = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| DebugImagesResponse |
python | scipy__scipy | scipy/integrate/_quad_vec.py | {
"start": 662,
"end": 1402
} | class ____:
"""
Argument transform from (start, +-oo) to (0, 1)
"""
def __init__(self, func, start, infty):
self._func = func
self._start = start
self._sgn = -1 if infty < 0 else 1
# Overflow threshold for the 1/t**2 factor
self._tmin = sys.float_info.min**0.5
def get_t(self, x):
z = self._sgn * (x - self._start) + 1
if z == 0:
# Can happen only if point not in range
return np.inf
return 1 / z
def __call__(self, t):
if t < self._tmin:
return 0.0
else:
x = self._start + self._sgn * (1 - t) / t
f = self._func(x)
return self._sgn * (f / t) / t
| SemiInfiniteFunc |
python | google__jax | jax/_src/cache_key.py | {
"start": 1771,
"end": 12942
} | class ____(enum.IntEnum):
# Do not remove any callback pointers from precompiled IR.
NO = enum.auto()
# Remove all callback pointers from precompiled IR.
ALL = enum.auto()
# Remove only custom_partitioning callback pointer from precompiled IR.
CUSTOM_PARTITIONING = enum.auto()
def get(
module: ir.Module,
devices: np.ndarray,
compile_options: xla_client.CompileOptions,
backend: xla_client.Client,
compression_algorithm: str = "zstandard",
ignore_callbacks: IgnoreCallbacks = IgnoreCallbacks.NO,
) -> str:
"""Creates a hashed string to use as a key to the compilation cache.
Creates a cache key that is a hex-encoded string of a unique hash based on
the arguments. The hex-encoded string is 256 characters long.
Args:
module: the input program
devices: an array of accelerator devices that the program will run on
compile_options: options passed to the XLA compiler
backend: description of the platform (e.g., TPU version)
compression_algorithm: a string representing the compression algorithm used
for the executable before persisting in the cache
ignore_callbacks: whether to remove the all callback pointer from the
computation.
Typical return value example:
'jit__psum-14ac577cdb2ef6d986078b4054cc9893a9a14a16dbb0d8f37b89167c1f1aacdf'
"""
entries = [
(
"computation",
lambda hash_obj: _hash_computation(
hash_obj, module, ignore_callbacks
),
),
(
"jax_lib version",
lambda hash_obj: hash_obj.update(
bytes(jaxlib_version_str.encode("utf-8"))
),
),
(
"backend version",
lambda hash_obj: _hash_platform(hash_obj, backend)
),
(
"XLA flags",
lambda hash_obj: _hash_xla_flags(hash_obj, get_flag_prefixes()),
),
(
"compile_options",
lambda hash_obj: _hash_serialized_compile_options(
hash_obj,
compile_options,
# In case of GPU multi-process tasks we need to strip device
# assignment to use cache key as invariant between processes.
strip_device_assignment=(backend.platform == "gpu"),
),
),
(
"accelerator_config",
lambda hash_obj: _hash_accelerator_config(hash_obj, devices),
),
(
"compression",
lambda hash_obj: _hash_string(hash_obj, compression_algorithm),
),
("custom_hook", lambda hash_obj: _hash_string(hash_obj, custom_hook())),
]
hash_obj = hashlib.sha256()
for name, hashfn in entries:
hashfn(hash_obj)
_log_cache_key_hash(hash_obj, name, hashfn)
sym_name = module.operation.attributes['sym_name']
module_name = ir.StringAttr(sym_name).value
return module_name + "-" + hash_obj.digest().hex()
def _log_cache_key_hash(hash_obj, last_serialized: str, hashfn):
if logger.isEnabledFor(logging.DEBUG):
# Log the hash of just this entry
fresh_hash_obj = hashlib.sha256()
hashfn(fresh_hash_obj)
logger.debug(
"get_cache_key hash of serialized %s: %s",
last_serialized,
fresh_hash_obj.digest().hex(),
)
# Log the cumulative hash
logger.debug(
"get_cache_key hash after serializing %s: %s",
last_serialized,
hash_obj.digest().hex(),
)
def _remove_callbacks(m: ir.Module, ignore_callbacks: IgnoreCallbacks):
"""Removes callback pointers from precompiled IR.
Python function pointers are not deterministic across executions.
"""
def _update_bc_attribute(op: ir.Operation) -> ir.WalkResult:
if op.name == "stablehlo.custom_call" and (
(
ignore_callbacks == IgnoreCallbacks.ALL
and op.attributes["call_target_name"].value.endswith("callback")
)
or op.attributes["call_target_name"].value == "CustomSPMDPartitioning"
):
op.attributes["backend_config"] = ir.StringAttr.get("REMOVED")
return ir.WalkResult.ADVANCE
if ignore_callbacks == IgnoreCallbacks.NO:
return m
m.operation.walk(_update_bc_attribute)
return m
def _serialize_ir(m: ir.Module, ignore_callbacks: IgnoreCallbacks) -> bytes:
output = io.BytesIO()
if ignore_callbacks != IgnoreCallbacks.NO:
m = _remove_callbacks(
type_cast(ir.Module, m.operation.clone()), ignore_callbacks
)
m.operation.write_bytecode(file=output)
return output.getvalue()
def _canonicalize_ir(
m_original: ir.Module, ignore_callbacks: IgnoreCallbacks
) -> bytes:
with m_original.context:
m = type_cast(ir.Module, m_original.operation.clone())
passes = pm.PassManager.parse(
"builtin.module(strip-debuginfo)"
)
passes.run(m.operation)
return _serialize_ir(m, ignore_callbacks)
def _hash_computation(hash_obj, module, ignore_callbacks: IgnoreCallbacks):
if config.compilation_cache_include_metadata_in_key.value:
canonical_ir = _serialize_ir(module, ignore_callbacks)
else:
canonical_ir = _canonicalize_ir(module, ignore_callbacks)
hash_obj.update(canonical_ir)
def _hash_devices(hash_obj, devices: np.ndarray) -> None:
for device in devices.flat:
_hash_string(hash_obj, device.device_kind)
def _hash_accelerator_config(hash_obj, accelerators: np.ndarray):
accelerator_devices = []
for accelerator in accelerators.flat:
accelerator_devices.append(accelerator)
try:
hash_obj.update(
xla_client.get_topology_for_devices(accelerator_devices).serialize()
)
except _jax.JaxRuntimeError as ex:
# Fall back for those backends that do not support serialized
# PjRtTopologyDescription as yet.
logger.info("get (_hash_accelerator_config): unable to hash "
"accelerator config, falling back to hashing "
"devices %s (type %s)", ex, type(ex))
_hash_devices(hash_obj, accelerators)
# LINT.IfChange(xla_flags)
xla_flags_to_exclude_from_cache_key = [
"--xla_dump_compress_protos",
"--xla_dump_module_metadata",
"--xla_dump_max_hlo_modules",
"--xla_dump_include_timestamp",
"--xla_dump_hlo_pass_re",
"--xla_dump_hlo_module_re",
"--xla_dump_hlo_snapshots",
"--xla_dump_fusion_visualization",
"--xla_dump_hlo_as_url",
"--xla_dump_hlo_as_proto",
"--xla_dump_hlo_as_text",
"--xla_dump_hlo_as_long_text",
"--xla_dump_hlo_as_html",
"--xla_dump_hlo_as_dot",
"--xla_dump_to",
"--xla_force_host_platform_device_count",
"--xla_dump_disable_metadata",
"--xla_dump_hlo_pipeline_re",
"--xla_tpu_sdc_checker_streamz_metric",
"--xla_tpu_sdc_checker_enable_sdc_event_callbacks",
"--xla_tpu_sdc_checker_enable_coresweep_ng_callbacks",
"--xla_tpu_sdc_checker_no_logging_if_callbacks_are_present",
"--xla_gpu_cuda_data_dir",
"--xla_gpu_experimental_autotune_cache_mode",
]
env_override_flags_to_exclude_from_cache_key = {
x.strip("-") for x in xla_flags_to_exclude_from_cache_key
}
# LINT.ThenChange(:debug_options)
def _hash_serialized_compile_options(hash_obj, compile_options_obj,
strip_device_assignment=False):
# Do not mess with the original CompileOptions object since it is passed to
# the compiler. Create a deep copy for the purpose of cache key generation.
compile_options_copy = copy.deepcopy(compile_options_obj)
# Certain debug options do not affect the compile result and thus, should not
# be part of the cache key as their inclusion will result in unnecessary cache
# misses. Clear them here by setting bool values to False, ints to 0, and
# strings to empty. The exact values used to clear are not relevant as long
# as the same values are used every time for each field.
debug_options = compile_options_copy.executable_build_options.debug_options
# LINT.IfChange(debug_options)
debug_options.xla_force_host_platform_device_count = 0
debug_options.xla_dump_to = ""
debug_options.xla_dump_hlo_module_re = ""
debug_options.xla_dump_hlo_pass_re = ""
debug_options.xla_dump_hlo_as_text = False
debug_options.xla_dump_hlo_as_proto = False
debug_options.xla_dump_hlo_as_dot = False
debug_options.xla_dump_hlo_as_url = False
debug_options.xla_dump_hlo_as_html = False
debug_options.xla_dump_fusion_visualization = False
debug_options.xla_dump_hlo_snapshots = False
debug_options.xla_dump_max_hlo_modules = False
debug_options.xla_dump_module_metadata = False
debug_options.xla_dump_compress_protos = False
debug_options.xla_dump_hlo_as_long_text = False
debug_options.xla_dump_disable_metadata = False
debug_options.xla_dump_hlo_pipeline_re = ""
debug_options.xla_gpu_experimental_autotune_cache_mode = 0
# Optional way to specify the cuda install path to be used by the compiler.
# This could possibly affect the cuda version compiled with, but this should
# already be included in the platform information (and might not be reflected
# by the cuda path regardless, since this only hashes on the directory name
# and not the contents). It can also cause spurious cache misses if the cuda
# path changes across runs despite being the same version, so we clear it
# here.
debug_options.xla_gpu_cuda_data_dir = ""
# LINT.ThenChange(:xla_flags)
compile_options_copy.env_option_overrides = [
flag_value
for flag_value in compile_options_copy.env_option_overrides
if flag_value[0] not in env_override_flags_to_exclude_from_cache_key
]
if strip_device_assignment and compile_options_copy.device_assignment:
replica_count = compile_options_copy.device_assignment.replica_count()
computation_count = compile_options_copy.device_assignment.computation_count()
compile_options_copy.device_assignment = xla_client.DeviceAssignment.create(
np.arange(replica_count * computation_count).reshape(
[replica_count, computation_count])
)
return hash_obj.update(compile_options_copy.SerializeAsString())
def _hash_platform(hash_obj, backend):
_hash_string(hash_obj, backend.platform)
_hash_string(hash_obj, backend.platform_version)
_hash_string(hash_obj, backend.runtime_type)
def _hash_xla_flags(hash_obj, extra_flag_prefixes: list[str]):
xla_flags = []
xla_flags_env_var = os.getenv("XLA_FLAGS")
if xla_flags_env_var:
xla_flags.extend(xla_flags_env_var.split())
libtpu_init_args_env_var = os.getenv("LIBTPU_INIT_ARGS")
if libtpu_init_args_env_var:
xla_flags.extend(libtpu_init_args_env_var.split())
for arg in sys.argv:
if arg.startswith("--xla") or any(
arg.startswith(p) for p in extra_flag_prefixes
):
xla_flags.append(arg)
# N.B. all XLA flags that take an argument must use '=' and not a space
# (e.g. --xla_force_host_platform_device_count=8) (I think).
for flag in sorted(xla_flags):
if flag.split("=")[0] in xla_flags_to_exclude_from_cache_key:
logger.debug("Not including XLA flag in cache key: %s", flag)
continue
logger.debug("Including XLA flag in cache key: %s", flag)
_hash_string(hash_obj, flag)
def _hash_string(hash_obj, str_var):
hash_obj.update(str_var.encode("utf-8").strip())
| IgnoreCallbacks |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py | {
"start": 39974,
"end": 41558
} | class ____(nn.Module):
"""
Pyramid Pooling Module (PPM) used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
align_corners (bool): align_corners argument of F.interpolate.
Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation.
"""
def __init__(self, pool_scales: tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
super().__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.blocks = []
for i, pool_scale in enumerate(pool_scales):
block = Data2VecVisionPyramidPoolingBlock(
pool_scale=pool_scale, in_channels=in_channels, channels=channels
)
self.blocks.append(block)
self.add_module(str(i), block)
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
ppm_outs = []
for ppm in self.blocks:
ppm_out = ppm(x)
upsampled_ppm_out = nn.functional.interpolate(
ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
)
ppm_outs.append(upsampled_ppm_out)
return ppm_outs
# Copied from transformers.models.beit.modeling_beit.BeitUperHead with Beit->Data2VecVision
| Data2VecVisionPyramidPoolingModule |
python | crytic__slither | slither/core/variables/local_variable_init_from_tuple.py | {
"start": 95,
"end": 693
} | class ____(LocalVariable):
"""
Use on this pattern:
var(a,b) = f()
It is not possible to split the variable declaration in sigleton and keep the init value
We init a and b with f(). get_tuple_index ret() returns which returns values of f is to be used
"""
def __init__(self) -> None:
super().__init__()
self._tuple_index: Optional[int] = None
@property
def tuple_index(self) -> Optional[int]:
return self._tuple_index
@tuple_index.setter
def tuple_index(self, idx: int):
self._tuple_index = idx
| LocalVariableInitFromTuple |
python | pypa__pip | src/pip/_vendor/rich/markup.py | {
"start": 453,
"end": 8451
} | class ____(NamedTuple):
"""A tag in console markup."""
name: str
"""The tag name. e.g. 'bold'."""
parameters: Optional[str]
"""Any additional parameters after the name."""
def __str__(self) -> str:
return (
self.name if self.parameters is None else f"{self.name} {self.parameters}"
)
@property
def markup(self) -> str:
"""Get the string representation of this tag."""
return (
f"[{self.name}]"
if self.parameters is None
else f"[{self.name}={self.parameters}]"
)
_ReStringMatch = Match[str] # regex match object
_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
def escape(
markup: str,
_escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub,
) -> str:
"""Escapes text so that it won't be interpreted as markup.
Args:
markup (str): Content to be inserted in to markup.
Returns:
str: Markup with square brackets escaped.
"""
def escape_backslashes(match: Match[str]) -> str:
"""Called by re.sub replace matches."""
backslashes, text = match.groups()
return f"{backslashes}{backslashes}\\{text}"
markup = _escape(escape_backslashes, markup)
if markup.endswith("\\") and not markup.endswith("\\\\"):
return markup + "\\"
return markup
def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
"""Parse markup in to an iterable of tuples of (position, text, tag).
Args:
markup (str): A string containing console markup
"""
position = 0
_divmod = divmod
_Tag = Tag
for match in RE_TAGS.finditer(markup):
full_text, escapes, tag_text = match.groups()
start, end = match.span()
if start > position:
yield start, markup[position:start], None
if escapes:
backslashes, escaped = _divmod(len(escapes), 2)
if backslashes:
# Literal backslashes
yield start, "\\" * backslashes, None
start += backslashes * 2
if escaped:
# Escape of tag
yield start, full_text[len(escapes) :], None
position = end
continue
text, equals, parameters = tag_text.partition("=")
yield start, None, _Tag(text, parameters if equals else None)
position = end
if position < len(markup):
yield position, markup[position:], None
def render(
markup: str,
style: Union[str, Style] = "",
emoji: bool = True,
emoji_variant: Optional[EmojiVariant] = None,
) -> Text:
"""Render console markup in to a Text instance.
Args:
markup (str): A string containing console markup.
style: (Union[str, Style]): The style to use.
emoji (bool, optional): Also render emoji code. Defaults to True.
emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
Raises:
MarkupError: If there is a syntax error in the markup.
Returns:
Text: A test instance.
"""
emoji_replace = _emoji_replace
if "[" not in markup:
return Text(
emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,
style=style,
)
text = Text(style=style)
append = text.append
normalize = Style.normalize
style_stack: List[Tuple[int, Tag]] = []
pop = style_stack.pop
spans: List[Span] = []
append_span = spans.append
_Span = Span
_Tag = Tag
def pop_style(style_name: str) -> Tuple[int, Tag]:
"""Pop tag matching given style name."""
for index, (_, tag) in enumerate(reversed(style_stack), 1):
if tag.name == style_name:
return pop(-index)
raise KeyError(style_name)
for position, plain_text, tag in _parse(markup):
if plain_text is not None:
# Handle open brace escapes, where the brace is not part of a tag.
plain_text = plain_text.replace("\\[", "[")
append(emoji_replace(plain_text) if emoji else plain_text)
elif tag is not None:
if tag.name.startswith("/"): # Closing tag
style_name = tag.name[1:].strip()
if style_name: # explicit close
style_name = normalize(style_name)
try:
start, open_tag = pop_style(style_name)
except KeyError:
raise MarkupError(
f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
) from None
else: # implicit close
try:
start, open_tag = pop()
except IndexError:
raise MarkupError(
f"closing tag '[/]' at position {position} has nothing to close"
) from None
if open_tag.name.startswith("@"):
if open_tag.parameters:
handler_name = ""
parameters = open_tag.parameters.strip()
handler_match = RE_HANDLER.match(parameters)
if handler_match is not None:
handler_name, match_parameters = handler_match.groups()
parameters = (
"()" if match_parameters is None else match_parameters
)
try:
meta_params = literal_eval(parameters)
except SyntaxError as error:
raise MarkupError(
f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}"
)
except Exception as error:
raise MarkupError(
f"error parsing {open_tag.parameters!r}; {error}"
) from None
if handler_name:
meta_params = (
handler_name,
meta_params
if isinstance(meta_params, tuple)
else (meta_params,),
)
else:
meta_params = ()
append_span(
_Span(
start, len(text), Style(meta={open_tag.name: meta_params})
)
)
else:
append_span(_Span(start, len(text), str(open_tag)))
else: # Opening tag
normalized_tag = _Tag(normalize(tag.name), tag.parameters)
style_stack.append((len(text), normalized_tag))
text_length = len(text)
while style_stack:
start, tag = style_stack.pop()
style = str(tag)
if style:
append_span(_Span(start, text_length, style))
text.spans = sorted(spans[::-1], key=attrgetter("start"))
return text
if __name__ == "__main__": # pragma: no cover
MARKUP = [
"[red]Hello World[/red]",
"[magenta]Hello [b]World[/b]",
"[bold]Bold[italic] bold and italic [/bold]italic[/italic]",
"Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog",
":warning-emoji: [bold red blink] DANGER![/]",
]
from pip._vendor.rich import print
from pip._vendor.rich.table import Table
grid = Table("Markup", "Result", padding=(0, 1))
for markup in MARKUP:
grid.add_row(Text(markup), markup)
print(grid)
| Tag |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/views/permissions.py | {
"start": 1613,
"end": 2328
} | class ____(PermissionViewModelView):
"""Customize permission names for FAB's builtin PermissionViewModelView."""
class_permission_name = permissions.RESOURCE_PERMISSION
route_base = "/permissions"
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
]
list_title = lazy_gettext("List Permissions")
show_title = lazy_gettext("Show Permission")
add_title = lazy_gettext("Add Permission")
edit_title = lazy_gettext("Edit Permission")
label_columns = {
"action": lazy_gettext("Action"),
"resource": lazy_gettext("Resource"),
}
list_columns = ["action", "resource"]
| PermissionPairModelView |
python | ray-project__ray | python/ray/tune/search/basic_variant.py | {
"start": 1937,
"end": 6506
} | class ____:
"""Generates trials from the spec.
Args:
uuid_prefix: Used in creating the trial name.
num_samples: Number of samples from distribution
(same as tune.TuneConfig).
unresolved_spec: Experiment specification
that might have unresolved distributions.
constant_grid_search: Should random variables be sampled
first before iterating over grid variants (True) or not (False).
points_to_evaluate: Configurations that will be tried out without sampling.
lazy_eval: Whether variants should be generated
lazily or eagerly. This is toggled depending
on the size of the grid search.
start: index at which to start counting trials.
random_state (int | np.random.Generator | np.random.RandomState):
Seed or numpy random generator to use for reproducible results.
If None (default), will use the global numpy random generator
(``np.random``). Please note that full reproducibility cannot
be guaranteed in a distributed environment.
"""
def __init__(
self,
uuid_prefix: str,
num_samples: int,
unresolved_spec: dict,
constant_grid_search: bool = False,
points_to_evaluate: Optional[List] = None,
lazy_eval: bool = False,
start: int = 0,
random_state: Optional[
Union[int, "np_random_generator", np.random.RandomState]
] = None,
):
self.parser = _make_parser()
self.num_samples = num_samples
self.uuid_prefix = uuid_prefix
self.num_samples_left = num_samples
self.unresolved_spec = unresolved_spec
self.constant_grid_search = constant_grid_search
self.points_to_evaluate = points_to_evaluate or []
self.num_points_to_evaluate = len(self.points_to_evaluate)
self.counter = start
self.lazy_eval = lazy_eval
self.variants = None
self.random_state = random_state
def create_trial(self, resolved_vars, spec):
trial_id = self.uuid_prefix + ("%05d" % self.counter)
experiment_tag = str(self.counter)
# Always append resolved vars to experiment tag?
if resolved_vars:
experiment_tag += "_{}".format(format_vars(resolved_vars))
self.counter += 1
return _create_trial_from_spec(
spec,
self.parser,
evaluated_params=_flatten_resolved_vars(resolved_vars),
trial_id=trial_id,
experiment_tag=experiment_tag,
)
def __next__(self):
"""Generates Trial objects with the variant generation process.
Uses a fixed point iteration to resolve variants. All trials
should be able to be generated at once.
See also: `ray.tune.search.variant_generator`.
Returns:
Trial object
"""
if "run" not in self.unresolved_spec:
raise TuneError("Must specify `run` in {}".format(self.unresolved_spec))
if self.variants and self.variants.has_next():
# This block will be skipped upon instantiation.
# `variants` will be set later after the first loop.
resolved_vars, spec = next(self.variants)
return self.create_trial(resolved_vars, spec)
if self.points_to_evaluate:
config = self.points_to_evaluate.pop(0)
self.num_samples_left -= 1
self.variants = _VariantIterator(
_get_preset_variants(
self.unresolved_spec,
config,
constant_grid_search=self.constant_grid_search,
random_state=self.random_state,
),
lazy_eval=self.lazy_eval,
)
resolved_vars, spec = next(self.variants)
return self.create_trial(resolved_vars, spec)
elif self.num_samples_left > 0:
self.variants = _VariantIterator(
generate_variants(
self.unresolved_spec,
constant_grid_search=self.constant_grid_search,
random_state=self.random_state,
),
lazy_eval=self.lazy_eval,
)
self.num_samples_left -= 1
resolved_vars, spec = next(self.variants)
return self.create_trial(resolved_vars, spec)
else:
raise StopIteration
def __iter__(self):
return self
@PublicAPI
| _TrialIterator |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_shape_base_.py | {
"start": 17363,
"end": 18210
} | class ____(TestCase):
def test_non_iterable(self):
assert_raises(TypeError, column_stack, 1)
def test_1D_arrays(self):
# example from docstring
a = np.array((1, 2, 3))
b = np.array((2, 3, 4))
expected = np.array([[1, 2], [2, 3], [3, 4]])
actual = np.column_stack((a, b))
assert_equal(actual, expected)
def test_2D_arrays(self):
# same as hstack 2D docstring example
a = np.array([[1], [2], [3]])
b = np.array([[2], [3], [4]])
expected = np.array([[1, 2], [2, 3], [3, 4]])
actual = np.column_stack((a, b))
assert_equal(actual, expected)
def test_generator(self):
# numpy 1.24 emits a warning but we don't
# with assert_warns(FutureWarning):
column_stack([np.arange(3) for _ in range(2)])
| TestColumnStack |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/python_set.py | {
"start": 2709,
"end": 2860
} | class ____:
def __getitem__(self, inner_type):
return create_typed_runtime_set(inner_type)
Set: DagsterSetApi = DagsterSetApi()
| DagsterSetApi |
python | pytorch__pytorch | test/inductor/test_loop_ordering.py | {
"start": 40441,
"end": 44835
} | class ____(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
gm = torch.fx.symbolic_trace(lambda: 0)
graph = GraphLowering(gm)
graph.scheduler = MockScheduler
cls._exit_stack = contextlib.ExitStack()
cls._exit_stack.enter_context(V.set_graph_handler(graph))
def _check_expr(self, expr, reconstruction, val_range):
import numpy as np
from sympy import lambdify
assert len(expr.free_symbols) == 1
p0 = next(iter(expr.free_symbols))
def floordiv_replacement(a, b):
"""Replace FloorDiv(a, b) with a // b"""
return a // b
def modularindexing_replacement(x, base, divisor):
"""Replace ModularIndexing(x, base, divisor) with (x // base) % divisor"""
return (x // base) % divisor
# Replace custom functions with sympy equivalents
expr_numpy_ready = expr.replace(FloorDiv, floordiv_replacement).replace(
ModularIndexing, modularindexing_replacement
)
reconstruction_numpy_ready = reconstruction.replace(
FloorDiv, floordiv_replacement
).replace(ModularIndexing, modularindexing_replacement)
# Now lambdify with standard numpy
forward_func = lambdify(p0, expr_numpy_ready, modules="numpy")
inverse_func = lambdify(p0, reconstruction_numpy_ready, modules="numpy")
test_values = np.arange(0, val_range, dtype=np.int64)
forward_values = forward_func(test_values).astype(np.int64)
recovered_values = inverse_func(forward_values).astype(np.int64)
torch.testing.assert_close(test_values, recovered_values)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._exit_stack.close()
def test_original_complex_expression(self):
"""Test the original motivating complex expression."""
p0 = sympy.Symbol("p0")
expr = (
32768 * FloorDiv(p0, 32768)
+ 8192 * FloorDiv(ModularIndexing(p0, 1, 16), 4)
+ ModularIndexing(p0, 1, 4)
+ 256 * ModularIndexing(p0, 16, 32)
+ 4 * ModularIndexing(p0, 512, 64)
)
reconstruction = generate_inverse_formula(expr, p0)
self.assertIsNotNone(reconstruction)
self._check_expr(expr, reconstruction, 2097152)
def test_inversion_cases(self):
"""Test various expressions for correct inversion behavior."""
p = sympy.Symbol("p")
cases = [
# (expression, should_be_invertible, test_range)
# Simple 2-term base-10 style: 10 = 1 × 10 ✓
(10 * ModularIndexing(p, 10, 10) + ModularIndexing(p, 1, 10), True, 100),
# Simple 2-term base-2 style: 2 = 1 × 2 ✓
(2 * ModularIndexing(p, 2, 2) + ModularIndexing(p, 1, 2), True, 4),
# 3-term decimal: 100 = 10×10, 10 = 1×10 ✓
(
100 * FloorDiv(p, 100)
+ 10 * FloorDiv(ModularIndexing(p, 1, 100), 10)
+ ModularIndexing(p, 1, 10),
True,
1000,
),
(4 * p, False, 64), # expr and inverse not bijections
# when sorted, invertible
(ModularIndexing(p, 1, 10) + 10 * ModularIndexing(p, 10, 10), True, None),
# Wrong coefficient ratios: 4 ≠ 1×2
(4 * ModularIndexing(p, 1, 8) + ModularIndexing(p, 8, 2), False, None),
(
100 * FloorDiv(p, 100) + 7 * ModularIndexing(p, 1, 100),
False,
None,
), # Wrong ratios
(FloorDiv(p, 100) + FloorDiv(p, 10) + p, False, None), # Overlapping ranges
(p**2 + 10 * p + 1, False, None), # Quadratic
(sympy.sin(p) + sympy.cos(p), False, None), # Trigonometric
]
for expr, should_invert, test_range in cases:
reconstruction = generate_inverse_formula(expr, p)
if should_invert:
self.assertIsNotNone(reconstruction, f"Expected invertible: {expr}")
# Test correctness on sample values
self._check_expr(expr, reconstruction, test_range)
else:
self.assertIsNone(reconstruction, f"Expected non-invertible: {expr}")
if __name__ == "__main__":
if HAS_GPU:
run_tests()
| TestIndexInversion |
python | redis__redis-py | redis/cache.py | {
"start": 3451,
"end": 6248
} | class ____(CacheInterface):
def __init__(
self,
cache_config: CacheConfigurationInterface,
) -> None:
self._cache = OrderedDict()
self._cache_config = cache_config
self._eviction_policy = self._cache_config.get_eviction_policy().value()
self._eviction_policy.cache = self
@property
def collection(self) -> OrderedDict:
return self._cache
@property
def config(self) -> CacheConfigurationInterface:
return self._cache_config
@property
def eviction_policy(self) -> EvictionPolicyInterface:
return self._eviction_policy
@property
def size(self) -> int:
return len(self._cache)
def set(self, entry: CacheEntry) -> bool:
if not self.is_cachable(entry.cache_key):
return False
self._cache[entry.cache_key] = entry
self._eviction_policy.touch(entry.cache_key)
if self._cache_config.is_exceeds_max_size(len(self._cache)):
self._eviction_policy.evict_next()
return True
def get(self, key: CacheKey) -> Union[CacheEntry, None]:
entry = self._cache.get(key, None)
if entry is None:
return None
self._eviction_policy.touch(key)
return entry
def delete_by_cache_keys(self, cache_keys: List[CacheKey]) -> List[bool]:
response = []
for key in cache_keys:
if self.get(key) is not None:
self._cache.pop(key)
response.append(True)
else:
response.append(False)
return response
def delete_by_redis_keys(
self, redis_keys: Union[List[bytes], List[str]]
) -> List[bool]:
response = []
keys_to_delete = []
for redis_key in redis_keys:
# Prepare both versions for lookup
candidates = [redis_key]
if isinstance(redis_key, str):
candidates.append(redis_key.encode("utf-8"))
elif isinstance(redis_key, bytes):
try:
candidates.append(redis_key.decode("utf-8"))
except UnicodeDecodeError:
pass # Non-UTF-8 bytes, skip str version
for cache_key in self._cache:
if any(candidate in cache_key.redis_keys for candidate in candidates):
keys_to_delete.append(cache_key)
response.append(True)
for key in keys_to_delete:
self._cache.pop(key)
return response
def flush(self) -> int:
elem_count = len(self._cache)
self._cache.clear()
return elem_count
def is_cachable(self, key: CacheKey) -> bool:
return self._cache_config.is_allowed_to_cache(key.command)
| DefaultCache |
python | scikit-learn__scikit-learn | sklearn/feature_selection/_univariate_selection.py | {
"start": 34099,
"end": 36823
} | class ____(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable, default=f_classif
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See Also"). The default function only
works with classification tasks.
alpha : float, default=5e-2
The highest uncorrected p-value for features to keep.
Attributes
----------
scores_ : array-like of shape (n_features,)
Scores of features.
pvalues_ : array-like of shape (n_features,)
p-values of feature scores.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
SelectPercentile : Select features based on percentile of the highest
scores.
SelectKBest : Select features based on the k highest scores.
SelectFpr : Select features based on a false positive rate test.
SelectFdr : Select features based on an estimated false discovery rate.
GenericUnivariateSelect : Univariate feature selector with configurable
mode.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import SelectFwe, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> X_new = SelectFwe(chi2, alpha=0.01).fit_transform(X, y)
>>> X_new.shape
(569, 15)
"""
_parameter_constraints: dict = {
**_BaseFilter._parameter_constraints,
"alpha": [Interval(Real, 0, 1, closed="both")],
}
def __init__(self, score_func=f_classif, *, alpha=5e-2):
super().__init__(score_func=score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self)
return self.pvalues_ < self.alpha / len(self.pvalues_)
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
| SelectFwe |
python | spyder-ide__spyder | spyder/plugins/editor/extensions/closequotes.py | {
"start": 1446,
"end": 4887
} | class ____(EditorExtension):
"""Editor Extension for insert closing quotes automatically."""
def on_state_changed(self, state):
"""Connect/disconnect sig_key_pressed signal."""
if state:
self.editor.sig_key_pressed.connect(self._on_key_pressed)
else:
self.editor.sig_key_pressed.disconnect(self._on_key_pressed)
def _on_key_pressed(self, event):
if event.isAccepted():
return
# It is necessary to use here the text of the event and not the key
# to avoid issues with international keyboards.
# See spyder-ide/spyder#9814
char = event.text()
if char in ('"', '\'') and self.enabled:
self.editor.completion_widget.hide()
self._autoinsert_quotes(char)
event.accept()
def _autoinsert_quotes(self, char):
"""Control how to automatically insert quotes in various situations."""
line_text = self.editor.get_text('sol', 'eol')
line_to_cursor = self.editor.get_text('sol', 'cursor')
cursor = self.editor.textCursor()
last_three = self.editor.get_text('sol', 'cursor')[-3:]
last_two = self.editor.get_text('sol', 'cursor')[-2:]
trailing_text = self.editor.get_text('cursor', 'eol').strip()
if self.editor.has_selected_text():
text = self.editor.get_selected_text()
self.editor.insert_text("{0}{1}{0}".format(char, text))
# keep text selected, for inserting multiple quotes
cursor.movePosition(QTextCursor.Left, QTextCursor.MoveAnchor, 1)
cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor,
len(text))
self.editor.setTextCursor(cursor)
elif self.editor.in_comment():
self.editor.insert_text(char)
elif (len(trailing_text) > 0 and
not unmatched_quotes_in_line(line_to_cursor) == char and
not trailing_text[0] in (',', ':', ';', ')', ']', '}')):
self.editor.insert_text(char)
elif (unmatched_quotes_in_line(line_text) and
(not last_three == 3*char)):
self.editor.insert_text(char)
# Move to the right if we are before a quote
elif self.editor.next_char() == char:
cursor.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor, 1)
cursor.clearSelection()
self.editor.setTextCursor(cursor)
# Automatic insertion of triple double quotes (for docstrings)
elif last_three == 3*char:
self.editor.insert_text(3*char)
cursor = self.editor.textCursor()
cursor.movePosition(QTextCursor.PreviousCharacter,
QTextCursor.KeepAnchor, 3)
cursor.clearSelection()
self.editor.setTextCursor(cursor)
# If last two chars are quotes, just insert one more because most
# probably the user wants to write a docstring
elif last_two == 2*char:
self.editor.insert_text(char)
self.editor.delayed_popup_docstring()
# Automatic insertion of quotes
else:
self.editor.insert_text(2*char)
cursor = self.editor.textCursor()
cursor.movePosition(QTextCursor.PreviousCharacter)
self.editor.setTextCursor(cursor)
| CloseQuotesExtension |
python | getsentry__sentry | tests/sentry/auth_v2/endpoints/test_csrf.py | {
"start": 270,
"end": 3391
} | class ____(APITestCase):
endpoint = "sentry-api-0-auth-v2-csrf"
def setUp(self) -> None:
super().setUp()
self.url = reverse(self.endpoint)
self.user = self.create_user()
def test_get_csrf_token_anonymous(self) -> None:
response = self.client.get(self.url, HTTP_X_SENTRY_AUTH_V2="test")
assert response.status_code == 200
assert response.json()["detail"] == "Set CSRF cookie"
assert response.json()["session"] is not None
assert response.json()["session"]["userId"] is None
assert response.json()["session"]["sessionCsrfToken"] is not None
# Verify CSRF cookie is set
assert settings.CSRF_COOKIE_NAME in response.cookies
def test_get_csrf_token_authenticated(self) -> None:
self.login_as(self.user)
response = self.client.get(self.url, HTTP_X_SENTRY_AUTH_V2="test")
assert response.status_code == 200
assert response.json()["detail"] == "Set CSRF cookie"
assert response.json()["session"] is not None
assert response.json()["session"]["userId"] == str(self.user.id)
assert response.json()["session"]["sessionCsrfToken"] is not None
# Verify CSRF cookie is set
assert settings.CSRF_COOKIE_NAME in response.cookies
def test_rotate_csrf_token_anonymous(self) -> None:
# Get initial CSRF token
initial_response = self.client.get(self.url, HTTP_X_SENTRY_AUTH_V2="test")
initial_csrf = initial_response.cookies[settings.CSRF_COOKIE_NAME].value
# Then rotate the token
response = self.client.put(self.url, HTTP_X_SENTRY_AUTH_V2="test")
assert response.status_code == 200
assert response.json()["detail"] == "Rotated CSRF cookie"
assert response.json()["session"] is not None
assert response.json()["session"]["userId"] is None
assert response.json()["session"]["sessionCsrfToken"] is not None
# Verify CSRF cookie is rotated
assert settings.CSRF_COOKIE_NAME in response.cookies
rotated_csrf = response.cookies[settings.CSRF_COOKIE_NAME].value
assert rotated_csrf != initial_csrf
def test_rotate_csrf_token_authenticated(self) -> None:
self.login_as(self.user)
# Get initial CSRF token
initial_response = self.client.get(self.url, HTTP_X_SENTRY_AUTH_V2="test")
initial_csrf = initial_response.cookies[settings.CSRF_COOKIE_NAME].value
# Then rotate the token
response = self.client.put(self.url, HTTP_X_SENTRY_AUTH_V2="test")
assert response.status_code == 200
assert response.json()["detail"] == "Rotated CSRF cookie"
assert response.json()["session"] is not None
assert response.json()["session"]["userId"] == str(self.user.id)
assert response.json()["session"]["sessionCsrfToken"] is not None
# Verify CSRF cookie is rotated
assert settings.CSRF_COOKIE_NAME in response.cookies
rotated_csrf = response.cookies[settings.CSRF_COOKIE_NAME].value
assert rotated_csrf != initial_csrf
| CsrfTokenEndpointTest |
python | nedbat__coveragepy | tests/test_oddball.py | {
"start": 591,
"end": 1827
} | class ____(CoverageTest):
"""Tests of the threading support."""
def test_threading(self) -> None:
self.check_coverage(
"""\
import threading
def fromMainThread():
return "called from main thread"
def fromOtherThread():
return "called from other thread"
def neverCalled():
return "no one calls me"
other = threading.Thread(target=fromOtherThread)
other.start()
fromMainThread()
other.join()
""",
lines=[1, 3, 4, 6, 7, 9, 10, 12, 13, 14, 15],
missing="10",
)
def test_thread_run(self) -> None:
self.check_coverage(
"""\
import threading
class TestThread(threading.Thread):
def run(self):
self.a = 5
self.do_work()
self.a = 7
def do_work(self):
self.a = 10
thd = TestThread()
thd.start()
thd.join()
""",
lines=[1, 3, 4, 5, 6, 7, 9, 10, 12, 13, 14],
missing="",
)
| ThreadingTest |
python | cython__cython | Cython/Compiler/UtilityCode.py | {
"start": 11769,
"end": 14332
} | class ____(Code.AbstractUtilityCode):
def __init__(self, pxd_name, shared_utility_qualified_name, template_context, requires):
self._pxd_name = pxd_name
self._shared_utility_qualified_name = shared_utility_qualified_name
self.template_context = template_context
self.requires = requires
self._shared_library_scope = None
def find_module(self, context):
scope = context
for name, is_package in scope._split_qualified_name(self._shared_utility_qualified_name, relative_import=False):
scope = scope.find_submodule(name, as_package=is_package)
pxd_pathname = os.path.join(
os.path.split(Cython.__file__)[0],
'Utility',
self._pxd_name
)
try:
rel_path = self._shared_utility_qualified_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]
source_desc = TemplatedFileSourceDescriptor(pxd_pathname, rel_path, self.template_context)
source_desc.in_utility_code = True
err, result = context.process_pxd(source_desc, scope, self._shared_utility_qualified_name)
(pxd_codenodes, pxd_scope) = result
context.utility_pxds[self._pxd_name] = (pxd_codenodes, pxd_scope)
scope.pxd_file_loaded = True
if err:
raise err
except CompileError:
pass
return scope
def declare_in_scope(self, dest_scope, used=False, cython_scope=None,
allowlist=None):
if self._pxd_name not in cython_scope.context.utility_pxds:
self._shared_library_scope = self.find_module(cython_scope.context)
for dep in self.requires:
if dep.is_cython_utility:
dep.declare_in_scope(scope, cython_scope=cython_scope)
for e in self._shared_library_scope.c_class_entries:
dest_scope.add_imported_entry(e.name, e, e.pos)
return dest_scope
def get_shared_library_scope(self, cython_scope):
if self._pxd_name not in cython_scope.context.utility_pxds:
self._shared_library_scope = self.find_module(cython_scope.context)
return self._shared_library_scope
def declare_declarations_in_scope(declaration_string, env, private_type=True,
                                  *args, **kwargs):
    """Declare the Cython declarations in *declaration_string* in scope *env*.

    ``private_type`` is accepted (and swallowed) here so it is not forwarded
    to CythonUtilityCode; remaining positional/keyword arguments are passed
    through to the CythonUtilityCode constructor.
    """
    utility = CythonUtilityCode(declaration_string, *args, **kwargs)
    utility.declare_in_scope(env)
| CythonSharedUtilityCode |
python | google__pytype | pytype/pyi/parser_test.py | {
"start": 91511,
"end": 93989
} | class ____(parser_test_base.ParserTestBase):
"""Tests for typing.Self."""
def test_method_return(self):
self.check(
"""
from typing_extensions import Self
class A:
def f(self) -> Self: ...
""",
"""
from typing import TypeVar
from typing_extensions import Self
_SelfA = TypeVar('_SelfA', bound=A)
class A:
def f(self: _SelfA) -> _SelfA: ...
""",
)
def test_classmethod_return(self):
self.check(
"""
from typing_extensions import Self
class A:
@classmethod
def f(cls) -> Self: ...
""",
"""
from typing import TypeVar
from typing_extensions import Self
_SelfA = TypeVar('_SelfA', bound=A)
class A:
@classmethod
def f(cls: type[_SelfA]) -> _SelfA: ...
""",
)
def test_new_return(self):
self.check(
"""
from typing_extensions import Self
class A:
def __new__(cls) -> Self: ...
""",
"""
from typing import TypeVar
from typing_extensions import Self
_SelfA = TypeVar('_SelfA', bound=A)
class A:
def __new__(cls: type[_SelfA]) -> _SelfA: ...
""",
)
def test_parameterized_return(self):
self.check(
"""
from typing import List
from typing_extensions import Self
class A:
def f(self) -> List[Self]: ...
""",
"""
from typing import TypeVar
from typing_extensions import Self
_SelfA = TypeVar('_SelfA', bound=A)
class A:
def f(self: _SelfA) -> list[_SelfA]: ...
""",
)
def test_parameter(self):
self.check(
"""
from typing_extensions import Self
class A:
def f(self, other: Self) -> bool: ...
""",
"""
from typing import TypeVar
from typing_extensions import Self
_SelfA = TypeVar('_SelfA', bound=A)
class A:
def f(self: _SelfA, other: _SelfA) -> bool: ...
""",
)
def test_nested_class(self):
self.check(
"""
from typing_extensions import Self
class A:
class B:
def f(self) -> Self: ...
""",
"""
from typing import TypeVar
from typing_extensions import Self
_SelfAB = TypeVar('_SelfAB', bound=A.B)
class A:
class B:
def f(self: _SelfAB) -> _SelfAB: ...
""",
)
| TypingSelfTest |
python | tensorflow__tensorflow | tensorflow/python/framework/py_context_manager_test.py | {
"start": 1820,
"end": 3745
} | class ____(test_util.TensorFlowTestCase):
def testBasic(self):
cm = TestContextManager()
def body(var):
cm.log.append("body(%r)" % var)
_py_context_manager.test_py_context_manager(cm, body)
self.assertEqual("\n".join(cm.log), NO_EXCEPTION_LOG)
def testBodyRaisesException(self):
cm = TestContextManager()
def body(var):
cm.log.append("body(%r)" % var)
raise ValueError("Foo")
with self.assertRaisesRegex(ValueError, "Foo"):
_py_context_manager.test_py_context_manager(cm, body)
self.assertRegex("\n".join(cm.log), EXCEPTION_LOG)
def testEnterRaisesException(self):
cm = TestContextManager("raise_from_enter")
def body(var):
cm.log.append("body(%r)" % var)
with self.assertRaisesRegex(ValueError, "exception in __enter__"):
_py_context_manager.test_py_context_manager(cm, body)
self.assertEqual("\n".join(cm.log), "__enter__()")
# Test behavior in unsupported case where __exit__ raises an exception.
def testExitRaisesException(self):
cm = TestContextManager("raise_from_exit")
def body(var):
cm.log.append("body(%r)" % var)
# Note: this does *not* raise an exception (but does log a warning):
_py_context_manager.test_py_context_manager(cm, body)
self.assertEqual("\n".join(cm.log), NO_EXCEPTION_LOG)
# Test behavior in unsupported case where __exit__ suppresses exception.
def testExitSuppressesException(self):
cm = TestContextManager("suppress_exception")
def body(var):
cm.log.append("body(%r)" % var)
raise ValueError("Foo")
with self.assertRaisesRegex(
ValueError, "tensorflow::PyContextManager::Enter does not support "
"context managers that suppress exception"):
_py_context_manager.test_py_context_manager(cm, body)
self.assertRegex("\n".join(cm.log), EXCEPTION_LOG)
if __name__ == "__main__":
googletest.main()
| OpDefUtilTest |
python | Pylons__pyramid | docs/quick_tutorial/authentication/tutorial/views.py | {
"start": 271,
"end": 1718
} | class ____:
def __init__(self, request):
self.request = request
self.logged_in = request.authenticated_userid
@view_config(route_name='home')
def home(self):
return {'name': 'Home View'}
@view_config(route_name='hello')
def hello(self):
return {'name': 'Hello View'}
@view_config(route_name='login', renderer='login.pt')
def login(self):
request = self.request
login_url = request.route_url('login')
message = ''
login = ''
password = ''
if 'form.submitted' in request.params:
login = request.params['login']
password = request.params['password']
hashed_pw = USERS.get(login)
if hashed_pw and check_password(password, hashed_pw):
headers = remember(request, login)
return HTTPFound(location=request.route_url("home"),
headers=headers)
message = 'Failed login'
return dict(
name='Login',
message=message,
url=request.application_url + '/login',
login=login,
password=password,
)
@view_config(route_name='logout')
def logout(self):
request = self.request
headers = forget(request)
url = request.route_url('home')
return HTTPFound(location=url,
headers=headers)
| TutorialViews |
python | great-expectations__great_expectations | great_expectations/render/renderer/content_block/validation_results_table_content_block.py | {
"start": 689,
"end": 11066
} | class ____(ExpectationStringRenderer):
_content_block_type = "table"
_rendered_component_type = RenderedTableContent
_rendered_component_default_init_kwargs = {"table_options": {"search": True, "icon-size": "sm"}}
_default_element_styling = {
"default": {"classes": ["badge", "badge-secondary"]},
"params": {"column": {"classes": ["badge", "badge-primary"]}},
}
_default_content_block_styling = {
"body": {
"classes": ["table"],
},
"classes": ["ml-2", "mr-2", "mt-0", "mb-0", "table-responsive"],
}
@classmethod
def _get_custom_columns(cls, validation_results):
custom_columns = []
if (
len(validation_results) > 0
and "meta_properties_to_render" in validation_results[0].expectation_config.kwargs
and validation_results[0].expectation_config.kwargs["meta_properties_to_render"]
is not None
):
custom_columns = list(
validation_results[0].expectation_config.kwargs["meta_properties_to_render"].keys()
)
return sorted(custom_columns)
@classmethod
@override
def _process_content_block(cls, content_block, has_failed_evr, render_object=None) -> None:
super()._process_content_block(content_block, has_failed_evr)
content_block.header_row = ["Status", "Expectation", "Observed Value"]
content_block.header_row_options = {"Status": {"sortable": True}}
# Add custom meta_properties_to_render header
if render_object is not None:
custom_columns = cls._get_custom_columns(render_object)
content_block.header_row += custom_columns
for column in custom_columns:
content_block.header_row_options[column] = {"sortable": True}
if has_failed_evr is False:
styling = deepcopy(content_block.styling) if content_block.styling else {}
if styling.get("classes"):
styling["classes"].append("hide-succeeded-validations-column-section-target-child")
else:
styling["classes"] = ["hide-succeeded-validations-column-section-target-child"]
content_block.styling = styling
@override
@classmethod
def _get_content_block_fn( # noqa: C901 # FIXME CoP
cls,
expectation_type: str,
expectation_config: ExpectationConfiguration | None = None,
) -> Callable | None:
content_block_fn = super()._get_content_block_fn(
expectation_type=expectation_type, expectation_config=expectation_config
)
expectation_string_fn = content_block_fn
if expectation_string_fn is None:
expectation_string_fn = cls._get_legacy_v2_api_style_expectation_string_fn(
expectation_type
)
if expectation_string_fn is None:
expectation_string_fn = cls._missing_content_block_fn
# This function wraps expect_* methods from ExpectationStringRenderer to generate table classes # noqa: E501 # FIXME CoP
def row_generator_fn( # noqa: C901 # FIXME CoP
configuration=None,
result=None,
runtime_configuration=None,
**kwargs,
):
eval_param_value_dict = kwargs.get("suite_parameters", None)
# loading into suite parameters to be passed onto prescriptive renderer
if eval_param_value_dict is not None:
runtime_configuration["suite_parameters"] = eval_param_value_dict
expectation = result.expectation_config
expectation_string_cell = expectation_string_fn(
configuration=expectation, runtime_configuration=runtime_configuration
)
status_icon_renderer = get_renderer_impl(
object_name=expectation_type,
renderer_type=LegacyDiagnosticRendererType.STATUS_ICON,
)
status_cell = (
[status_icon_renderer[1](result=result)]
if status_icon_renderer
else [cls._diagnostic_status_icon_renderer(result=result)]
)
unexpected_statement = []
unexpected_table = None
observed_value = ["--"]
data_docs_exception_message = """\
An unexpected Exception occurred during data docs rendering. Because of this error, certain parts of data docs will \
not be rendered properly and/or may not appear altogether. Please use the trace, included in this message, to \
diagnose and repair the underlying issue. Detailed information follows:
""" # noqa: E501 # FIXME CoP
try:
unexpected_statement_renderer = get_renderer_impl(
object_name=expectation_type,
renderer_type=LegacyDiagnosticRendererType.UNEXPECTED_STATEMENT,
)
unexpected_statement = (
unexpected_statement_renderer[1](result=result)
if unexpected_statement_renderer
else []
)
except Exception as e:
exception_traceback = traceback.format_exc()
exception_message = (
data_docs_exception_message
+ f'{type(e).__name__}: "{e!s}". Traceback: "{exception_traceback}".'
)
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
try:
unexpected_table_renderer = get_renderer_impl(
object_name=expectation_type,
renderer_type=LegacyDiagnosticRendererType.UNEXPECTED_TABLE,
)
unexpected_table = (
unexpected_table_renderer[1](result=result)
if unexpected_table_renderer
else None
)
except Exception as e:
exception_traceback = traceback.format_exc()
exception_message = (
data_docs_exception_message
+ f'{type(e).__name__}: "{e!s}". Traceback: "{exception_traceback}".'
)
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
try:
observed_value_renderer = get_renderer_impl(
object_name=expectation_type,
renderer_type=LegacyDiagnosticRendererType.OBSERVED_VALUE,
)
observed_value = [
observed_value_renderer[1](result=result)
if observed_value_renderer
else (
cls._get_legacy_v2_api_observed_value(expectation_string_fn, result) or "--"
)
]
except Exception as e:
exception_traceback = traceback.format_exc()
exception_message = (
data_docs_exception_message
+ f'{type(e).__name__}: "{e!s}". Traceback: "{exception_traceback}".'
)
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
# If the expectation has some unexpected values...:
if unexpected_statement:
expectation_string_cell += unexpected_statement
if unexpected_table:
expectation_string_cell += unexpected_table
if len(expectation_string_cell) > 1:
output_row = [status_cell + [expectation_string_cell] + observed_value]
else:
output_row = [status_cell + expectation_string_cell + observed_value]
meta_properties_renderer = get_renderer_impl(
object_name=expectation_type,
renderer_type=LegacyDiagnosticRendererType.META_PROPERTIES,
)
if meta_properties_renderer:
output_row[0] += meta_properties_renderer[1](result=result)
return output_row
return row_generator_fn
@classmethod
def _get_legacy_v2_api_style_expectation_string_fn(cls, expectation_type):
legacy_expectation_string_fn = getattr(cls, expectation_type, None)
if legacy_expectation_string_fn is None:
# With the V2 API, expectation rendering was implemented by defining a method with the same name as the expectation. # noqa: E501 # FIXME CoP
# If no legacy rendering is present, return None.
return None
# deprecated-v0.13.28
warnings.warn(
"V2 API style custom rendering is deprecated as of v0.13.28 and is not fully supported anymore; " # noqa: E501 # FIXME CoP
"As it will be removed in v0.16, please transition to V3 API and associated rendering style", # noqa: E501 # FIXME CoP
DeprecationWarning,
)
def expectation_string_fn_with_legacy_translation(
configuration: ExpectationConfiguration, runtime_configuration: dict
):
if runtime_configuration is None:
runtime_configuration = {}
# With the V2 API, the expectation string function had a different signature; the below translates from the new signature to the legacy signature. # noqa: E501 # FIXME CoP
return legacy_expectation_string_fn(
expectation=configuration,
styling=runtime_configuration.get("styling", None),
include_column_name=runtime_configuration.get("include_column_name", True),
)
return expectation_string_fn_with_legacy_translation
@staticmethod
def _get_legacy_v2_api_observed_value(expectation_string_fn, result):
if expectation_string_fn.__name__ != "expectation_string_fn_with_legacy_translation":
# If legacy V2 API style rendering is used, "expectation_string_fn" will be the method defined in the above "_get_legacy_v2_api_style_expectation_string_fn". # noqa: E501 # FIXME CoP
# If this isn't the case, return None, so we don't do any legacy logic.
return None
# With V2 API style rendering, the result had an "observed_value" entry that could be rendered. # noqa: E501 # FIXME CoP
return result["result"].get("observed_value")
| ValidationResultsTableContentBlockRenderer |
python | Textualize__textual | examples/sidebar.py | {
"start": 583,
"end": 1469
} | class ____(Widget):
"""
Our sidebar widget.
Add desired content to compose()
"""
DEFAULT_CSS = """
Sidebar {
width: 30;
/* Needs to go in its own layer to sit above content */
layer: sidebar;
/* Dock the sidebar to the appropriate side */
dock: left;
/* Offset x to be -100% to move it out of view by default */
offset-x: -100%;
background: $primary;
border-right: vkey $background;
/* Enable animation */
transition: offset 200ms;
&.-visible {
/* Set offset.x to 0 to make it visible when class is applied */
offset-x: 0;
}
& > Vertical {
margin: 1 2;
}
}
"""
def compose(self) -> ComposeResult:
with Vertical():
yield Label("Your sidebar here!")
| Sidebar |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_client.py | {
"start": 1601,
"end": 13802
} | class ____:
    def test_limit_reached(self, mocker, requests_mock, api, fb_call_rate_response, account_id, some_config):
        """Error once, check that we retry and not fail"""
        # NOTE(review): the `mocker` and `some_config` fixtures are not
        # referenced in this test body.
        # turn Campaigns into non batch mode to test non batch logic
        campaign_responses = [
            fb_call_rate_response,  # first call is rate-limited...
            {
                "json": {
                    "data": [
                        {"id": 1, "updated_time": "2020-09-25T00:00:00Z"},
                        {"id": 2, "updated_time": "2020-09-25T00:00:00Z"},
                    ]
                },
                "status_code": 200,  # ...the retry succeeds
            },
        ]

        requests_mock.register_uri(
            "GET",
            FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/campaigns",
            campaign_responses,
        )
        requests_mock.register_uri(
            "GET",
            FacebookSession.GRAPH + f"/{FB_API_VERSION}/1/",
            [{"status_code": 200}],
        )
        requests_mock.register_uri(
            "GET",
            FacebookSession.GRAPH + f"/{FB_API_VERSION}/2/",
            [{"status_code": 200}],
        )

        stream = Campaigns(
            api=api,
            account_ids=[account_id],
            start_date=ab_datetime_now(),
            end_date=ab_datetime_now(),
        )
        try:
            records = list(
                stream.read_records(
                    sync_mode=SyncMode.full_refresh,
                    stream_state={},
                    stream_slice={"account_id": account_id},
                )
            )
            assert records
        except FacebookRequestError:
            pytest.fail("Call rate error has not being handled")
    def test_given_rate_limit_reached_when_read_then_raise_transient_traced_exception(
        self, requests_mock, api, fb_call_rate_response, account_id, some_config
    ):
        # A persistent rate-limit response must surface as a *transient*
        # AirbyteTracedException so the platform can retry the sync later.
        requests_mock.register_uri(
            "GET",
            FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/campaigns",
            [fb_call_rate_response],
        )
        stream = Campaigns(
            api=api,
            account_ids=[account_id],
            start_date=ab_datetime_now(),
            end_date=ab_datetime_now(),
        )

        with pytest.raises(AirbyteTracedException) as exception:
            list(
                stream.read_records(
                    sync_mode=SyncMode.full_refresh,
                    stream_state={},
                    stream_slice={"account_id": account_id},
                )
            )

        assert exception.value.failure_type == FailureType.transient_error
    def test_batch_limit_reached(self, requests_mock, api, fb_call_rate_response, account_id):
        """Error once, check that we retry and not fail"""
        # Non-batch endpoints: rate-limited once, then a successful payload.
        responses = [
            fb_call_rate_response,
            {
                "json": {
                    "data": [
                        {
                            "id": "123",
                            "object_type": "SHARE",
                            "status": "ACTIVE",
                        },
                        {
                            "id": "1234",
                            "object_type": "SHARE",
                            "status": "ACTIVE",
                        },
                    ],
                    "status_code": 200,
                }
            },
        ]
        # Batch endpoint: rate-limited once, then per-request bodies.
        batch_responses = [
            fb_call_rate_response,
            {
                "json": [
                    {
                        "body": json.dumps({"name": "creative 1"}),
                        "code": 200,
                        "headers": {},
                    },
                    {
                        "body": json.dumps({"name": "creative 2"}),
                        "code": 200,
                        "headers": {},
                    },
                ]
            },
        ]

        requests_mock.register_uri(
            "GET",
            FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/adcreatives",
            responses,
        )
        requests_mock.register_uri(
            "GET",
            FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/",
            responses,
        )
        requests_mock.register_uri("POST", FacebookSession.GRAPH + f"/{FB_API_VERSION}/", batch_responses)

        stream = AdCreatives(api=api, account_ids=[account_id])
        records = list(
            stream.read_records(
                sync_mode=SyncMode.full_refresh,
                stream_state={},
                stream_slice={"account_id": account_id},
            )
        )

        assert records == [
            {
                "account_id": "unknown_account",
                "id": "123",
                "object_type": "SHARE",
                "status": "ACTIVE",
            },
            {
                "account_id": "unknown_account",
                "id": "1234",
                "object_type": "SHARE",
                "status": "ACTIVE",
            },
        ]
    @pytest.mark.parametrize(
        "error_response",
        [
            {"json": {"error": {}}, "status_code": 500},
            {"json": {"error": {"code": 104}}},
            {"json": {"error": {"code": 2}}, "status_code": 500},
        ],
        ids=["server_error", "connection_reset_error", "temporary_oauth_error"],
    )
    def test_common_error_retry(self, error_response, requests_mock, api, account_id):
        """Error once, check that we retry and not fail"""
        account_data = {
            "account_id": "unknown_account",
            "id": 1,
            "updated_time": "2020-09-25T00:00:00Z",
            "name": "Some name",
        }
        # First response is the parametrized error; the retry succeeds.
        responses = [
            error_response,
            {
                "json": account_data,
                "status_code": 200,
            },
        ]
        requests_mock.register_uri(
            "GET",
            FacebookSession.GRAPH + f"/{FB_API_VERSION}/me/business_users",
            json={"data": []},
        )
        requests_mock.register_uri(
            "GET",
            FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/",
            responses,
        )
        requests_mock.register_uri(
            "GET",
            FacebookSession.GRAPH + f"/{FB_API_VERSION}/{account_data['id']}/",
            responses,
        )

        stream = AdAccount(api=api, account_ids=[account_id])
        accounts = list(
            stream.read_records(
                sync_mode=SyncMode.full_refresh,
                stream_state={},
                stream_slice={"account_id": account_id},
            )
        )

        assert accounts == [account_data]
def test_limit_error_retry(self, fb_call_amount_data_response, requests_mock, api, account_id):
"""Error every time, check limit parameter decreases by 2 times every new call"""
res = requests_mock.register_uri(
"GET",
FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/campaigns",
[fb_call_amount_data_response],
)
stream = Campaigns(
api=api,
account_ids=[account_id],
start_date=ab_datetime_now(),
end_date=ab_datetime_now(),
page_size=100,
)
try:
list(
stream.read_records(
sync_mode=SyncMode.full_refresh,
stream_state={},
stream_slice={"account_id": account_id},
)
)
except AirbyteTracedException:
assert [x.qs.get("limit")[0] for x in res.request_history] == [
"100",
"50",
"25",
"12",
"6",
]
def test_limit_error_retry_revert_page_size(self, requests_mock, api, account_id):
"""Error every time, check limit parameter decreases by 2 times every new call"""
error = {
"json": {
"error": {
"message": "An unknown error occurred",
"code": 1,
}
},
"status_code": 500,
}
success = {
"json": {
"data": [],
"paging": {
"cursors": {
"after": "test",
},
"next": f"https://graph.facebook.com/{FB_API_VERSION}/act_{account_id}/activities?limit=31&after=test",
},
},
"status_code": 200,
}
res = requests_mock.register_uri(
"GET",
FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/activities",
[error, success, error, success],
)
stream = Activities(
api=api,
account_ids=[account_id],
start_date=ab_datetime_now(),
end_date=ab_datetime_now(),
page_size=100,
)
try:
list(
stream.read_records(
sync_mode=SyncMode.full_refresh,
stream_state={},
stream_slice={"account_id": account_id},
)
)
except FacebookRequestError:
assert [x.qs.get("limit")[0] for x in res.request_history] == [
"100",
"50",
"100",
"50",
]
def test_start_date_not_provided(self, requests_mock, api, account_id):
success = {
"json": {
"data": [],
"paging": {
"cursors": {
"after": "test",
},
"next": f"https://graph.facebook.com/{FB_API_VERSION}/act_{account_id}/activities?limit=31&after=test",
},
},
"status_code": 200,
}
requests_mock.register_uri(
"GET",
FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/activities",
[success],
)
stream = Activities(
api=api,
account_ids=[account_id],
start_date=None,
end_date=None,
page_size=100,
)
list(
stream.read_records(
sync_mode=SyncMode.full_refresh,
stream_state={},
stream_slice={"account_id": account_id},
)
)
def test_limit_error_retry_next_page(self, fb_call_amount_data_response, requests_mock, api, account_id):
"""Unlike the previous test, this one tests the API call fail on the second or more page of a request."""
base_url = FacebookSession.GRAPH + f"/{FB_API_VERSION}/act_{account_id}/advideos"
res = requests_mock.register_uri(
"GET",
base_url,
[
{
"json": {
"data": [
{"id": 1, "updated_time": "2020-09-25T00:00:00Z"},
{"id": 2, "updated_time": "2020-09-25T00:00:00Z"},
],
"paging": {"next": f"{base_url}?after=after_page_1&limit=100"},
},
"status_code": 200,
},
fb_call_amount_data_response,
],
)
stream = Videos(
api=api,
account_ids=[account_id],
start_date=ab_datetime_now(),
end_date=ab_datetime_now(),
page_size=100,
)
try:
list(
stream.read_records(
sync_mode=SyncMode.full_refresh,
stream_state={},
stream_slice={"account_id": account_id},
)
)
except AirbyteTracedException:
assert [x.qs.get("limit")[0] for x in res.request_history] == [
"100",
"100",
"50",
"25",
"12",
"6",
]
| TestBackoff |
python | django__django | tests/model_regress/models.py | {
"start": 801,
"end": 869
} | class ____(models.Model):
when = models.DateField(null=True)
| Party |
python | wireservice__csvkit | tests/test_cli.py | {
"start": 92,
"end": 2749
} | class ____(unittest.TestCase):
def setUp(self):
self.headers = ['id', 'name', 'i_work_here', '1', 'more-header-values', 'stuff', 'blueberry']
def test_match_column_identifier_string(self):
self.assertEqual(2, match_column_identifier(self.headers, 'i_work_here'))
self.assertEqual(2, match_column_identifier(self.headers, 'i_work_here', column_offset=0))
def test_match_column_identifier_numeric(self):
self.assertEqual(2, match_column_identifier(self.headers, 3))
self.assertEqual(3, match_column_identifier(self.headers, 3, column_offset=0))
def test_match_column_which_could_be_integer_name_is_treated_as_positional_id(self):
self.assertEqual(0, match_column_identifier(self.headers, '1'))
self.assertEqual(1, match_column_identifier(self.headers, '1', column_offset=0))
def test_parse_column_identifiers(self):
self.assertEqual([2, 0, 1], parse_column_identifiers('i_work_here,1,name', self.headers))
self.assertEqual([2, 1, 1], parse_column_identifiers('i_work_here,1,name', self.headers, column_offset=0))
self.assertEqual(
[1, 1],
parse_column_identifiers(
'i_work_here,1,name',
self.headers,
column_offset=0,
excluded_columns='i_work_here,foobar',
),
)
def test_range_notation(self):
self.assertEqual([0, 1, 2], parse_column_identifiers('1:3', self.headers))
self.assertEqual([1, 2, 3], parse_column_identifiers('1:3', self.headers, column_offset=0))
self.assertEqual([1, 2, 3], parse_column_identifiers('2-4', self.headers))
self.assertEqual([2, 3, 4], parse_column_identifiers('2-4', self.headers, column_offset=0))
self.assertEqual([0, 1, 2, 3], parse_column_identifiers('1,2:4', self.headers))
self.assertEqual([1, 2, 3, 4], parse_column_identifiers('1,2:4', self.headers, column_offset=0))
self.assertEqual([4, 2, 5], parse_column_identifiers('more-header-values,3,stuff', self.headers))
self.assertEqual([4, 3, 5], parse_column_identifiers(
'more-header-values,3,stuff', self.headers, column_offset=0))
def test_range_notation_open_ended(self):
self.assertEqual([0, 1, 2], parse_column_identifiers(':3', self.headers))
target = list(range(3, len(self.headers))) # protect against devs adding to self.headers
target.insert(0, 0)
self.assertEqual(target, parse_column_identifiers('1,4:', self.headers))
self.assertEqual(list(range(0, len(self.headers))), parse_column_identifiers('1:', self.headers))
| TestCli |
python | django__django | tests/file_storage/models.py | {
"start": 382,
"end": 774
} | class ____(FileSystemStorage):
def get_valid_name(self, name):
# mark the name to show that this was called
return name + "_valid"
temp_storage_location = tempfile.mkdtemp()
temp_storage = FileSystemStorage(location=temp_storage_location)
def callable_storage():
return temp_storage
def callable_default_storage():
return default_storage
| CustomValidNameStorage |
python | redis__redis-py | tests/test_asyncio/test_connection_pool.py | {
"start": 28215,
"end": 28504
} | class ____:
@pytest_asyncio.fixture()
async def r(self, create_redis, server):
redis = await create_redis(single_connection_client=False)
yield redis
await redis.flushall()
@pytest.mark.onlynoncluster
@pytest.mark.xfail(strict=False)
| TestMultiConnectionClient |
python | Pylons__pyramid | src/pyramid/scripts/pshell.py | {
"start": 689,
"end": 9504
} | class ____:
description = """\
Open an interactive shell with a Pyramid app loaded. This command
accepts one positional argument named "config_uri" which specifies the
PasteDeploy config file to use for the interactive shell. The format is
"inifile#name". If the name is left off, the Pyramid default application
will be assumed. Example: "pshell myapp.ini#main".
If you do not point the loader directly at the section of the ini file
containing your Pyramid application, the command will attempt to
find the app for you. If you are loading a pipeline that contains more
than one Pyramid application within it, the loader will use the
last one.
"""
script_name = 'pshell'
bootstrap = staticmethod(bootstrap) # for testing
get_config_loader = staticmethod(get_config_loader) # for testing
importlib_metadata = importlib.metadata # for testing
parser = argparse.ArgumentParser(
description=textwrap.dedent(description),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
'-p',
'--python-shell',
action='store',
dest='python_shell',
default='',
help=(
'Select the shell to use. A list of possible '
'shells is available using the --list-shells '
'option.'
),
)
parser.add_argument(
'-l',
'--list-shells',
dest='list',
action='store_true',
help='List all available shells.',
)
parser.add_argument(
'--setup',
dest='setup',
help=(
"A callable that will be passed the environment "
"before it is made available to the shell. This "
"option will override the 'setup' key in the "
"[pshell] ini section."
),
)
parser.add_argument(
'config_uri',
nargs='?',
default=None,
help='The URI to the configuration file.',
)
parser.add_argument(
'config_vars',
nargs='*',
default=(),
help="Variables required by the config file. For example, "
"`http_port=%%(http_port)s` would expect `http_port=8080` to be "
"passed here.",
)
default_runner = python_shell_runner # testing
loaded_objects = {}
object_help = {}
preferred_shells = []
setup = None
pystartup = os.environ.get('PYTHONSTARTUP')
resolver = DottedNameResolver(None)
def __init__(self, argv, quiet=False):
self.quiet = quiet
self.args = self.parser.parse_args(argv[1:])
def pshell_file_config(self, loader, defaults):
settings = loader.get_settings('pshell', defaults)
self.loaded_objects = {}
self.object_help = {}
self.setup = None
for k, v in settings.items():
if k == 'setup':
self.setup = v
elif k == 'default_shell':
self.preferred_shells = [x.lower() for x in aslist(v)]
else:
self.loaded_objects[k] = self.resolver.maybe_resolve(v)
self.object_help[k] = v
def out(self, msg): # pragma: no cover
if not self.quiet:
print(msg)
def run(self, shell=None):
if self.args.list:
return self.show_shells()
if not self.args.config_uri:
self.out('Requires a config file argument')
return 2
config_uri = self.args.config_uri
config_vars = parse_vars(self.args.config_vars)
config_vars.setdefault('__script__', self.script_name)
loader = self.get_config_loader(config_uri)
loader.setup_logging(config_vars)
self.pshell_file_config(loader, config_vars)
self.env = self.bootstrap(config_uri, options=config_vars)
# remove the closer from the env
self.closer = self.env.pop('closer')
try:
if shell is None:
try:
shell = self.make_shell()
except ValueError as e:
self.out(str(e))
return 1
with self.setup_env():
shell(self.env, self.help)
finally:
self.closer()
@contextmanager
def setup_env(self):
# setup help text for default environment
env = self.env
env_help = dict(env)
env_help['app'] = 'The WSGI application.'
env_help['root'] = 'Root of the default resource tree.'
env_help['registry'] = 'Active Pyramid registry.'
env_help['request'] = 'Active request object.'
env_help['root_factory'] = (
'Default root factory used to create `root`.'
)
# load the pshell section of the ini file
env.update(self.loaded_objects)
# eliminate duplicates from env, allowing custom vars to override
for k in self.loaded_objects:
if k in env_help:
del env_help[k]
# override use_script with command-line options
if self.args.setup:
self.setup = self.args.setup
if self.setup:
# call the setup callable
self.setup = self.resolver.maybe_resolve(self.setup)
# store the env before muddling it with the script
orig_env = env.copy()
setup_manager = make_contextmanager(self.setup)
with setup_manager(env):
# remove any objects from default help that were overidden
for k, v in env.items():
if k not in orig_env or v is not orig_env[k]:
if getattr(v, '__doc__', False):
env_help[k] = v.__doc__.replace("\n", " ")
else:
env_help[k] = v
del orig_env
# generate help text
help = ''
if env_help:
help += 'Environment:'
for var in sorted(env_help.keys()):
help += '\n %-12s %s' % (var, env_help[var])
if self.object_help:
help += '\n\nCustom Variables:'
for var in sorted(self.object_help.keys()):
help += '\n %-12s %s' % (var, self.object_help[var])
if self.pystartup and os.path.isfile(self.pystartup):
with open(self.pystartup, 'rb') as fp:
exec(fp.read().decode('utf-8'), env)
if '__builtins__' in env:
del env['__builtins__']
self.help = help.strip()
yield
def show_shells(self):
shells = self.find_all_shells()
sorted_names = sorted(shells.keys(), key=lambda x: x.lower())
self.out('Available shells:')
for name in sorted_names:
self.out(f' {name}')
return 0
def find_all_shells(self):
importlib_metadata = self.importlib_metadata
shells = {}
eps = importlib_metadata.entry_points()
if hasattr(eps, 'select'):
eps = eps.select(group='pyramid.pshell_runner')
else: # pragma: no cover
# fallback for py38 and py39
eps = eps.get('pyramid.pshell_runner')
for ep in eps:
name = ep.name
shell_factory = ep.load()
shells[name] = shell_factory
return shells
def make_shell(self):
shells = self.find_all_shells()
shell = None
user_shell = self.args.python_shell.lower()
if not user_shell:
preferred_shells = self.preferred_shells
if not preferred_shells:
# by default prioritize all shells above python
preferred_shells = [k for k in shells.keys() if k != 'python']
max_weight = len(preferred_shells)
def order(x):
# invert weight to reverse sort the list
# (closer to the front is higher priority)
try:
return preferred_shells.index(x[0].lower()) - max_weight
except ValueError:
return 1
sorted_shells = sorted(shells.items(), key=order)
if len(sorted_shells) > 0:
shell = sorted_shells[0][1]
else:
runner = shells.get(user_shell)
if runner is not None:
shell = runner
if shell is None:
raise ValueError(
'could not find a shell named "%s"' % user_shell
)
if shell is None:
# should never happen, but just incase entry points are borked
shell = self.default_runner
return shell
if __name__ == '__main__': # pragma: no cover
sys.exit(main() or 0)
| PShellCommand |
python | keras-team__keras | keras/src/ops/einops_test.py | {
"start": 188,
"end": 2091
} | class ____(testing.TestCase):
def test_basic_rearrangement_symbolic(self):
x = keras_tensor.KerasTensor((2, 3, 4))
y = rearrange(x, "b c h -> b h c")
self.assertIsInstance(y, keras_tensor.KerasTensor)
self.assertEqual(y.shape, (2, 4, 3))
@skip_if_backend("openvino", "Test operation not supported by openvino")
def test_basic_rearrangement(self):
x = ops.random.uniform((2, 3, 4))
y = rearrange(x, "b c h -> b h c")
self.assertEqual(y.shape, (2, 4, 3))
self.assertTrue(ops.all(ops.equal(y, ops.transpose(x, (0, 2, 1)))))
@skip_if_backend("openvino", "Test operation not supported by openvino")
def test_output_composition(self):
x = ops.random.uniform((2, 4, 4, 3))
y = rearrange(x, "b h w c -> (b h) w c")
target_shape = (8, 4, 3)
self.assertEqual(y.shape, target_shape)
self.assertTrue(ops.all(ops.equal(y, ops.reshape(x, (8, 4, 3)))))
def test_basic_decomposition_and_rearrangement_symbolic(self):
x = keras_tensor.KerasTensor((6, 8))
y = rearrange(x, "(h w) c -> h w c", h=2, w=3)
self.assertIsInstance(y, keras_tensor.KerasTensor)
self.assertEqual(y.shape, (2, 3, 8))
def test_basic_decomposition_and_rearrangement(self):
x = ops.random.uniform((6, 8))
y = rearrange(x, "(h w) c -> h w c", h=2, w=3)
self.assertEqual(y.shape, (2, 3, 8))
@skip_if_backend("openvino", "Test operation not supported by openvino")
def test_unchanged_shape(self):
x = ops.ones([2, 3, 4])
y = rearrange(x, "b h c -> b h c")
self.assertTrue(ops.all(ops.equal(y, x)))
self.assertTrue(x.shape, y.shape)
def test_unchanged_shape_symbolic(self):
x = keras_tensor.KerasTensor((2, 3, 4))
y = rearrange(x, "b h c -> b h c")
self.assertTrue(x.shape, y.shape)
| RearrangeTest |
python | mahmoud__boltons | boltons/tableutils.py | {
"start": 4259,
"end": 4622
} | class ____(InputType):
def check_type(self, obj):
return isinstance(obj, Mapping)
def guess_headers(self, obj):
return sorted(obj.keys())
def get_entry(self, obj, headers):
return [obj.get(h) for h in headers]
def get_entry_seq(self, obj, headers):
return [[ci.get(h) for h in headers] for ci in obj]
| DictInputType |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 123506,
"end": 128834
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True, arbitrary_types_allowed=True)
dbt_task: Optional[DbtTask] = Field(
None,
description=(
"If dbt_task, indicates that this must execute a dbt task. It requires both"
" Databricks SQL and the ability to use a serverless or a pro SQL"
" warehouse."
),
)
depends_on: Optional[TaskDependencies] = None
description: Optional[TaskDescription] = None
email_notifications: Optional[JobEmailNotifications] = Field(
None,
description=(
"An optional set of email addresses that is notified when runs of this task"
" begin or complete as well as when this task is deleted. The default"
" behavior is to not send any emails."
),
)
existing_cluster_id: Optional[str] = Field(
None,
description=(
"If existing_cluster_id, the ID of an existing cluster that is used for all"
" runs of this task. When running tasks on an existing cluster, you may"
" need to manually restart the cluster if it stops responding. We suggest"
" running jobs on new clusters for greater reliability."
),
examples=["0923-164208-meows279"],
)
job_cluster_key: Optional[str] = Field(
None,
description=(
"If job_cluster_key, this task is executed reusing the cluster specified in"
" `job.settings.job_clusters`."
),
max_length=100,
min_length=1,
pattern="^[\\w\\-]+$",
)
libraries: Optional[List[Library]] = Field(
None,
description=(
"An optional list of libraries to be installed on the cluster that executes"
" the task. The default value is an empty list."
),
)
max_retries: Optional[int] = Field(
None,
description=(
"An optional maximum number of times to retry an unsuccessful run. A run is"
" considered to be unsuccessful if it completes with the `FAILED`"
" result_state or `INTERNAL_ERROR` `life_cycle_state`. The value -1 means"
" to retry indefinitely and the value 0 means to never retry. The default"
" behavior is to never retry."
),
examples=[10],
)
min_retry_interval_millis: Optional[int] = Field(
None,
description=(
"An optional minimal interval in milliseconds between the start of the"
" failed run and the subsequent retry run. The default behavior is that"
" unsuccessful runs are immediately retried."
),
examples=[2000],
)
new_cluster: Optional[NewCluster] = Field(
None,
description=(
"If new_cluster, a description of a cluster that is created for each run."
),
)
notebook_task: Optional[NotebookTask] = Field(
None,
description=(
"If notebook_task, indicates that this task must run a notebook. This field"
" may not be specified in conjunction with spark_jar_task."
),
)
pipeline_task: Optional[PipelineTask] = Field(
None,
description=(
"If pipeline_task, indicates that this task must execute a Pipeline."
),
)
python_wheel_task: Optional[PythonWheelTask] = Field(
None,
description=(
"If python_wheel_task, indicates that this job must execute a PythonWheel."
),
)
retry_on_timeout: Optional[bool] = Field(
None,
description=(
"An optional policy to specify whether to retry a task when it times out."
" The default behavior is to not retry on timeout."
),
examples=[True],
)
spark_jar_task: Optional[SparkJarTask] = Field(
None, description="If spark_jar_task, indicates that this task must run a JAR."
)
spark_python_task: Optional[SparkPythonTask] = Field(
None,
description=(
"If spark_python_task, indicates that this task must run a Python file."
),
)
spark_submit_task: Optional[SparkSubmitTask] = Field(
None,
description=(
"If spark_submit_task, indicates that this task must be launched by the"
" spark submit script."
),
)
sql_task: Optional[SqlTask] = Field(
None,
description=(
"If sql_task, indicates that this job must execute a SQL task. It requires"
" both Databricks SQL and a serverless or a pro SQL warehouse."
),
)
task_key: TaskKey
timeout_seconds: Optional[int] = Field(
None,
description=(
"An optional timeout applied to each run of this job task. The default"
" behavior is to have no timeout."
),
examples=[86400],
)
webhook_notifications: Optional[WebhookNotifications] = Field(
None,
description=(
"A collection of system notification IDs to notify when the run begins or"
" completes. The default behavior is to not send any system notifications."
),
)
| JobTaskSettings |
python | ray-project__ray | python/ray/train/lint/check_circular_imports.py | {
"start": 1499,
"end": 1893
} | class ____:
"""
Represents an import statement.
For example, 'from X import A, B' has module 'X' and names ['A', 'B'].
Also supports 'import X'.
"""
def __init__(
self, module: str, names: List[str] = None, is_package: bool = False
) -> None:
self.is_package = is_package
self.module = module
self.names = names if names else []
| Import |
python | FactoryBoy__factory_boy | factory/declarations.py | {
"start": 2729,
"end": 3222
} | class ____(BaseDeclaration):
"""Simplest BaseDeclaration computed by calling the given function.
Attributes:
function (function): a function without arguments and
returning the computed value.
"""
def __init__(self, function):
super().__init__()
self.function = function
def evaluate(self, instance, step, extra):
logger.debug("LazyFunction: Evaluating %r on %r", self.function, step)
return self.function()
| LazyFunction |
python | pikepdf__pikepdf | src/pikepdf/form.py | {
"start": 5828,
"end": 6812
} | class ____:
"""Base class for other field types.
In addition to the methods and properties documented here, all fields expose the
same properties and methods defined on `pikepdf.AcroFormField`. These are forwarded
to the underlying field object.
"""
def __init__(self, form: Form, field: AcroFormField):
self._form = form
self._field = field
def __getattr__(self, name):
return getattr(self._field, name)
@property
def is_required(self) -> bool:
"""Is this a required field?"""
return bool(self._field.flags & FormFieldFlag.required)
@property
def is_read_only(self) -> bool:
"""Is this a read-only field?"""
return bool(self._field.flags & FormFieldFlag.read_only)
@property
def export_enabled(self) -> bool:
"""Should this field's value be included when exporting data from the PDF?"""
return not self._field.flags & FormFieldFlag.no_export
| _FieldWrapper |
python | django__django | django/db/models/fields/__init__.py | {
"start": 76725,
"end": 77167
} | class ____(IntegerField):
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
return super().formfield(
**{
"min_value": -BigIntegerField.MAX_BIGINT - 1,
"max_value": BigIntegerField.MAX_BIGINT,
**kwargs,
}
)
| BigIntegerField |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/random/multinomial_op_big_test.py | {
"start": 1036,
"end": 3514
} | class ____(test.TestCase):
# check that events with tiny probabilities are not over-sampled
def testLargeDynamicRange(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
with self.test_session():
samples = random_ops.multinomial(
constant_op.constant([[-30, 0]], dtype=dtypes.float32),
num_samples=1000000,
seed=15)
for _ in range(100):
x = self.evaluate(samples)
indices, counts = np.unique(x, return_counts=True) # pylint: disable=unexpected-keyword-arg
for index, count in zip(indices, counts):
if index in counts_by_indices.keys():
counts_by_indices[index] += count
else:
counts_by_indices[index] = count
self.assertEqual(counts_by_indices[1], 100000000)
def testLargeDynamicRange2(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
with self.test_session():
samples = random_ops.multinomial(
constant_op.constant([[0, -30]], dtype=dtypes.float32),
num_samples=1000000,
seed=15)
for _ in range(100):
x = self.evaluate(samples)
indices, counts = np.unique(x, return_counts=True) # pylint: disable=unexpected-keyword-arg
for index, count in zip(indices, counts):
if index in counts_by_indices.keys():
counts_by_indices[index] += count
else:
counts_by_indices[index] = count
self.assertEqual(counts_by_indices[0], 100000000)
@test_util.run_deprecated_v1
def testLargeDynamicRange3(self):
random_seed.set_random_seed(10)
counts_by_indices = {}
# here the cpu undersamples and won't pass this test either
with self.test_session():
samples = random_ops.multinomial(
constant_op.constant([[0, -17]], dtype=dtypes.float32),
num_samples=1000000,
seed=22)
# we'll run out of memory if we try to draw 1e9 samples directly
# really should fit in 12GB of memory...
for _ in range(100):
x = self.evaluate(samples)
indices, counts = np.unique(x, return_counts=True) # pylint: disable=unexpected-keyword-arg
for index, count in zip(indices, counts):
if index in counts_by_indices.keys():
counts_by_indices[index] += count
else:
counts_by_indices[index] = count
self.assertGreater(counts_by_indices[1], 0)
if __name__ == "__main__":
test.main()
| MultinomialTest |
python | walkccc__LeetCode | solutions/2652. Sum Multiples/2652.py | {
"start": 0,
"end": 182
} | class ____:
def sumOfMultiples(self, n: int) -> int:
ans = 0
for i in range(1, n + 1):
if i % 3 == 0 or i % 5 == 0 or i % 7 == 0:
ans += i
return ans
| Solution |
python | pandas-dev__pandas | pandas/tests/indexes/timedeltas/test_setops.py | {
"start": 243,
"end": 7956
} | class ____:
def test_union(self):
i1 = timedelta_range("1day", periods=5)
i2 = timedelta_range("3day", periods=5)
result = i1.union(i2)
expected = timedelta_range("1day", periods=7)
tm.assert_index_equal(result, expected)
i1 = Index(np.arange(0, 20, 2, dtype=np.int64))
i2 = timedelta_range(start="1 day", periods=10, freq="D")
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_sort_false(self):
tdi = timedelta_range("1day", periods=5)
left = tdi[3:]
right = tdi[:3]
# Check that we are testing the desired code path
assert left._can_fast_union(right)
result = left.union(right)
tm.assert_index_equal(result, tdi)
result = left.union(right, sort=False)
expected = TimedeltaIndex(["4 Days", "5 Days", "1 Days", "2 Day", "3 Days"])
tm.assert_index_equal(result, expected)
def test_union_coverage(self):
# GH#59051
msg = "'d' is deprecated and will be removed in a future version."
with tm.assert_produces_warning(Pandas4Warning, match=msg):
idx = TimedeltaIndex(["3d", "1d", "2d"])
ordered = TimedeltaIndex(idx.sort_values(), freq="infer")
result = ordered.union(idx)
tm.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
tm.assert_index_equal(result, ordered)
assert result.freq == ordered.freq
def test_union_bug_1730(self):
rng_a = timedelta_range("1 day", periods=4, freq="3h")
rng_b = timedelta_range("1 day", periods=4, freq="4h")
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(rng_a) | set(rng_b)))
tm.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = TimedeltaIndex(["1 day 15:19:49.695000"])
right = TimedeltaIndex(
["2 day 13:04:21.322000", "1 day 15:27:24.873000", "1 day 15:31:05.350000"]
)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(left) | set(right)))
tm.assert_index_equal(result, exp)
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30D")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(left) | set(right)))
tm.assert_index_equal(result, exp)
def test_union_freq_infer(self):
# When taking the union of two TimedeltaIndexes, we infer
# a freq even if the arguments don't have freq. This matches
# DatetimeIndex behavior.
tdi = timedelta_range("1 Day", periods=5)
left = tdi[[0, 1, 3, 4]]
right = tdi[[2, 3, 1]]
assert left.freq is None
assert right.freq is None
result = left.union(right)
tm.assert_index_equal(result, tdi)
assert result.freq == "D"
def test_intersection_bug_1708(self):
index_1 = timedelta_range("1 day", periods=4, freq="h")
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1.intersection(index_2)
assert len(result) == 0
index_1 = timedelta_range("1 day", periods=4, freq="h")
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1.intersection(index_2)
expected = timedelta_range("1 day 01:00:00", periods=3, freq="h")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_intersection_equal(self, sort):
# GH 24471 Test intersection outcome given the sort keyword
# for equal indices intersection should return the original index
first = timedelta_range("1 day", periods=4, freq="h")
second = timedelta_range("1 day", periods=4, freq="h")
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
tm.assert_index_equal(intersect, second)
# Corner cases
inter = first.intersection(first, sort=sort)
assert inter is first
@pytest.mark.parametrize("period_1, period_2", [(0, 4), (4, 0)])
def test_intersection_zero_length(self, period_1, period_2, sort):
# GH 24471 test for non overlap the intersection should be zero length
index_1 = timedelta_range("1 day", periods=period_1, freq="h")
index_2 = timedelta_range("1 day", periods=period_2, freq="h")
expected = timedelta_range("1 day", periods=0, freq="h")
result = index_1.intersection(index_2, sort=sort)
tm.assert_index_equal(result, expected)
def test_zero_length_input_index(self, sort):
# GH 24966 test for 0-len intersections are copied
index_1 = timedelta_range("1 day", periods=0, freq="h")
index_2 = timedelta_range("1 day", periods=3, freq="h")
result = index_1.intersection(index_2, sort=sort)
assert index_1 is not result
assert index_2 is not result
tm.assert_copy(result, index_1)
@pytest.mark.parametrize(
"rng, expected",
# if target has the same name, it is preserved
[
(
timedelta_range("1 day", periods=5, freq="h", name="idx"),
timedelta_range("1 day", periods=4, freq="h", name="idx"),
),
# if target name is different, it will be reset
(
timedelta_range("1 day", periods=5, freq="h", name="other"),
timedelta_range("1 day", periods=4, freq="h", name=None),
),
# if no overlap exists return empty index
(
timedelta_range("1 day", periods=10, freq="h", name="idx")[5:],
TimedeltaIndex([], freq="h", name="idx", dtype="m8[ns]"),
),
],
)
def test_intersection(self, rng, expected, sort):
# GH 4690 (with tz)
base = timedelta_range("1 day", periods=4, freq="h", name="idx")
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
@pytest.mark.parametrize(
"rng, expected",
# part intersection works
[
(
TimedeltaIndex(["5 hour", "2 hour", "4 hour", "9 hour"], name="idx"),
TimedeltaIndex(["2 hour", "4 hour"], name="idx"),
),
# reordered part intersection
(
TimedeltaIndex(["2 hour", "5 hour", "5 hour", "1 hour"], name="other"),
TimedeltaIndex(["1 hour", "2 hour"], name=None),
),
# reversed index
(
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")[
::-1
],
TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx"),
),
],
)
def test_intersection_non_monotonic(self, rng, expected, sort):
# 24471 non-monotonic
base = TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")
result = base.intersection(rng, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert result.name == expected.name
# if reversed order, frequency is still the same
if all(base == rng[::-1]) and sort is None:
assert isinstance(result.freq, Hour)
else:
assert result.freq is None
| TestTimedeltaIndex |
python | eventlet__eventlet | eventlet/event.py | {
"start": 192,
"end": 7496
} | class ____:
"""An abstraction where an arbitrary number of coroutines
can wait for one event from another.
Events are similar to a Queue that can only hold one item, but differ
in two important ways:
1. calling :meth:`send` never unschedules the current greenthread
2. :meth:`send` can only be called once; create a new event to send again.
They are good for communicating results between coroutines, and
are the basis for how
:meth:`GreenThread.wait() <eventlet.greenthread.GreenThread.wait>`
is implemented.
>>> from eventlet import event
>>> import eventlet
>>> evt = event.Event()
>>> def baz(b):
... evt.send(b + 1)
...
>>> _ = eventlet.spawn_n(baz, 3)
>>> evt.wait()
4
"""
_result = None
_exc = None
def __init__(self):
self._waiters = set()
self.reset()
def __str__(self):
params = (self.__class__.__name__, hex(id(self)),
self._result, self._exc, len(self._waiters))
return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params
def reset(self):
# this is kind of a misfeature and doesn't work perfectly well,
# it's better to create a new event rather than reset an old one
# removing documentation so that we don't get new use cases for it
assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.'
self._result = NOT_USED
self._exc = None
def ready(self):
""" Return true if the :meth:`wait` call will return immediately.
Used to avoid waiting for things that might take a while to time out.
For example, you can put a bunch of events into a list, and then visit
them all repeatedly, calling :meth:`ready` until one returns ``True``,
and then you can :meth:`wait` on that one."""
return self._result is not NOT_USED
def has_exception(self):
return self._exc is not None
def has_result(self):
return self._result is not NOT_USED and self._exc is None
def poll(self, notready=None):
if self.ready():
return self.wait()
return notready
# QQQ make it return tuple (type, value, tb) instead of raising
# because
# 1) "poll" does not imply raising
# 2) it's better not to screw up caller's sys.exc_info() by default
# (e.g. if caller wants to calls the function in except or finally)
def poll_exception(self, notready=None):
if self.has_exception():
return self.wait()
return notready
def poll_result(self, notready=None):
if self.has_result():
return self.wait()
return notready
def wait(self, timeout=None):
"""Wait until another coroutine calls :meth:`send`.
Returns the value the other coroutine passed to :meth:`send`.
>>> import eventlet
>>> evt = eventlet.Event()
>>> def wait_on():
... retval = evt.wait()
... print("waited for {0}".format(retval))
>>> _ = eventlet.spawn(wait_on)
>>> evt.send('result')
>>> eventlet.sleep(0)
waited for result
Returns immediately if the event has already occurred.
>>> evt.wait()
'result'
When the timeout argument is present and not None, it should be a floating point number
specifying a timeout for the operation in seconds (or fractions thereof).
"""
current = greenlet.getcurrent()
if self._result is NOT_USED:
hub = hubs.get_hub()
self._waiters.add(current)
timer = None
if timeout is not None:
timer = hub.schedule_call_local(timeout, self._do_send, None, None, current)
try:
result = hub.switch()
if timer is not None:
timer.cancel()
return result
finally:
self._waiters.discard(current)
if self._exc is not None:
current.throw(*self._exc)
return self._result
def send(self, result=None, exc=None):
"""Makes arrangements for the waiters to be woken with the
result and then returns immediately to the parent.
>>> from eventlet import event
>>> import eventlet
>>> evt = event.Event()
>>> def waiter():
... print('about to wait')
... result = evt.wait()
... print('waited for {0}'.format(result))
>>> _ = eventlet.spawn(waiter)
>>> eventlet.sleep(0)
about to wait
>>> evt.send('a')
>>> eventlet.sleep(0)
waited for a
It is an error to call :meth:`send` multiple times on the same event.
>>> evt.send('whoops') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
AssertionError: Trying to re-send() an already-triggered event.
Use :meth:`reset` between :meth:`send` s to reuse an event object.
"""
assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.'
self._result = result
if exc is not None and not isinstance(exc, tuple):
exc = (exc, )
self._exc = exc
hub = hubs.get_hub()
for waiter in self._waiters:
hub.schedule_call_global(
0, self._do_send, self._result, self._exc, waiter)
def _do_send(self, result, exc, waiter):
if waiter in self._waiters:
if exc is None:
waiter.switch(result)
else:
waiter.throw(*exc)
def send_exception(self, *args):
"""Same as :meth:`send`, but sends an exception to waiters.
The arguments to send_exception are the same as the arguments
to ``raise``. If a single exception object is passed in, it
will be re-raised when :meth:`wait` is called, generating a
new stacktrace.
>>> from eventlet import event
>>> evt = event.Event()
>>> evt.send_exception(RuntimeError())
>>> evt.wait()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "eventlet/event.py", line 120, in wait
current.throw(*self._exc)
RuntimeError
If it's important to preserve the entire original stack trace,
you must pass in the entire :func:`sys.exc_info` tuple.
>>> import sys
>>> evt = event.Event()
>>> try:
... raise RuntimeError()
... except RuntimeError:
... evt.send_exception(*sys.exc_info())
...
>>> evt.wait()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "eventlet/event.py", line 120, in wait
current.throw(*self._exc)
File "<stdin>", line 2, in <module>
RuntimeError
Note that doing so stores a traceback object directly on the
Event object, which may cause reference cycles. See the
:func:`sys.exc_info` documentation.
"""
# the arguments and the same as for greenlet.throw
return self.send(None, args)
| Event |
python | pola-rs__polars | py-polars/src/polars/datatype_expr/datatype_expr.py | {
"start": 981,
"end": 9667
} | class ____:
"""
A lazily instantiated :class:`DataType` that can be used in an :class:`Expr`.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
This expression is made to represent a :class:`DataType` that can be used to
reference a datatype in a lazy context.
Examples
--------
>>> lf = pl.LazyFrame({"a": [1, 2, 3]})
>>> lf.with_columns(
... pl.col.a.map_batches(lambda x: x * 2, return_dtype=pl.dtype_of("a"))
... ).collect()
shape: (3, 1)
┌─────┐
│ a │
│ --- │
│ i64 │
╞═════╡
│ 2 │
│ 4 │
│ 6 │
└─────┘
"""
# NOTE: This `= None` is needed to generate the docs with sphinx_accessor.
_pydatatype_expr: PyDataTypeExpr = None # type: ignore[assignment]
_accessors: ClassVar[set[str]] = {
"arr",
"enum",
"list",
"struct",
}
def __eq__(self, value: PolarsDataType | DataTypeExpr) -> pl.Expr: # type: ignore[override]
cmp_with: DataTypeExpr
if isinstance(value, pl.DataType):
cmp_with = value.to_dtype_expr()
elif isinstance(value, pl.DataTypeClass):
cmp_with = value.to_dtype_expr()
elif isinstance(value, DataTypeExpr):
cmp_with = value
else:
msg = f"cannot compare {self!r} to {value!r}"
raise TypeError(msg) from None
return pl.Expr._from_pyexpr(
self._pydatatype_expr.equals(cmp_with._pydatatype_expr)
)
def __ne__(self, value: PolarsDataType | DataTypeExpr) -> pl.Expr: # type: ignore[override]
return (self == value).not_()
@classmethod
def _from_pydatatype_expr(cls, pydatatype_expr: PyDataTypeExpr) -> DataTypeExpr:
slf = cls()
slf._pydatatype_expr = pydatatype_expr
return slf
def inner_dtype(self) -> DataTypeExpr:
"""Get the inner DataType of a List or Array."""
return DataTypeExpr._from_pydatatype_expr(self._pydatatype_expr.inner_dtype())
def display(self) -> pl.Expr:
"""
Get a formatted version of the output DataType.
Examples
--------
>>> df = pl.DataFrame(
... {
... "a": [1, 2, 3],
... "b": ["X", "Y", "Z"],
... "c": [1.3, 3.7, 4.2],
... }
... )
>>> df.select(
... a=pl.dtype_of("a").display(),
... b=pl.dtype_of("b").display(),
... c=pl.dtype_of("c").display(),
... ).transpose(include_header=True, column_names=["dtype"])
shape: (3, 2)
┌────────┬───────┐
│ column ┆ dtype │
│ --- ┆ --- │
│ str ┆ str │
╞════════╪═══════╡
│ a ┆ i64 │
│ b ┆ str │
│ c ┆ f64 │
└────────┴───────┘
"""
return pl.Expr._from_pyexpr(self._pydatatype_expr.display())
def matches(self, selector: pl.Selector) -> pl.Expr:
"""
Get whether the output DataType is matches a certain selector.
Examples
--------
>>> import polars.selectors as cs
>>> pl.DataFrame(
... {
... "a": [1, 2, 3],
... }
... ).select(
... a_is_string=pl.dtype_of("a").matches(cs.string()),
... a_is_integer=pl.dtype_of("a").matches(cs.integer()),
... )
shape: (1, 2)
┌─────────────┬──────────────┐
│ a_is_string ┆ a_is_integer │
│ --- ┆ --- │
│ bool ┆ bool │
╞═════════════╪══════════════╡
│ false ┆ true │
└─────────────┴──────────────┘
"""
return pl.Expr._from_pyexpr(self._pydatatype_expr.matches(selector._pyselector))
def wrap_in_list(self) -> DataTypeExpr:
"""
Get the DataType wrapped in a list.
Examples
--------
>>> pl.Int32.to_dtype_expr().wrap_in_list().collect_dtype({})
List(Int32)
"""
return DataTypeExpr._from_pydatatype_expr(self._pydatatype_expr.wrap_in_list())
def wrap_in_array(self, *, width: int) -> DataTypeExpr:
"""
Get the DataType wrapped in an array.
Examples
--------
>>> pl.Int32.to_dtype_expr().wrap_in_array(width=5).collect_dtype({})
Array(Int32, shape=(5,))
"""
return DataTypeExpr._from_pydatatype_expr(
self._pydatatype_expr.wrap_in_array(width)
)
def to_unsigned_integer(self) -> pl.DataTypeExpr:
"""
Get the unsigned integer version of the same bitsize.
Examples
--------
>>> int32 = pl.Int32.to_dtype_expr()
>>> int32.to_unsigned_integer().collect_dtype({})
UInt32
"""
return pl.DataTypeExpr._from_pydatatype_expr(
self._pydatatype_expr.to_unsigned_integer()
)
def to_signed_integer(self) -> pl.DataTypeExpr:
"""
Get the signed integer version of the same bitsize.
Examples
--------
>>> uint32 = pl.UInt32.to_dtype_expr()
>>> uint32.to_signed_integer().collect_dtype({})
Int32
"""
return pl.DataTypeExpr._from_pydatatype_expr(
self._pydatatype_expr.to_signed_integer()
)
def default_value(
self,
n: int = 1,
*,
numeric_to_one: bool = False,
num_list_values: int = 0,
) -> pl.Expr:
"""
Get a default value of a specific type.
- Integers and floats are their zero value as default, unless otherwise
specified
- Temporals are a physical zero as default
- `pl.Decimal` is zero as default
- `pl.String` and `pl.Binary` are an empty string
- `pl.List` is an empty list, unless otherwise specified
- `pl.Array` is the inner default value repeated over the shape
- `pl.Struct` is the inner default value for all fields
- `pl.Enum` is the first category if it exists
- `pl.Null`, `pl.Object` and `pl.Categorical` are `null`.
Parameters
----------
n
Number of types you want the value
numeric_to_one
Use `1` instead of `0` as the default value for numeric types
num_list_values
The amount of values a list contains
Examples
--------
>>> uint32 = pl.UInt32.to_dtype_expr()
>>> pl.select(default=uint32.default_value())
shape: (1, 1)
┌─────────┐
│ default │
│ --- │
│ u32 │
╞═════════╡
│ 0 │
└─────────┘
"""
return pl.Expr._from_pyexpr(
self._pydatatype_expr.default_value(
n=n, numeric_to_one=numeric_to_one, num_list_values=num_list_values
)
)
@property
def list(self) -> DataTypeExprListNameSpace:
"""Create an object namespace of all list related methods."""
return DataTypeExprListNameSpace(self)
@property
def arr(self) -> DataTypeExprArrNameSpace:
"""Create an object namespace of all array related methods."""
return DataTypeExprArrNameSpace(self)
@property
def struct(self) -> DataTypeExprStructNameSpace:
"""Create an object namespace of all struct related methods."""
return DataTypeExprStructNameSpace(self)
def collect_dtype(
self, context: SchemaDict | pl.Schema | pl.DataFrame | pl.LazyFrame
) -> DataType:
"""
Materialize the :class:`DataTypeExpr` in a specific context.
This is a useful function when debugging datatype expressions.
Examples
--------
>>> lf = pl.LazyFrame(
... {
... "a": [1, 2, 3],
... }
... )
>>> pl.dtype_of("a").collect_dtype(lf)
Int64
>>> pl.dtype_of("a").collect_dtype({"a": pl.String})
String
"""
schema: pl.Schema
if isinstance(context, pl.Schema):
schema = context
elif isinstance(context, Mapping):
schema = pl.Schema(context)
elif isinstance(context, pl.DataFrame):
schema = context.schema
elif isinstance(context, pl.LazyFrame):
schema = context.collect_schema()
else:
msg = f"DataTypeExpr.collect_dtype did not expect {context!r}"
raise TypeError(msg)
return self._pydatatype_expr.collect_dtype(schema)
| DataTypeExpr |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/types.py | {
"start": 5980,
"end": 7912
} | class ____(sqltypes.NativeForEmulated, sqltypes._AbstractInterval):
__visit_name__ = "INTERVAL"
def __init__(self, day_precision=None, second_precision=None):
"""Construct an INTERVAL.
Note that only DAY TO SECOND intervals are currently supported.
This is due to a lack of support for YEAR TO MONTH intervals
within available DBAPIs.
:param day_precision: the day precision value. this is the number of
digits to store for the day field. Defaults to "2"
:param second_precision: the second precision value. this is the
number of digits to store for the fractional seconds field.
Defaults to "6".
"""
self.day_precision = day_precision
self.second_precision = second_precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(
day_precision=interval.day_precision,
second_precision=interval.second_precision,
)
@classmethod
def adapt_emulated_to_native(
cls, interval: sqltypes.Interval, **kw # type: ignore[override]
):
return INTERVAL(
day_precision=interval.day_precision,
second_precision=interval.second_precision,
)
@property
def _type_affinity(self):
return sqltypes.Interval
def as_generic(self, allow_nulltype=False):
return sqltypes.Interval(
native=True,
second_precision=self.second_precision,
day_precision=self.day_precision,
)
@property
def python_type(self) -> Type[dt.timedelta]:
return dt.timedelta
def literal_processor(
self, dialect: Dialect
) -> Optional[_LiteralProcessorType[dt.timedelta]]:
def process(value: dt.timedelta) -> str:
return f"NUMTODSINTERVAL({value.total_seconds()}, 'SECOND')"
return process
| INTERVAL |
python | huggingface__transformers | tests/models/glm46v/test_modeling_glm46v.py | {
"start": 1304,
"end": 6162
} | class ____:
def __init__(
self,
parent,
batch_size=3,
seq_length=7,
num_channels=3,
ignore_index=-100,
image_size=112,
video_start_token_id=3,
video_end_token_id=4,
image_start_token_id=5,
image_end_token_id=6,
image_token_id=7,
video_token_id=8,
is_training=True,
text_config={
"vocab_size": 99,
"hidden_size": 16,
"intermediate_size": 22,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 1,
"output_channels": 64,
"hidden_act": "silu",
"max_position_embeddings": 512,
"rope_parameters": {"type": "default", "mrope_section": [2, 1, 1]},
"rope_theta": 10000,
"tie_word_embeddings": True,
"bos_token_id": 0,
"eos_token_id": 0,
"pad_token_id": 0,
},
vision_config={
"depth": 2,
"hidden_act": "silu",
"hidden_size": 48,
"out_hidden_size": 16,
"intermediate_size": 22,
"patch_size": 14,
"spatial_merge_size": 1,
"temporal_patch_size": 2,
},
):
self.parent = parent
self.ignore_index = ignore_index
self.bos_token_id = text_config["bos_token_id"]
self.eos_token_id = text_config["eos_token_id"]
self.pad_token_id = text_config["pad_token_id"]
self.video_start_token_id = video_start_token_id
self.video_end_token_id = video_end_token_id
self.image_start_token_id = image_start_token_id
self.image_end_token_id = image_end_token_id
self.image_token_id = image_token_id
self.video_token_id = video_token_id
self.text_config = text_config
self.vision_config = vision_config
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.is_training = is_training
self.hidden_size = text_config["hidden_size"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.num_attention_heads = text_config["num_attention_heads"]
self.vocab_size = text_config["vocab_size"]
self.num_image_tokens = 64
self.seq_length = seq_length + self.num_image_tokens
def get_config(self):
return Glm46VConfig(
text_config=self.text_config,
vision_config=self.vision_config,
image_token_id=self.image_token_id,
video_token_id=self.video_token_id,
video_start_token_id=self.video_start_token_id,
video_end_token_id=self.video_end_token_id,
image_start_token_id=self.image_start_token_id,
image_end_token_id=self.image_end_token_id,
)
def prepare_config_and_inputs(self):
config = self.get_config()
patch_size = config.vision_config.patch_size
temporal_patch_size = config.vision_config.temporal_patch_size
pixel_values = floats_tensor(
[
self.batch_size * (self.image_size**2) // (patch_size**2),
self.num_channels * (patch_size**2) * temporal_patch_size,
]
)
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
input_ids[input_ids == self.video_token_id] = self.pad_token_id
input_ids[input_ids == self.image_token_id] = self.pad_token_id
input_ids[input_ids == self.video_start_token_id] = self.pad_token_id
input_ids[input_ids == self.image_start_token_id] = self.pad_token_id
input_ids[input_ids == self.video_end_token_id] = self.pad_token_id
input_ids[input_ids == self.image_end_token_id] = self.pad_token_id
input_ids[:, 0] = self.image_start_token_id
input_ids[:, 1 : 1 + self.num_image_tokens] = self.image_token_id
input_ids[:, 1 + self.num_image_tokens] = self.image_end_token_id
patch_size = config.vision_config.patch_size
patches_per_side = self.image_size // patch_size
inputs_dict = {
"pixel_values": pixel_values,
"image_grid_thw": torch.tensor(
[[1, patches_per_side, patches_per_side]] * self.batch_size, device=torch_device
),
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
| Glm46VVisionText2TextModelTester |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/config.py | {
"start": 163,
"end": 1712
} | class ____:
def __init__(self) -> None:
self._subdomain: Optional[str] = None
self._start_date: Optional[str] = None
self._credentials: Dict[str, str] = {}
self._ignore_pagination: Optional[bool] = None
def with_subdomain(self, subdomain: str) -> "ConfigBuilder":
self._subdomain = subdomain
return self
def with_oauth_credentials(self, access_token: str) -> "ConfigBuilder":
self._credentials["access_token"] = access_token
self._credentials["credentials"] = "oauth2.0"
return self
def with_basic_auth_credentials(self, email: str, password: str) -> "ConfigBuilder":
self._credentials["api_token"] = password
self._credentials["credentials"] = "api_token"
self._credentials["email"] = email
return self
def with_start_date(self, start_date: AirbyteDateTime) -> "ConfigBuilder":
self._start_date = start_date.strftime("%Y-%m-%dT%H:%M:%SZ")
return self
def with_ignore_pagination(self) -> "ConfigBuilder":
self._ignore_pagination = True
return self
def build(self) -> Dict[str, Any]:
config = {}
if self._subdomain:
config["subdomain"] = self._subdomain
if self._start_date:
config["start_date"] = self._start_date
if self._credentials:
config["credentials"] = self._credentials
if self._ignore_pagination:
config["ignore_pagination"] = self._ignore_pagination
return config
| ConfigBuilder |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-alloys.py | {
"start": 50,
"end": 1363
} | class ____(object):
def maxNumberOfAlloys(self, n, k, budget, composition, stock, cost):
"""
:type n: int
:type k: int
:type budget: int
:type composition: List[List[int]]
:type stock: List[int]
:type cost: List[int]
:rtype: int
"""
def count(machine, budget):
def cnt(x):
return stock[x]//machine[x]
idxs = range(n)
idxs.sort(key=cnt)
result = cnt(idxs[0])
prefix = curr = discount = 0
for i in xrange(n):
curr += cost[idxs[i]]*machine[idxs[i]]
discount += cost[idxs[i]]*(stock[idxs[i]]%machine[idxs[i]])
if i+1 != n and cnt(idxs[i+1])-cnt(idxs[i]) == 0:
continue
prefix += curr
budget += discount
curr = discount = 0
mn = min((cnt(idxs[i+1])-cnt(idxs[i]) if i+1 < n else float("inf")), budget//prefix)
if mn == 0:
break
budget -= prefix*mn
result += mn
return result
return max(count(machine, budget) for machine in composition)
# Time: O(k * n * logr), r = min(stock)+budget
# Space: O(1)
# binary search
| Solution |
python | django-import-export__django-import-export | tests/core/tests/test_widgets.py | {
"start": 18337,
"end": 20214
} | class ____(TestCase, RowDeprecationTestMixin):
def setUp(self):
self.value = 0
self.widget = widgets.IntegerWidget()
self.bigintvalue = 163371428940853127
self.widget_coerce_to_string = widgets.IntegerWidget(coerce_to_string=True)
def test_clean_integer_zero(self):
self.assertEqual(self.widget.clean(0), self.value)
def test_clean_big_integer(self):
self.assertEqual(self.widget.clean(163371428940853127), self.bigintvalue)
def test_clean_string_zero(self):
self.assertEqual(self.widget.clean("0"), self.value)
self.assertEqual(self.widget.clean("0.0"), self.value)
def test_clean_empty_string(self):
self.assertEqual(self.widget.clean(""), None)
self.assertEqual(self.widget.clean(" "), None)
self.assertEqual(self.widget.clean("\n\t\r"), None)
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_clean_numeric_separators(self):
self.assertEqual(self.widget.clean("1,234.5"), 1234)
@override_settings(LANGUAGE_CODE="ar", USE_THOUSAND_SEPARATOR=True)
def test_clean_numeric_separators_arabic(self):
self.assertEqual(self.widget.clean("1.234,5"), 1234)
@override_settings(LANGUAGE_CODE="zh-hans", USE_THOUSAND_SEPARATOR=True)
def test_clean_numeric_separators_chinese_simplified(self):
self.assertEqual(self.widget.clean("1234.5"), 1234)
@override_settings(LANGUAGE_CODE="fr", USE_THOUSAND_SEPARATOR=True)
def test_clean_numeric_separators_french(self):
self.assertEqual(self.widget.clean("1\xa0234,5"), 1234)
def test_render_invalid_type(self):
self.assertEqual(self.widget.render("a"), "")
@override_settings(LANGUAGE_CODE="fr-fr")
def test_locale_render_gte_django4(self):
self.assertEqual(self.widget_coerce_to_string.render(self.value), "0")
| IntegerWidgetTest |
python | ipython__ipython | IPython/core/interactiveshell.py | {
"start": 3788,
"end": 5555
} | class ____(DeprecationWarning):
"""
Warning class for unstable features
"""
pass
from ast import Module
_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
#-----------------------------------------------------------------------------
# Await Helpers
#-----------------------------------------------------------------------------
# we still need to run things using the asyncio eventloop, but there is no
# async integration
from .async_helpers import (
_asyncio_runner,
_curio_runner,
_pseudo_sync_runner,
_should_be_async,
_trio_runner,
)
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def is_integer_string(s: str):
"""
Variant of "str.isnumeric()" that allow negative values and other ints.
"""
try:
int(s)
return True
except ValueError:
return False
raise ValueError("Unexpected error")
@undoc
def softspace(file, newvalue):
"""Copied from code.py, to remove the dependency"""
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
@undoc
def no_op(*a, **kw):
pass
| ProvisionalWarning |
python | lazyprogrammer__machine_learning_examples | cnn_class2/tf_resnet_convblock_starter.py | {
"start": 354,
"end": 765
} | class ____:
def __init__(self):
pass
def predict(self, X):
pass
if __name__ == '__main__':
conv_block = ConvBlock()
# make a fake image
X = np.random.random((1, 224, 224, 3))
init = tf.global_variables_initializer()
with tf.Session() as session:
conv_block.session = session
session.run(init)
output = conv_block.predict(X):
print("output.shape:", output.shape) | ConvBlock |
python | getsentry__sentry-python | sentry_sdk/utils.py | {
"start": 7927,
"end": 10301
} | class ____:
"""Represents a DSN."""
ORG_ID_REGEX = re.compile(r"^o(\d+)\.")
def __init__(self, value, org_id=None):
# type: (Union[Dsn, str], Optional[str]) -> None
if isinstance(value, Dsn):
self.__dict__ = dict(value.__dict__)
return
parts = urlsplit(str(value))
if parts.scheme not in ("http", "https"):
raise BadDsn("Unsupported scheme %r" % parts.scheme)
self.scheme = parts.scheme
if parts.hostname is None:
raise BadDsn("Missing hostname")
self.host = parts.hostname
if org_id is not None:
self.org_id = org_id # type: Optional[str]
else:
org_id_match = Dsn.ORG_ID_REGEX.match(self.host)
self.org_id = org_id_match.group(1) if org_id_match else None
if parts.port is None:
self.port = self.scheme == "https" and 443 or 80 # type: int
else:
self.port = parts.port
if not parts.username:
raise BadDsn("Missing public key")
self.public_key = parts.username
self.secret_key = parts.password
path = parts.path.rsplit("/", 1)
try:
self.project_id = str(int(path.pop()))
except (ValueError, TypeError):
raise BadDsn("Invalid project in DSN (%r)" % (parts.path or "")[1:])
self.path = "/".join(path) + "/"
@property
def netloc(self):
# type: () -> str
"""The netloc part of a DSN."""
rv = self.host
if (self.scheme, self.port) not in (("http", 80), ("https", 443)):
rv = "%s:%s" % (rv, self.port)
return rv
def to_auth(self, client=None):
# type: (Optional[Any]) -> Auth
"""Returns the auth info object for this dsn."""
return Auth(
scheme=self.scheme,
host=self.netloc,
path=self.path,
project_id=self.project_id,
public_key=self.public_key,
secret_key=self.secret_key,
client=client,
)
def __str__(self):
# type: () -> str
return "%s://%s%s@%s%s%s" % (
self.scheme,
self.public_key,
self.secret_key and "@" + self.secret_key or "",
self.netloc,
self.path,
self.project_id,
)
| Dsn |
python | pytorch__pytorch | test/distributed/pipelining/test_stage.py | {
"start": 1540,
"end": 10127
} | class ____(MultiProcContinuousTest):
@classmethod
def backend_str(cls) -> str:
# Testing with NCCL backend
return backend
@classmethod
def device_type(cls) -> str:
return device_type
@property
def device(self) -> torch.device:
return torch.device(device_type, self.rank)
@requires_accelerator_dist_backend(["nccl", "xccl"])
@skip_but_pass_in_sandcastle_if(
not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
)
@parametrize("ModelClass", [ExampleCode, MultiMLP])
def test_tracer(self, ModelClass):
mod = ModelClass(d_hid, self.world_size)
mod.to(self.device)
x = torch.randn(batch_size, d_hid, device=self.device)
x_mb = x.chunk(chunks)[0]
split_spec = mod.split_spec if hasattr(mod, "split_spec") else None
pipe = pipeline(
mod,
mb_args=(x_mb,),
split_spec=split_spec,
)
stage = pipe.build_stage(
self.rank,
self.device,
)
# Attach to a schedule
schedule = ScheduleGPipe(stage, chunks)
# Run
def _run_step(x):
if self.rank == 0:
return schedule.step(x)
else:
return schedule.step()
out = _run_step(x)
# Last rank checks result
if self.rank == self.world_size - 1:
ref_out = mod(x)
torch.testing.assert_close(out, ref_out, atol=1e-3, rtol=5e-2)
# Test qualname mapping
submod_keys = stage.submod.state_dict().keys()
# Confirm keys are consistent with original model
old_keys = mod.state_dict().keys()
assert all(k in old_keys for k in submod_keys)
@requires_accelerator_dist_backend(["nccl", "xccl"])
@skip_but_pass_in_sandcastle_if(
not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
)
@parametrize("ModelClass", [ModelWithKwargs])
def test_tracer_kwargs(self, ModelClass):
mod = ModelClass(d_hid, self.world_size)
mod.to(self.device)
x = torch.randn(batch_size, d_hid, device=self.device)
y = torch.randn(batch_size, d_hid, device=self.device)
x_mb = x.chunk(chunks)[0]
y_mb = y.chunk(chunks)[0]
pipe = pipeline(
mod,
mb_args=(x_mb,),
mb_kwargs={"y": y_mb},
)
stage_mod = pipe.get_stage_module(self.rank)
# Test build_stage
stage = build_stage(
stage_mod,
self.rank,
pipe.info(),
self.device,
)
# Attach to a schedule
schedule = ScheduleGPipe(stage, chunks)
# Run
if self.rank == 0:
out = schedule.step(x, y=y)
else:
out = schedule.step()
# Last rank checks result
if self.rank == self.world_size - 1:
ref_out = mod(x, y=y)
torch.testing.assert_close(out, ref_out, atol=1e-3, rtol=5e-2)
# Test qualname mapping
submod_keys = stage.submod.state_dict().keys()
# Confirm keys are consistent with original model
old_keys = mod.state_dict().keys()
assert all(k in old_keys for k in submod_keys)
@requires_accelerator_dist_backend(["nccl", "xccl"])
@skip_but_pass_in_sandcastle_if(
not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
)
def test_manual(self):
full_mod = MultiMLP(d_hid, n_layers=self.world_size)
full_mod.to(self.device)
stage_mod = full_mod.get_submodule(f"layers.{self.rank}")
x = torch.randn(batch_size, d_hid, device=self.device)
stage = PipelineStage(
stage_mod,
self.rank,
self.world_size,
self.device,
)
# Attach to a schedule
schedule = ScheduleGPipe(stage, chunks)
# Run
def _run_step(x):
if self.rank == 0:
return schedule.step(x)
else:
return schedule.step()
out = _run_step(x)
# Last rank checks result
if self.rank == self.world_size - 1:
ref_out = full_mod(x)
torch.testing.assert_close(out, ref_out)
@requires_accelerator_dist_backend(["nccl", "xccl"])
@skip_but_pass_in_sandcastle_if(
not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
)
def test_custom_dw_with_fb_schedule(self):
"""Tests that separate weight grad function 'dw_runner' gets run under a schedule that's only aware of F/B."""
full_mod = MultiMLP(d_hid, n_layers=self.world_size)
full_mod.to(self.device)
stage_mod = full_mod.get_submodule(f"layers.{self.rank}")
x = torch.randn(batch_size, d_hid, device=self.device)
target = torch.randn(batch_size, d_hid, device=self.device)
class CustomState:
def __init__(self) -> None:
self.i = 0
def dw_builder(self):
"""This simulates a function attached to a model with a custom backward.
Each call to builder gives a new dw_runner that has some updated state to compute the latest dw.
"""
def dw_runner():
# This inner function would be called by PipelineStage during `backward_weight_one_chunk`
print(f"dw called {self.i}th time")
self.i += 1
return dw_runner
cs = CustomState()
stage = PipelineStage(
stage_mod,
self.rank,
self.world_size,
self.device,
dw_builder=cs.dw_builder,
)
# Attach to a schedule
schedule = ScheduleGPipe(
stage, chunks, loss_fn=torch.nn.MSELoss(reduction="sum")
)
# Run
def _run_step(x):
if self.rank == 0:
return schedule.step(x)
elif self.rank == self.world_size - 1:
return schedule.step(target=target)
else:
return schedule.step()
out = _run_step(x)
self.assertEqual(cs.i, chunks)
# Last rank checks result
if self.rank == self.world_size - 1:
ref_out = full_mod(x)
torch.testing.assert_close(out, ref_out)
@requires_accelerator_dist_backend(["nccl", "xccl"])
@skip_but_pass_in_sandcastle_if(
not TEST_MULTIACCELERATOR, f"{backend} test requires 2+ GPUs"
)
def test_output_chunks_memory_usage(self):
"""Test that output_chunks doesn't store memory for non-first stages."""
full_mod = MultiMLP(d_hid, n_layers=self.world_size)
full_mod.to(self.device)
stage_mod = full_mod.get_submodule(f"layers.{self.rank}")
x = torch.randn(batch_size, d_hid, device=self.device)
target = torch.randn(batch_size, d_hid, device=self.device)
stage = PipelineStage(
stage_mod,
self.rank,
self.world_size,
self.device,
)
self.assertEqual(
len(stage.output_chunks), 0, "output_chunks should be empty initially"
)
schedule = ScheduleGPipe(
stage, chunks, loss_fn=torch.nn.MSELoss(reduction="sum")
)
def _run_step(x):
if self.rank == 0:
return schedule.step(x)
elif self.rank == self.world_size - 1:
return schedule.step(target=target)
else:
return schedule.step()
_run_step(x)
# Verify fwd_cache is empty
self.assertEqual(len(stage.fwd_cache), 0, "fwd_cache should be cleared")
# Check output_chunks state after step
if self.rank == self.world_size - 1:
self.assertEqual(
len(stage.output_chunks),
chunks,
"Last stage should store output chunks",
)
else:
self.assertEqual(
len(stage.output_chunks),
0,
f"Non-last stage (rank {self.rank}) should not store output chunks",
)
# Clear the schedule and stage caches
stage.clear_runtime_states()
if self.rank == self.world_size - 1:
# Last stage should have output_chunks populated
self.assertEqual(
len(stage.output_chunks), 0, "Last stage should store output chunks"
)
instantiate_parametrized_tests(StageTest)
| StageTest |
python | eventlet__eventlet | eventlet/green/http/cookiejar.py | {
"start": 73886,
"end": 79435
} | class ____(FileCookieJar):
"""
WARNING: you may want to backup your browser's cookies file if you use
this class to save cookies. I *think* it works, but there have been
bugs in the past!
This class differs from CookieJar only in the format it uses to save and
load cookies to and from a file. This class uses the Mozilla/Netscape
`cookies.txt' format. lynx uses this file format, too.
Don't expect cookies saved while the browser is running to be noticed by
the browser (in fact, Mozilla on unix will overwrite your saved cookies if
you change them on disk while it's running; on Windows, you probably can't
save at all while the browser is running).
Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
Netscape cookies on saving.
In particular, the cookie version and port number information is lost,
together with information about whether or not Path, Port and Discard were
specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
domain as set in the HTTP header started with a dot (yes, I'm aware some
domains in Netscape files start with a dot and some don't -- trust me, you
really don't want to know any more about this).
Note that though Mozilla and Netscape use the same format, they use
slightly different headers. The class saves cookies using the Netscape
header by default (Mozilla can cope with that).
"""
magic_re = re.compile("#( Netscape)? HTTP Cookie File")
header = """\
# Netscape HTTP Cookie File
# http://curl.haxx.se/rfc/cookie_spec.html
# This is a generated file! Do not edit.
"""
def _really_load(self, f, filename, ignore_discard, ignore_expires):
now = time.time()
magic = f.readline()
if not self.magic_re.search(magic):
raise LoadError(
"%r does not look like a Netscape format cookies file" %
filename)
try:
while 1:
line = f.readline()
if line == "": break
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
# skip comments and blank lines XXX what is $ for?
if (line.strip().startswith(("#", "$")) or
line.strip() == ""):
continue
domain, domain_specified, path, secure, expires, name, value = \
line.split("\t")
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name = value
value = None
initial_dot = domain.startswith(".")
assert domain_specified == initial_dot
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except OSError:
raise
except Exception:
_warn_unhandled_exception()
raise LoadError("invalid Netscape format cookies file %r: %r" %
(filename, line))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
with open(filename, "w") as f:
f.write(self.header)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
if cookie.secure: secure = "TRUE"
else: secure = "FALSE"
if cookie.domain.startswith("."): initial_dot = "TRUE"
else: initial_dot = "FALSE"
if cookie.expires is not None:
expires = str(cookie.expires)
else:
expires = ""
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name = ""
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
"\t".join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value])+
"\n")
| MozillaCookieJar |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 15447,
"end": 15621
} | class ____(VegaLiteSchema):
"""Align schema wrapper."""
_schema = {"$ref": "#/definitions/Align"}
def __init__(self, *args):
super().__init__(*args)
| Align |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 639137,
"end": 639702
} | class ____(VegaLiteSchema):
"""
Locale schema wrapper.
Parameters
----------
number : dict, :class:`NumberLocale`
Locale definition for formatting numbers.
time : dict, :class:`TimeLocale`
Locale definition for formatting dates and times.
"""
_schema = {"$ref": "#/definitions/Locale"}
def __init__(
self,
number: Optional[SchemaBase | Map] = Undefined,
time: Optional[SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(number=number, time=time, **kwds)
| Locale |
python | ethereum__web3.py | web3/utils/subscriptions.py | {
"start": 8722,
"end": 9234
} | class ____(EthSubscription[SyncProgress]):
def __init__(
self,
label: str | None = None,
handler: SyncingSubscriptionHandler | None = None,
handler_context: dict[str, Any] | None = None,
parallelize: bool | None = None,
) -> None:
super().__init__(
subscription_params=("syncing",),
handler=handler,
handler_context=handler_context,
label=label,
parallelize=parallelize,
)
| SyncingSubscription |
python | keras-team__keras | keras/src/trainers/trainer_test.py | {
"start": 3210,
"end": 3770
} | class ____(Trainer, layers.Layer):
def __init__(self, units):
layers.Layer.__init__(self)
Trainer.__init__(self)
self.dense_1 = layers.Dense(
units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
self.dense_2 = layers.Dense(
units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
def call(self, x):
assert isinstance(x, (list, tuple))
return self.dense_1(x[0]) + self.dense_2(x[1])
| ListInputModel |
python | viewflow__viewflow | tests/fsm/test_fsm__advanced.py | {
"start": 1328,
"end": 3373
} | class ____(TestCase): # noqa: D101
def test_no_target_transition(self):
publication = Publication(text="test")
publication.notify()
self.assertEqual(publication.stage, ReviewState.NEW)
def test_big_publication_process(self):
publication = Publication(text="test" * 251)
self.assertEqual(publication.stage, ReviewState.NEW)
publication.publish()
self.assertEqual(publication.stage, ReviewState.PUBLISHED)
publication.trash()
self.assertEqual(publication.stage, ReviewState.HIDDEN)
def test_small_publication_process(self):
publication = Publication(text="test" * 249)
self.assertEqual(publication.stage, ReviewState.NEW)
publication.publish()
self.assertEqual(publication.stage, ReviewState.PUBLISHED)
publication.trash()
self.assertEqual(publication.stage, ReviewState.REMOVED)
def test_available_transitions(self):
self.assertEqual(
[
(transition.target, transition.slug)
for transition in Publication.stage.get_outgoing_transitions(
ReviewState.NEW
)
],
[
(DEFAULT, "notify"),
(ReviewState.PUBLISHED, "publish"),
(ReviewState.PUBLISHED, "toggle"),
],
)
self.assertEqual(
[
(transition.target, transition.slug)
for transition in Publication.stage.get_outgoing_transitions(
ReviewState.PUBLISHED
)
],
[(ReviewState.NEW, "toggle"), (ReviewState.REJECTED, "trash")],
)
self.assertEqual(
[
(transition.target, transition.slug)
for transition in Publication.stage.get_outgoing_transitions(
ReviewState.REJECTED
)
],
[(ReviewState.HIDDEN, "hide"), (ReviewState.REMOVED, "remove")],
)
| Test |
python | boto__boto3 | tests/integration/test_s3.py | {
"start": 5469,
"end": 9351
} | class ____(unittest.TestCase):
def setUp(self):
self.region = _DEFAULT_REGION
self.bucket_name = _SHARED_BUCKET
clear_out_bucket(self.bucket_name, self.region)
self.session = boto3.session.Session(region_name=self.region)
self.s3 = self.session.resource('s3')
self.bucket = self.s3.Bucket(self.bucket_name)
def create_bucket_resource(self, bucket_name=None, region=None):
if bucket_name is None:
bucket_name = random_bucket_name()
if region is None:
region = self.region
kwargs = {'Bucket': bucket_name}
if region != 'us-east-1':
kwargs['CreateBucketConfiguration'] = {
'LocationConstraint': region
}
bucket = self.s3.create_bucket(**kwargs)
self.addCleanup(bucket.delete)
for _ in range(3):
bucket.wait_until_exists()
return bucket
def test_s3(self):
client = self.s3.meta.client
# Create an object
obj = self.bucket.Object('test.txt')
obj.put(Body='hello, world')
waiter = client.get_waiter('object_exists')
waiter.wait(Bucket=self.bucket_name, Key='test.txt')
self.addCleanup(obj.delete)
# List objects and make sure ours is present
self.assertIn('test.txt', [o.key for o in self.bucket.objects.all()])
# Lazy-loaded attribute
self.assertEqual(12, obj.content_length)
# Load a similar attribute from the collection response
self.assertEqual(12, list(self.bucket.objects.all())[0].size)
# Perform a resource action with a low-level response
self.assertEqual(b'hello, world', obj.get()['Body'].read())
def test_s3_resource_waiter(self):
# Create a bucket
bucket_name = random_bucket_name()
bucket = self.create_bucket_resource(bucket_name)
# Wait till the bucket exists
bucket.wait_until_exists()
# Confirm the bucket exists by finding it in a list of all of our
# buckets
self.assertIn(bucket_name, [b.name for b in self.s3.buckets.all()])
# Create an object
obj = bucket.Object('test.txt')
obj.put(Body='hello, world')
self.addCleanup(obj.delete)
# Wait till the bucket exists
obj.wait_until_exists()
# List objects and make sure ours is present
self.assertIn('test.txt', [o.key for o in bucket.objects.all()])
def test_can_create_object_directly(self):
obj = self.s3.Object(self.bucket_name, 'test.txt')
self.assertEqual(obj.bucket_name, self.bucket_name)
self.assertEqual(obj.key, 'test.txt')
def test_s3_multipart(self):
# Create the multipart upload
mpu = self.bucket.Object('mp-test.txt').initiate_multipart_upload()
self.addCleanup(mpu.abort)
# Create and upload a part
part = mpu.Part(1)
response = part.upload(Body='hello, world!')
# Complete the upload, which requires info on all of the parts
part_info = {'Parts': [{'PartNumber': 1, 'ETag': response['ETag']}]}
mpu.complete(MultipartUpload=part_info)
self.addCleanup(self.bucket.Object('mp-test.txt').delete)
contents = self.bucket.Object('mp-test.txt').get()['Body'].read()
self.assertEqual(contents, b'hello, world!')
def test_s3_batch_delete(self):
bucket = self.create_bucket_resource()
bucket.Versioning().enable()
# Create several versions of an object
obj = self.bucket.Object('test.txt')
for i in range(10):
obj.put(Body=f"Version {i}")
# Delete all the versions of the object
bucket.object_versions.all().delete()
versions = list(bucket.object_versions.all())
self.assertEqual(len(versions), 0)
| TestS3Resource |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 26391,
"end": 27421
} | class ____(Base):
__tablename__ = "trace_request_metadata"
key = Column(String(250))
"""
Metadata key: `String` (limit 250 characters).
"""
value = Column(String(8000), nullable=True)
"""
Value associated with metadata: `String` (limit 250 characters). Could be *null*.
"""
request_id = Column(
String(50), ForeignKey("trace_info.request_id", ondelete="CASCADE"), nullable=False
)
"""
Request ID to which this metadata belongs: *Foreign Key* into ``trace_info`` table.
**Corresponding to the "trace_id" in V3 format.**
"""
trace_info = relationship("SqlTraceInfo", backref=backref("request_metadata", cascade="all"))
"""
SQLAlchemy relationship (many:one) with
:py:class:`mlflow.store.dbmodels.models.SqlTraceInfo`.
"""
# Key is unique within a request_id
__table_args__ = (
PrimaryKeyConstraint("request_id", "key", name="trace_request_metadata_pk"),
Index(f"index_{__tablename__}_request_id"),
)
| SqlTraceMetadata |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 280203,
"end": 293264
} | class ____(CallNode):
# Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
# Allows the self argument to be injected directly instead of repacking a tuple for it.
#
# function ExprNode the function/method object to call
# arg_tuple TupleNode the arguments for the args tuple
# kwdict ExprNode or None keyword dictionary (if present)
# kwargs_key_value_pairs [ExprNode] or None list of unpacked kwargs key-value pairs, if known
# function_obj ExprNode or None == self.function.obj when using PyObject_VectorcallMethod()
# unpack bool
subexprs = ['function', 'arg_tuple', 'kwdict', 'kwargs_key_value_pairs']
is_temp = True
use_method_vectorcall = False
kwdict = None
kwargs_key_value_pairs = None
function_obj = None
def __init__(self, pos, **kw):
super().__init__(pos, **kw)
if self.can_avoid_attribute_lookup():
self.use_method_vectorcall = True
self.function_obj = self.function.obj
if self.kwdict and self.kwdict.is_dict_literal:
self.kwargs_key_value_pairs = self.kwdict.key_value_pairs
self.kwdict = None
def can_avoid_attribute_lookup(self):
# Essentially, if the signature matches PyObject_VectorcallMethod
# then it's worth doing that directly and not creating a new method in
# the attribute lookup.
if self.kwdict and not isinstance(self.kwdict, DictNode):
return False
function = self.function
if not function.is_attribute:
return False
# These two determine that it's not just a plain getattr
if not function.is_py_attr:
return False
if function.is_special_lookup:
return False
if not PyMethodCallNode.attribute_is_likely_method(function):
# PyObject_VectorcallMethod would work, but is more likely to
# be a pessimization.
return False
return True
@staticmethod
def attribute_is_likely_method(attr):
obj = attr.obj
if obj.is_name and obj.entry.is_pyglobal:
return False # more likely to be a function
return True
@staticmethod
def can_be_used_for_posargs(positional_args, has_kwargs, has_explicit_kwargs=False):
"""
Test whether the positional args given are compatible with
being translated into a PyMethodCallNode.
"""
if not isinstance(positional_args, TupleNode):
return False
if positional_args.mult_factor:
return False
if positional_args.is_literal and len(positional_args.args) > 1:
return False
if not len(positional_args.args):
# If positional_args is an empty tuple, it's probably only worth optimizing
# if the kwds are f(a=1, b=2) or none at all, and not if they're f(**kwds).
return has_explicit_kwargs or not has_kwargs
return True
@staticmethod
def can_be_used_for_function(function):
"""
Test whether the function passed is suitable to be translated
into a PyMethodCallNode
"""
may_be_a_method = True
if function.is_attribute:
if function.entry and function.entry.type.is_cfunction:
# optimised builtin method
may_be_a_method = False
elif function.is_name:
entry = function.entry
if entry.type.is_cfunction:
may_be_a_method = False
elif entry.cf_assignments:
# local functions/classes are definitely not methods
non_method_nodes = (PyCFunctionNode, ClassNode, Py3ClassNode)
may_be_a_method = any(
assignment.rhs and not isinstance(assignment.rhs, non_method_nodes)
for assignment in entry.cf_assignments)
return may_be_a_method
def generate_evaluate_function(self, code, self_arg) -> str:
# Returns the cname of the function variable, temp or name (for VectorcallMethod).
if self.use_method_vectorcall:
self.function_obj.generate_evaluation_code(code)
code.putln(f"{self_arg} = {self.function_obj.py_result()};")
code.put_incref(self_arg, py_object_type)
return code.get_py_string_const(self.function.attribute)
code.putln(f"{self_arg} = NULL;")
self.function.generate_evaluation_code(code)
# Make sure function is in temp so that we can replace the reference if it's a method.
if self.function.result_in_temp() or (not self.unpack and self.function.nonlocally_immutable()):
return self.function.result()
# FIXME: Should use "coerce_to_temp()" in "__init__()" instead, but that needs "env".
function = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.function.make_owned_reference(code)
code.putln("%s = %s; " % (function, self.function.py_result()))
self.function.generate_disposal_code(code)
self.function.free_temps(code)
return function
def generate_dispose_function(self, code, function):
if self.use_method_vectorcall:
self.function_obj.generate_disposal_code(code)
self.function_obj.free_temps(code)
elif self.function.result_in_temp() or (not self.unpack and self.function.nonlocally_immutable()):
self.function.generate_disposal_code(code)
self.function.free_temps(code)
else:
code.put_decref_clear(function, py_object_type)
code.funcstate.release_temp(function)
def generate_runtime_method_unpacking_code(self, code, self_arg, space_for_selfarg_var, method_obj):
if self.use_method_vectorcall or not self.unpack:
return
if self.function.is_attribute:
likely_method = 'likely' if self.attribute_is_likely_method(self.function) else 'unlikely'
elif self.function.is_name and self.function.cf_state:
# not an attribute itself, but might have been assigned from one (e.g. bound method)
for assignment in self.function.cf_state:
value = assignment.rhs
if value and value.is_attribute and value.obj.type and value.obj.type.is_pyobject:
if self.attribute_is_likely_method(value):
likely_method = 'likely'
break
else:
likely_method = 'unlikely'
else:
likely_method = 'unlikely'
# Unpacking is ultimately governed by "optimize.unpack_method_calls"
# and is a separate decision to whether we want vectorcall-type behaviour.
code.putln("#if CYTHON_UNPACK_METHODS")
code.putln("if (%s(PyMethod_Check(%s))) {" % (likely_method, method_obj))
code.putln(f"{self_arg} = PyMethod_GET_SELF({method_obj});")
# The result of PyMethod_GET_SELF is always true in Py3.
code.putln(f"assert({self_arg});")
code.putln(f"PyObject* __pyx__function = PyMethod_GET_FUNCTION({method_obj});")
code.put_incref(self_arg, py_object_type)
code.put_incref("__pyx__function", py_object_type)
# free method object as early to possible to enable reuse from CPython's freelist
code.put_decref_set(method_obj, py_object_type, "__pyx__function")
code.putln(f"{space_for_selfarg_var} = 0;")
code.putln("}")
code.putln("#endif") # CYTHON_UNPACK_METHODS
# TODO may need to deal with unused variables in the #else case
def generate_keyvalue_args(self, code, args, kwargs_key_value_pairs, kwnames_temp):
code.putln(
f"{kwnames_temp} = __Pyx_MakeVectorcallBuilderKwds({len(kwargs_key_value_pairs)}); "
f"{code.error_goto_if_null(kwnames_temp, self.pos)}"
)
code.put_gotref(kwnames_temp, py_object_type)
for n, keyvalue in enumerate(kwargs_key_value_pairs):
key_is_str = keyvalue.key.type is Builtin.unicode_type and not keyvalue.key.may_be_none()
code.put_error_if_neg(
self.pos,
f"__Pyx_VectorcallBuilder_AddArg{'' if key_is_str else '_Check'}("
f"{keyvalue.key.py_result()}, "
f"{keyvalue.value.py_result()}, "
f"{kwnames_temp}, "
f"{Naming.callargs_cname}+{len(args) + 1}, "
f"{n:d}"
")"
)
def select_utility_code(self, code):
# ... and return the utility function's cname.
if self.use_method_vectorcall:
if self.kwargs_key_value_pairs:
name = "PyObjectVectorCallMethodKwBuilder"
cfunc = "__Pyx_Object_VectorcallMethod_CallFromBuilder"
else:
name = "PyObjectFastCallMethod"
cfunc = "__Pyx_PyObject_FastCallMethod"
elif self.kwargs_key_value_pairs:
name = "PyObjectVectorCallKwBuilder"
cfunc = "__Pyx_Object_Vectorcall_CallFromBuilder"
elif self.kwdict:
name = "PyObjectFastCall"
cfunc = "__Pyx_PyObject_FastCallDict"
else:
name = "PyObjectFastCall"
cfunc = "__Pyx_PyObject_FastCall"
code.globalstate.use_utility_code(
UtilityCode.load_cached(name, "ObjectHandling.c"))
return cfunc
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
kwargs_key_value_pairs = self.kwargs_key_value_pairs
kwdict = self.kwdict
self_arg = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
function = self.generate_evaluate_function(code, self_arg)
args = self.arg_tuple.args
assert self.arg_tuple.mult_factor is None
for arg in args:
arg.generate_evaluation_code(code)
if kwargs_key_value_pairs:
for keyvalue in kwargs_key_value_pairs:
keyvalue.generate_evaluation_code(code)
elif kwdict:
kwdict.generate_evaluation_code(code)
# Leave space for self argument in before-first argument?
space_for_selfarg = code.funcstate.allocate_temp(PyrexTypes.c_size_t_type, manage_ref=False)
code.putln(f"{space_for_selfarg} = {'0' if self.use_method_vectorcall else '1'};")
self.generate_runtime_method_unpacking_code(
code,
self_arg=self_arg,
space_for_selfarg_var=space_for_selfarg,
method_obj=function,
)
function_caller = self.select_utility_code(code)
# Actually call the function.
code.putln("{")
# To avoid passing an out-of-bounds argument pointer in the no-args case,
# we need at least two entries, so we pad with NULL and point to that.
# See https://github.com/cython/cython/issues/5668
args_list = ', '.join(arg.py_result() for arg in args) if args else "NULL"
extra_keyword_args = f" + ((CYTHON_VECTORCALL) ? {len(kwargs_key_value_pairs)} : 0)" if kwargs_key_value_pairs else ""
code.putln(
f"PyObject *{Naming.callargs_cname}[{(len(args) + 1) if args else 2:d}{extra_keyword_args}] = {{{self_arg}, {args_list}}};"
)
keyword_variable = ""
if kwargs_key_value_pairs:
keyword_variable = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.generate_keyvalue_args(code, args, kwargs_key_value_pairs, keyword_variable)
elif kwdict:
keyword_variable = kwdict.result()
code.putln(
f"{self.result()} = {function_caller}("
f"(PyObject*){function}, "
f"{Naming.callargs_cname}+{space_for_selfarg}, "
f"({len(args)+1:d}-{space_for_selfarg})"
f" | ({'1' if self.use_method_vectorcall else space_for_selfarg}*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)"
f"{', ' if keyword_variable else ''}{keyword_variable}"
");")
# Clean up.
code.put_xdecref_clear(self_arg, py_object_type)
for tmp in [self_arg, space_for_selfarg]:
code.funcstate.release_temp(tmp)
for arg in args:
arg.generate_disposal_code(code)
arg.free_temps(code)
if kwargs_key_value_pairs:
for kw_node in kwargs_key_value_pairs:
kw_node.generate_disposal_code(code)
kw_node.free_temps(code)
code.put_decref_clear(keyword_variable, py_object_type)
code.funcstate.release_temp(keyword_variable)
elif kwdict:
kwdict.generate_disposal_code(code)
kwdict.free_temps(code)
self.generate_dispose_function(code, function)
code.putln(code.error_goto_if_null(self.result(), self.pos))
self.generate_gotref(code)
code.putln("}")
| PyMethodCallNode |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass6.py | {
"start": 316,
"end": 664
} | class ____:
prop_1: str = field(init=False)
prop_2: str = field(default="hello")
prop_3: str = field(default_factory=lambda: "hello")
# This should generate an error because it appears after
# a property with a default value.
prop_4: str = field()
def __post_init__(self):
self.prop_1 = "test"
@dataclass
| ParentA |
python | scipy__scipy | scipy/stats/tests/test_stats.py | {
"start": 261899,
"end": 267413
} | class ____:
@pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning:dask")
@pytest.mark.filterwarnings("ignore:divide by zero encountered:RuntimeWarning:dask")
def test_describe_scalar(self, xp):
with warnings.catch_warnings(), \
np.errstate(invalid="ignore", divide="ignore"):
warnings.filterwarnings(
"ignore", "Degrees of freedom <= 0 for slice", RuntimeWarning)
n, mm, m, v, sk, kurt = stats.describe(xp.asarray(4.)[()])
assert n == 1
xp_assert_equal(mm[0], xp.asarray(4.0))
xp_assert_equal(mm[1], xp.asarray(4.0))
xp_assert_equal(m, xp.asarray(4.0))
xp_assert_equal(v ,xp.asarray(xp.nan))
xp_assert_equal(sk, xp.asarray(xp.nan))
xp_assert_equal(kurt, xp.asarray(xp.nan))
def test_describe_numbers(self, xp):
x = xp.concat((xp.ones((3, 4)), xp.full((2, 4), 2.)))
nc = 5
mmc = (xp.asarray([1., 1., 1., 1.]), xp.asarray([2., 2., 2., 2.]))
mc = xp.asarray([1.4, 1.4, 1.4, 1.4])
vc = xp.asarray([0.3, 0.3, 0.3, 0.3])
skc = xp.asarray([0.40824829046386357] * 4)
kurtc = xp.asarray([-1.833333333333333] * 4)
n, mm, m, v, sk, kurt = stats.describe(x)
assert n == nc
xp_assert_equal(mm[0], mmc[0])
xp_assert_equal(mm[1], mmc[1])
xp_assert_close(m, mc, rtol=4 * xp.finfo(m.dtype).eps)
xp_assert_close(v, vc, rtol=4 * xp.finfo(m.dtype).eps)
xp_assert_close(sk, skc)
xp_assert_close(kurt, kurtc)
n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1)
assert n == nc
xp_assert_equal(mm[0], mmc[0])
xp_assert_equal(mm[1], mmc[1])
xp_assert_close(m, mc, rtol=4 * xp.finfo(m.dtype).eps)
xp_assert_close(v, vc, rtol=4 * xp.finfo(m.dtype).eps)
xp_assert_close(sk, skc)
xp_assert_close(kurt, kurtc)
def describe_nan_policy_omit_test(self):
x = np.arange(10.)
x[9] = np.nan
nc, mmc = (9, (0.0, 8.0))
mc = 4.0
vc = 7.5
skc = 0.0
kurtc = -1.2300000000000002
n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='omit')
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc)
assert_array_almost_equal(kurt, kurtc, decimal=13)
def test_describe_nan_policy_other(self, xp):
x = xp.arange(10.)
x = xp.where(x==9, xp.nan, x)
if is_lazy_array(x):
with pytest.raises(TypeError, match='not supported for lazy arrays'):
stats.describe(x, nan_policy='raise')
else:
with pytest.raises(ValueError, match='The input contains nan values'):
stats.describe(x, nan_policy='raise')
n, mm, m, v, sk, kurt = stats.describe(x, nan_policy='propagate')
ref = xp.asarray(xp.nan)[()]
assert n == 10
xp_assert_equal(mm[0], ref)
xp_assert_equal(mm[1], ref)
xp_assert_equal(m, ref)
xp_assert_equal(v, ref)
xp_assert_equal(sk, ref)
xp_assert_equal(kurt, ref)
if is_numpy(xp):
self.describe_nan_policy_omit_test()
elif is_lazy_array(x):
with pytest.raises(TypeError, match='not supported for lazy arrays'):
stats.describe(x, nan_policy='omit')
message = 'nan_policy must be one of...'
with pytest.raises(ValueError, match=message):
stats.describe(x, nan_policy='foobar')
def test_describe_result_attributes(self):
# some result attributes are tuples, which aren't meant to be compared
# with `xp_assert_close`
actual = stats.describe(np.arange(5.))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness', 'kurtosis')
check_named_results(actual, attributes)
def test_describe_ddof(self, xp):
x = xp.concat((xp.ones((3, 4)), xp.full((2, 4), 2.)))
nc = 5
mmc = (xp.asarray([1., 1., 1., 1.]), xp.asarray([2., 2., 2., 2.]))
mc = xp.asarray([1.4, 1.4, 1.4, 1.4])
vc = xp.asarray([0.24, 0.24, 0.24, 0.24])
skc = xp.asarray([0.40824829046386357] * 4)
kurtc = xp.asarray([-1.833333333333333] * 4)
n, mm, m, v, sk, kurt = stats.describe(x, ddof=0)
assert n == nc
xp_assert_equal(mm[0], mmc[0])
xp_assert_equal(mm[1], mmc[1])
xp_assert_close(m, mc)
xp_assert_close(v, vc)
xp_assert_close(sk, skc)
xp_assert_close(kurt, kurtc)
def test_describe_axis_none(self, xp):
x = xp.concat((xp.ones((3, 4)), xp.full((2, 4), 2.)))
# expected values
nc = 20
mmc = (xp.asarray(1.0), xp.asarray(2.0))
mc = xp.asarray(1.3999999999999999)
vc = xp.asarray(0.25263157894736848)
skc = xp.asarray(0.4082482904638634)
kurtc = xp.asarray(-1.8333333333333333)
# actual values
n, mm, m, v, sk, kurt = stats.describe(x, axis=None)
assert n == nc
xp_assert_equal(mm[0], mmc[0])
xp_assert_equal(mm[1], mmc[1])
xp_assert_close(m, mc)
xp_assert_close(v, vc)
xp_assert_close(sk, skc)
xp_assert_close(kurt, kurtc)
def test_describe_empty(self, xp):
message = "The input must not be empty."
with pytest.raises(ValueError, match=message):
stats.describe(xp.asarray([]))
| TestDescribe |
python | ray-project__ray | rllib/examples/curriculum/curriculum_learning.py | {
"start": 5309,
"end": 9246
} | class ____(RLlibCallback):
"""Custom callback implementing `on_train_result()` for changing the envs' maps."""
def on_algorithm_init(
self,
*,
algorithm: "Algorithm",
**kwargs,
) -> None:
# Set the initial task to 0.
algorithm._counters["current_env_task"] = 0
def on_train_result(
self,
*,
algorithm: Algorithm,
metrics_logger=None,
result: dict,
**kwargs,
) -> None:
# Hack: Store the current task inside a counter in our Algorithm.
# W/o a curriculum, the task is always 2 (hardest).
if args.no_curriculum:
algorithm._counters["current_env_task"] = 2
current_task = algorithm._counters["current_env_task"]
# If episode return is consistently `args.upgrade_task_threshold`, we switch
# to a more difficult task (if possible). If we already mastered the most
# difficult task, we publish our victory in the result dict.
result["task_solved"] = 0.0
current_return = result[ENV_RUNNER_RESULTS][EPISODE_RETURN_MEAN]
if current_return > args.upgrade_task_threshold:
if current_task < 2:
new_task = current_task + 1
print(
f"Switching task/map on all EnvRunners to #{new_task} (0=easiest, "
f"2=hardest), b/c R={current_return} on current task."
)
algorithm.env_runner_group.foreach_env_runner(
func=partial(_remote_fn, new_task=new_task)
)
algorithm._counters["current_env_task"] = new_task
# Hardest task was solved (1.0) -> report this in the results dict.
elif current_return == 1.0:
result["task_solved"] = 1.0
# Emergency brake: If return is 0.0 AND we are already at a harder task (1 or
# 2), we go back to task=0.
elif current_return == 0.0 and current_task > 0:
print(
"Emergency brake: Our policy seemed to have collapsed -> Setting task "
"back to 0."
)
algorithm.env_runner_group.foreach_env_runner(
func=partial(_remote_fn, new_task=0)
)
algorithm._counters["current_env_task"] = 0
if __name__ == "__main__":
args = parser.parse_args()
base_config = (
get_trainable_cls(args.algo)
.get_default_config()
# Plug in our curriculum callbacks that controls when we should upgrade the env
# task based on the received return for the current task.
.callbacks(EnvTaskCallback)
.environment(
"FrozenLake-v1",
env_config={
# w/ curriculum: start with task=0 (easiest)
# w/o curriculum: start directly with hardest task 2.
"desc": ENV_MAPS[2 if args.no_curriculum else 0],
**ENV_OPTIONS,
},
)
.env_runners(
num_envs_per_env_runner=5,
env_to_module_connector=lambda env, spaces, device: FlattenObservations(),
)
.training(
num_epochs=6,
vf_loss_coeff=0.01,
lr=0.0002,
)
.rl_module(model_config=DefaultModelConfig(vf_share_layers=True))
)
stop = {
TRAINING_ITERATION: args.stop_iters,
# Reward directly does not matter to us as we would like to continue
# after the policy reaches a return of ~1.0 on the 0-task (easiest).
# But we DO want to stop, once the entire task is learned (policy achieves
# return of 1.0 on the most difficult task=2).
"task_solved": 1.0,
NUM_ENV_STEPS_SAMPLED_LIFETIME: args.stop_timesteps,
}
run_rllib_example_script_experiment(
base_config, args, stop=stop, success_metric={"task_solved": 1.0}
)
| EnvTaskCallback |
python | walkccc__LeetCode | solutions/1833. Maximum Ice Cream Bars/1833.py | {
"start": 0,
"end": 221
} | class ____:
def maxIceCream(self, costs: list[int], coins: int) -> int:
for i, cost in enumerate(sorted(costs)):
if coins >= cost:
coins -= cost
else:
return i
return len(costs)
| Solution |
python | mamba-org__mamba | micromamba/tests/test_install.py | {
"start": 263,
"end": 35264
} | class ____:
current_root_prefix = os.environ["MAMBA_ROOT_PREFIX"]
current_prefix = os.environ["CONDA_PREFIX"]
env_name = helpers.random_string()
root_prefix = os.path.expanduser(os.path.join("~", "tmproot" + helpers.random_string()))
prefix = os.path.join(root_prefix, "envs", env_name)
@classmethod
def setup_class(cls):
    # Point all mamba operations at the throw-away root/target prefixes for
    # the duration of this test class.
    os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
    os.environ["CONDA_PREFIX"] = TestInstall.prefix
@classmethod
def setup_method(cls):
    # Re-create the test environment before each test; --offline avoids
    # network traffic and no_dry_run forces the env to really exist on disk.
    helpers.create("-n", TestInstall.env_name, "--offline", no_dry_run=True)
@classmethod
def teardown_class(cls):
    # Restore the prefixes captured at class definition time, then drop the
    # temporary root prefix tree entirely.
    os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.current_root_prefix
    os.environ["CONDA_PREFIX"] = TestInstall.current_prefix
    shutil.rmtree(TestInstall.root_prefix)
@classmethod
def teardown_method(cls):
    # Individual tests may override prefixes or leak extra variables; reset
    # everything so the next test starts from a known state.
    os.environ["MAMBA_ROOT_PREFIX"] = TestInstall.root_prefix
    os.environ["CONDA_PREFIX"] = TestInstall.prefix
    for v in ("CONDA_CHANNELS", "MAMBA_TARGET_PREFIX"):
        if v in os.environ:
            os.environ.pop(v)
    # Remove the target env contents so tests do not observe earlier installs.
    if Path(TestInstall.prefix).exists():
        helpers.rmtree(TestInstall.prefix)
@classmethod
def config_tests(cls, res, root_prefix=root_prefix, target_prefix=prefix):
    """Assert the common prefix-related invariants of a --print-config-only result."""
    assert res["root_prefix"] == root_prefix
    assert res["target_prefix"] == target_prefix
    assert res["use_target_prefix_fallback"]
    assert res["use_default_prefix_fallback"]
    assert res["use_root_prefix_fallback"]
    # `install` must accept an existing prefix but refuse a missing prefix or
    # one that is not a conda environment.
    checks = (
        helpers.MAMBA_ALLOW_EXISTING_PREFIX
        | helpers.MAMBA_NOT_ALLOW_MISSING_PREFIX
        | helpers.MAMBA_NOT_ALLOW_NOT_ENV_PREFIX
        | helpers.MAMBA_EXPECT_EXISTING_PREFIX
    )
    assert res["target_prefix_checks"] == checks
@pytest.mark.parametrize(
"source,file_type",
[
("cli_only", None),
("spec_file_only", "classic"),
("spec_file_only", "explicit"),
("spec_file_only", "yaml"),
("both", "classic"),
("both", "explicit"),
("both", "yaml"),
],
)
def test_specs(self, source, file_type, existing_cache):
cmd = []
specs = []
if source in ("cli_only", "both"):
specs = ["xtensor-python", "xtl"]
cmd = list(specs)
if source in ("spec_file_only", "both"):
f_name = helpers.random_string()
spec_file = os.path.join(TestInstall.root_prefix, f_name)
if file_type == "classic":
file_content = ["xtensor >0.20", "xsimd"]
specs += file_content
elif file_type == "explicit":
channel = "https://conda.anaconda.org/conda-forge/linux-64/"
explicit_specs = [
channel + "xtensor-0.21.5-hc9558a2_0.tar.bz2#d330e02e5ed58330638a24601b7e4887",
channel + "xsimd-7.4.8-hc9558a2_0.tar.bz2#32d5b7ad7d6511f1faacf87e53a63e5f",
]
file_content = ["@EXPLICIT"] + explicit_specs
specs = explicit_specs
else: # yaml
spec_file += ".yaml"
file_content = ["dependencies:", " - xtensor >0.20", " - xsimd"]
specs += ["xtensor >0.20", "xsimd"]
with open(spec_file, "w") as f:
f.write("\n".join(file_content))
cmd += ["-f", spec_file]
res = helpers.install(*cmd, "--print-config-only")
TestInstall.config_tests(res)
assert res["env_name"] == ""
assert res["specs"] == specs
@pytest.mark.parametrize("root_prefix", (None, "env_var", "cli"))
@pytest.mark.parametrize("target_is_root", (False, True))
@pytest.mark.parametrize("cli_prefix", (False, True))
@pytest.mark.parametrize("cli_env_name", (False, True))
@pytest.mark.parametrize("yaml_name", (False, True, "prefix"))
@pytest.mark.parametrize("env_var", (False, True))
@pytest.mark.parametrize("current_target_prefix_fallback", (False, True))
def test_target_prefix(
self,
root_prefix,
target_is_root,
cli_prefix,
cli_env_name,
yaml_name,
env_var,
current_target_prefix_fallback,
existing_cache,
):
cmd = []
if root_prefix in (None, "cli"):
os.environ["MAMBA_DEFAULT_ROOT_PREFIX"] = os.environ.pop("MAMBA_ROOT_PREFIX")
if root_prefix == "cli":
cmd += ["-r", TestInstall.root_prefix]
r = TestInstall.root_prefix
if target_is_root:
p = r
n = "base"
else:
p = TestInstall.prefix
n = TestInstall.env_name
expected_p = p
if cli_prefix:
cmd += ["-p", p]
if cli_env_name:
cmd += ["-n", n]
if yaml_name:
f_name = helpers.random_string() + ".yaml"
spec_file = os.path.join(TestInstall.prefix, f_name)
if yaml_name == "prefix":
yaml_n = p
else:
yaml_n = n
if not (cli_prefix or cli_env_name or target_is_root):
expected_p = os.path.join(TestInstall.root_prefix, "envs", yaml_n)
file_content = [
f"name: {yaml_n}",
"dependencies: [xtensor]",
]
with open(spec_file, "w") as f:
f.write("\n".join(file_content))
cmd += ["-f", spec_file]
if env_var:
os.environ["MAMBA_TARGET_PREFIX"] = p
if not current_target_prefix_fallback:
os.environ.pop("CONDA_PREFIX")
os.environ.pop("CONDA_DEFAULT_ENV")
else:
os.environ["CONDA_PREFIX"] = p
if (cli_prefix and cli_env_name) or (yaml_name == "prefix"):
with pytest.raises(subprocess.CalledProcessError):
helpers.install(*cmd, "--print-config-only")
elif not (
cli_prefix or cli_env_name or yaml_name or env_var or current_target_prefix_fallback
):
# Fallback on root prefix
res = helpers.install(*cmd, "--print-config-only")
TestInstall.config_tests(res, root_prefix=r, target_prefix=r)
else:
res = helpers.install(*cmd, "--print-config-only")
TestInstall.config_tests(res, root_prefix=r, target_prefix=expected_p)
def test_target_prefix_with_no_settings(
self,
existing_cache,
):
# Specify no arg
cmd = []
# Get the actual set MAMBA_ROOT_PREFIX when setting up `TestInstall` class
os.environ["MAMBA_DEFAULT_ROOT_PREFIX"] = os.environ.pop("MAMBA_ROOT_PREFIX")
os.environ.pop("CONDA_PREFIX")
os.environ.pop("CONDA_DEFAULT_ENV")
# Fallback on root prefix
res = helpers.install(*cmd, "--print-config-only")
TestInstall.config_tests(
res,
root_prefix=TestInstall.root_prefix,
target_prefix=TestInstall.root_prefix,
)
@pytest.mark.skipif(
sys.platform == "win32",
reason="MAMBA_ROOT_PREFIX is set in windows GH workflow",
)
def test_target_prefix_with_no_settings_and_no_env_var(
self,
existing_cache,
):
# Specify no arg
cmd = []
os.environ.pop("MAMBA_ROOT_PREFIX")
os.environ.pop("CONDA_PREFIX")
os.environ.pop("CONDA_DEFAULT_ENV")
# Fallback on root prefix
res = helpers.install(*cmd, "--print-config-only")
TestInstall.config_tests(
res,
root_prefix=TestInstall.current_root_prefix,
target_prefix=TestInstall.current_root_prefix,
)
@pytest.mark.parametrize("cli", (False, True))
@pytest.mark.parametrize("yaml", (False, True))
@pytest.mark.parametrize("env_var", (False, True))
@pytest.mark.parametrize("rc_file", (False, True))
def test_channels(self, cli, yaml, env_var, rc_file, existing_cache):
cmd = []
expected_channels = []
if cli:
cmd += ["-c", "cli"]
expected_channels += ["cli"]
if yaml:
f_name = helpers.random_string() + ".yaml"
spec_file = os.path.join(TestInstall.prefix, f_name)
file_content = [
"channels: [yaml]",
"dependencies: [xtensor]",
]
with open(spec_file, "w") as f:
f.write("\n".join(file_content))
cmd += ["-f", spec_file]
expected_channels += ["yaml"]
if env_var:
os.environ["CONDA_CHANNELS"] = "env_var"
expected_channels += ["env_var"]
if rc_file:
f_name = helpers.random_string() + ".yaml"
rc_file = os.path.join(TestInstall.prefix, f_name)
file_content = ["channels: [rc]"]
with open(rc_file, "w") as f:
f.write("\n".join(file_content))
cmd += ["--rc-file", rc_file]
expected_channels += ["rc"]
res = helpers.install(*cmd, "--print-config-only", no_rc=not rc_file, default_channel=False)
TestInstall.config_tests(res)
if expected_channels:
assert res["channels"] == expected_channels
else:
assert res["channels"] == ["conda-forge"]
@pytest.mark.parametrize("type", ("yaml", "classic", "explicit"))
def test_multiple_spec_files(self, type, existing_cache):
cmd = []
specs = ["xtensor", "xsimd"]
channel = "https://conda.anaconda.org/conda-forge/linux-64/"
explicit_specs = [
channel + "xtensor-0.21.5-hc9558a2_0.tar.bz2#d330e02e5ed58330638a24601b7e4887",
channel + "linux-64/xsimd-7.4.8-hc9558a2_0.tar.bz2#32d5b7ad7d6511f1faacf87e53a63e5f",
]
for i in range(2):
f_name = helpers.random_string()
file = os.path.join(TestInstall.prefix, f_name)
if type == "yaml":
file += ".yaml"
file_content = [f"dependencies: [{specs[i]}]"]
elif type == "classic":
file_content = [specs[i]]
else: # explicit
file_content = ["@EXPLICIT", explicit_specs[i]]
with open(file, "w") as f:
f.write("\n".join(file_content))
cmd += ["-f", file]
res = helpers.install(*cmd, "--print-config-only")
if type == "yaml" or type == "classic":
assert res["specs"] == specs
else: # explicit
assert res["specs"] == [explicit_specs[0]]
@pytest.mark.parametrize("priority", (None, "disabled", "flexible", "strict"))
@pytest.mark.parametrize("no_priority", (None, True))
@pytest.mark.parametrize("strict_priority", (None, True))
def test_channel_priority(self, priority, no_priority, strict_priority, existing_cache):
cmd = ["-p", TestInstall.prefix, "xtensor"]
expected_priority = "flexible"
if priority:
cmd += ["--channel-priority", priority]
expected_priority = priority
if no_priority:
cmd += ["--no-channel-priority"]
expected_priority = "disabled"
if strict_priority:
cmd += ["--strict-channel-priority"]
expected_priority = "strict"
if (
(priority is not None)
and (
(no_priority and priority != "disabled")
or (strict_priority and priority != "strict")
)
or (no_priority and strict_priority)
):
with pytest.raises(subprocess.CalledProcessError):
helpers.install(*cmd, "--print-config-only")
else:
res = helpers.install(*cmd, "--print-config-only")
assert res["channel_priority"] == expected_priority
def test_quotes(self, existing_cache):
    """An interpolated prefix string must round-trip unchanged into the config."""
    res = helpers.install("-p", f"{TestInstall.prefix}", "xtensor", "--print-config-only")
    assert res["target_prefix"] == TestInstall.prefix
@pytest.mark.parametrize("prefix", ("target", "root"))
def test_expand_user(self, prefix, existing_cache):
if prefix == "target":
r = TestInstall.root_prefix
p = TestInstall.prefix.replace(os.path.expanduser("~"), "~")
else:
r = TestInstall.root_prefix.replace(os.path.expanduser("~"), "~")
p = TestInstall.prefix
cmd = [
"-r",
r,
"-p",
p,
"xtensor",
"--print-config-only",
]
res = helpers.install(*cmd)
assert res["target_prefix"] == TestInstall.prefix
assert res["root_prefix"] == TestInstall.root_prefix
def test_empty_specs(self, existing_cache):
    """Installing nothing at all is a no-op and says so."""
    output = helpers.install().strip()
    assert "Nothing to do." in output
@pytest.mark.skipif(
helpers.dry_run_tests is helpers.DryRun.ULTRA_DRY,
reason="Running only ultra-dry tests",
)
@pytest.mark.parametrize("already_installed", [False, True])
def test_non_explicit_spec(self, already_installed, existing_cache):
cmd = ["-p", TestInstall.prefix, "xtensor", "--json"]
if already_installed:
helpers.install(*cmd, no_dry_run=True)
res = helpers.install(*cmd)
assert res["success"]
assert res["dry_run"] == (helpers.dry_run_tests == helpers.DryRun.DRY)
if already_installed:
keys = {"dry_run", "success", "prefix", "message"}
assert keys.issubset(set(res.keys()))
else:
keys = {"success", "prefix", "actions", "dry_run"}
assert keys.issubset(set(res.keys()))
action_keys = {"LINK", "PREFIX"}
assert action_keys.issubset(set(res["actions"].keys()))
packages = {pkg["name"] for pkg in res["actions"]["LINK"]}
expected_packages = {"xtensor", "xtl"}
assert expected_packages.issubset(packages)
if not helpers.dry_run_tests:
pkg_name = helpers.get_concrete_pkg(res, "xtensor")
checker = helpers.PackageChecker("xtensor", TestInstall.current_root_prefix)
checker.check_install_integrity()
assert checker.get_name_version_build() == pkg_name
@pytest.mark.skipif(
helpers.dry_run_tests is helpers.DryRun.ULTRA_DRY,
reason="Running only ultra-dry tests",
)
@pytest.mark.parametrize("already_installed", [False, True])
@pytest.mark.parametrize("valid", [False, True])
def test_explicit_specs(self, already_installed, valid, existing_cache):
spec_file_content = [
"@EXPLICIT",
"https://conda.anaconda.org/conda-forge/linux-64/xtensor-0.21.5-hc9558a2_0.tar.bz2#d330e02e5ed58330638a24601b7e4887",
]
if not valid:
spec_file_content += ["https://conda.anaconda.org/conda-forge/linux-64/xtl"]
spec_file = os.path.join(TestInstall.root_prefix, "explicit_specs.txt")
with open(spec_file, "w") as f:
f.write("\n".join(spec_file_content))
cmd = ("-p", TestInstall.prefix, "-q", "-f", spec_file)
if valid:
helpers.install(*cmd, default_channel=False)
list_res = helpers.umamba_list("-p", TestInstall.prefix, "--json")
assert len(list_res) == 1
pkg = list_res[0]
assert pkg["name"] == "xtensor"
assert pkg["version"] == "0.21.5"
assert pkg["build_string"] == "hc9558a2_0"
else:
with pytest.raises(subprocess.CalledProcessError):
helpers.install(*cmd, default_channel=False)
@pytest.mark.skipif(
    helpers.dry_run_tests is helpers.DryRun.ULTRA_DRY,
    reason="Running only ultra-dry tests",
)
@pytest.mark.parametrize(
    "alias",
    [
        "",
        "https://conda.anaconda.org/",
        "https://repo.mamba.pm/",
        "https://repo.mamba.pm",
    ],
)
def test_channel_alias(self, alias, existing_cache):
    """Whatever alias is in effect, linked packages must report channel conda-forge."""
    args = ["xtensor", "--json"]
    if alias:
        args += ["--channel-alias", alias]
    res = helpers.install(*args)
    assert all(pkg["channel"] == "conda-forge" for pkg in res["actions"]["LINK"])
@pytest.mark.skipif(
helpers.dry_run_tests is helpers.DryRun.ULTRA_DRY,
reason="Running only ultra-dry tests",
)
def test_no_python_pinning(self, existing_cache):
helpers.install("python=3.9.19", no_dry_run=True)
res = helpers.install("setuptools=63.4.3", "--no-py-pin", "--json")
keys = {"success", "prefix", "actions", "dry_run"}
assert keys.issubset(set(res.keys()))
action_keys = {"LINK", "UNLINK", "PREFIX"}
assert action_keys.issubset(set(res["actions"].keys()))
# When using `--no-py-pin`, it may or may not update the already installed
# python version, but `python_abi` is installed in any case
# The following tests/assertions consider both cases
expected_link_packages = {"python_abi"}
link_packages = {pkg["name"] for pkg in res["actions"]["LINK"]}
assert expected_link_packages.issubset(link_packages)
unlink_packages = {pkg["name"] for pkg in res["actions"]["UNLINK"]}
if {"python"}.issubset(link_packages):
assert {"python"}.issubset(unlink_packages)
py_pkg = [pkg for pkg in res["actions"]["LINK"] if pkg["name"] == "python"][0]
assert py_pkg["version"] != ("3.9.19")
py_pkg = [pkg for pkg in res["actions"]["UNLINK"] if pkg["name"] == "python"][0]
assert py_pkg["version"] == ("3.9.19")
else:
assert len(res["actions"]["LINK"]) == 2 # Should be setuptools and python_abi
py_abi_pkg = [pkg for pkg in res["actions"]["LINK"] if pkg["name"] == "python_abi"][0]
assert py_abi_pkg["version"] == ("3.9")
setuptools_pkg = [pkg for pkg in res["actions"]["LINK"] if pkg["name"] == "setuptools"][
0
]
assert setuptools_pkg["version"] == ("63.4.3")
assert len(res["actions"]["UNLINK"]) == 1 # Should be setuptools
assert res["actions"]["UNLINK"][0]["name"] == "setuptools"
@pytest.mark.skipif(
    helpers.dry_run_tests is helpers.DryRun.ULTRA_DRY,
    reason="Running only ultra-dry tests",
)
@pytest.mark.skipif(
    sys.platform == "win32" or (sys.platform == "darwin" and platform.machine() == "arm64"),
    reason="Python2 no available",
)
def test_python_pinning(self, existing_cache):
    """Black fails to install as it is not available for pinned Python 2."""
    res = helpers.install("python=2", "--json", no_dry_run=True)
    assert res["success"]
    # FIX: the old try/except relied on a bare `assert False`, which is
    # stripped under `python -O`; pytest.raises fails loudly when no error
    # is raised and matches the style used elsewhere in this file.
    with pytest.raises(subprocess.CalledProcessError):
        helpers.install("black", "--py-pin", "--json")
@pytest.mark.skipif(
helpers.dry_run_tests is helpers.DryRun.ULTRA_DRY,
reason="Running only ultra-dry tests",
)
def test_freeze_installed(self, existing_cache):
helpers.install("xtensor=0.24", no_dry_run=True)
res = helpers.install("xtensor-blas", "--freeze-installed", "--json")
# without freeze installed, xtensor-blas 0.21.0 should be installed and xtensor updated to 0.25
keys = {"success", "prefix", "actions", "dry_run"}
assert keys.issubset(set(res.keys()))
action_keys = {"LINK", "PREFIX"}
assert action_keys.issubset(set(res["actions"].keys()))
expected_packages = {"xtensor-blas"}
link_packages = {pkg["name"] for pkg in res["actions"]["LINK"]}
assert expected_packages.issubset(link_packages)
assert res["actions"]["LINK"][-1]["version"] == "0.20.0"
def test_channel_specific(self, existing_cache):
    """conda-forge::xtensor must pull xtensor and xtl exclusively from conda-forge."""
    res = helpers.install("conda-forge::xtensor", "--json", default_channel=False, no_rc=True)

    assert {"success", "prefix", "actions", "dry_run"} <= set(res.keys())
    assert {"LINK", "PREFIX"} <= set(res["actions"].keys())

    linked_names = {pkg["name"] for pkg in res["actions"]["LINK"]}
    assert {"xtensor", "xtl"} <= linked_names
    assert all(pkg["channel"] == "conda-forge" for pkg in res["actions"]["LINK"])
def test_explicit_noarch(self, existing_cache):
helpers.install("python", no_dry_run=True)
channel = "https://conda.anaconda.org/conda-forge/noarch/"
explicit_spec = (
channel + "appdirs-1.4.4-pyh9f0ad1d_0.tar.bz2#5f095bc6454094e96f146491fd03633b"
)
file_content = ["@EXPLICIT", explicit_spec]
spec_file = os.path.join(TestInstall.root_prefix, "explicit_specs_no_arch.txt")
with open(spec_file, "w") as f:
f.write("\n".join(file_content))
cmd = ("-p", TestInstall.prefix, "-q", "-f", spec_file)
helpers.install(*cmd, default_channel=False)
list_res = helpers.umamba_list("-p", TestInstall.prefix, "--json")
pkgs = [p for p in list_res if p["name"] == "appdirs"]
assert len(pkgs) == 1
pkg = pkgs[0]
assert pkg["version"] == "1.4.4"
assert pkg["build_string"] == "pyh9f0ad1d_0"
def test_broken_package_name(self):
    """A URL whose filename lacks name/version must be rejected with a clear error."""
    non_existing_url = "https://026e9ab9-6b46-4285-ae0d-427553801720.de/mypackage.tar.bz2"
    # FIX: the previous try/except silently PASSED when install raised no
    # exception at all; pytest.raises turns a missing error into a test failure.
    with pytest.raises(subprocess.CalledProcessError) as exc_info:
        helpers.install(non_existing_url, default_channel=False)
    assert 'Missing name and version in filename "mypackage.tar.bz2"' in exc_info.value.stderr.decode(
        "utf-8"
    )
def test_no_reinstall(self, existing_cache):
    """Reinstalling is a no op."""
    first = helpers.install("xtensor", "--json")
    linked_names = {pkg["name"] for pkg in first["actions"]["LINK"]}
    assert "xtensor" in linked_names

    # The second run must not plan any transaction at all.
    second = helpers.install("xtensor", "--json")
    assert "actions" not in second
def test_install_local_package_relative_path(self):
"""Attempts to install a locally built package from a relative local path."""
spec = "./micromamba/tests/test-server/repo::test-package"
res = helpers.install(spec, "--json", default_channel=False)
assert res["success"]
pkgs = res["actions"]["LINK"]
assert len(pkgs) == 1
pkg = pkgs[0]
assert pkg["name"] == "test-package"
assert pkg["version"] == "0.1"
assert pkg["url"].startswith("file://")
def test_force_reinstall(self, existing_cache):
"""Force reinstall installs existing package again."""
res = helpers.install("xtensor", "--json")
assert "xtensor" in {pkg["name"] for pkg in res["actions"]["LINK"]}
reinstall_res = helpers.install("xtensor", "--force-reinstall", "--json")
assert "xtensor" in {pkg["name"] for pkg in reinstall_res["actions"]["LINK"]}
def test_force_reinstall_not_installed(self, existing_cache):
"""Force reinstall on non-installed packages is valid."""
reinstall_res = helpers.install("xtensor", "--force-reinstall", "--json")
assert "xtensor" in {pkg["name"] for pkg in reinstall_res["actions"]["LINK"]}
def test_install_compatible_release(self, existing_cache):
"""Install compatible release."""
res = helpers.install("numpy~=1.26.0", "--force-reinstall", "--json")
assert "numpy" in {pkg["name"] for pkg in res["actions"]["LINK"]}
numpy = [pkg for pkg in res["actions"]["LINK"] if pkg["name"] == "numpy"][0]
assert Version(numpy["version"]) >= Version("1.26.0")
def test_install_check_dirs(tmp_home, tmp_root_prefix):
env_name = "myenv"
env_prefix = tmp_root_prefix / "envs" / env_name
helpers.create("-n", env_name, "python=3.8")
res = helpers.install("-n", env_name, "nodejs", "--json")
assert os.path.isdir(env_prefix)
assert "nodejs" in {pkg["name"] for pkg in res["actions"]["LINK"]}
if helpers.platform.system() == "Windows":
assert os.path.isdir(env_prefix / "lib" / "site-packages")
else:
assert os.path.isdir(env_prefix / "lib" / "python3.8" / "site-packages")
def test_install_python_site_packages_path(tmp_home, tmp_root_prefix):
env_name = "myenv"
env_prefix = tmp_root_prefix / "envs" / env_name
helpers.create("-n", env_name, "python=3.13", "python-freethreading")
helpers.install("-n", env_name, "imagesize")
if helpers.platform.system() == "Windows":
assert os.path.isdir(env_prefix / "lib" / "site-packages" / "imagesize")
assert not os.path.isdir(env_prefix / "lib" / "python3.13t")
else:
# check that the noarch: python package installs into the python_site_packages_path directory
assert os.path.isdir(env_prefix / "lib" / "python3.13t" / "site-packages" / "imagesize")
# and not into the "standard" site-packages directory
assert not os.path.isdir(env_prefix / "lib" / "python3.13" / "site-packages" / "imagesize")
def test_python_site_packages_path_with_python_version(tmp_home, tmp_root_prefix):
"""
Check the consistent update of the `python_site_packages_path` when
switching from python 3.13 to python 3.13t.
"""
is_windows = helpers.platform.system() == "Windows"
env_name = "test_python_site_packages_path_with_python_version"
env_prefix = tmp_root_prefix / "envs" / env_name
# A arch and noarch: python package
res_install = helpers.create("-n", env_name, "--json", "python=3.13", "numpy", "boltons")
assert res_install["success"]
if is_windows:
# On Windows, the `python_site_packages_path`` is the same regardless of the python_version
# and of the freethreading builds.
python_site_packages_path_313 = env_prefix / "lib" / "site-packages"
python_site_packages_path_313t = python_site_packages_path_313
else:
python_site_packages_path_313 = env_prefix / "lib" / "python3.13" / "site-packages"
python_site_packages_path_313t = env_prefix / "lib" / "python3.13t" / "site-packages"
assert os.path.isdir(python_site_packages_path_313)
assert os.path.isdir(python_site_packages_path_313 / "numpy")
assert os.path.isdir(python_site_packages_path_313 / "boltons")
if not is_windows:
assert not os.path.isdir(python_site_packages_path_313t)
res_update = helpers.install("-n", env_name, "--json", "python=3.13", "python-freethreading")
# Check that the builds of numpy and boltons are being updated with python, python_abi and python-freethreading
assert res_update["success"]
# Get all package names from LINK actions
linked_packages = [action["name"] for action in res_update["actions"]["LINK"]]
# Check that all expected packages are present (order doesn't matter)
expected_packages = ["python-freethreading", "python", "python_abi", "numpy"]
if not is_windows:
expected_packages.append("boltons")
for package in expected_packages:
assert package in linked_packages, f"Expected package '{package}' not found in LINK actions"
assert os.path.isdir(python_site_packages_path_313t)
assert os.path.isdir(python_site_packages_path_313t / "numpy")
assert os.path.isdir(python_site_packages_path_313t / "boltons")
if not is_windows:
assert not os.path.isdir(python_site_packages_path_313 / "numpy")
assert not os.path.isdir(python_site_packages_path_313 / "boltons")
# Uninstall python
res_uninstall = helpers.remove("-n", env_name, "--json", "python")
assert res_uninstall["success"]
# Get all package names from the UNLINK actions
unlinked_packages = [action["name"] for action in res_uninstall["actions"]["UNLINK"]]
# Check that all expected packages are present (order doesn't matter)
expected_packages = ["python-freethreading", "python", "python_abi", "numpy", "boltons"]
for package in expected_packages:
assert package in unlinked_packages, (
f"Expected package '{package}' not found in UNLINK actions"
)
assert not os.path.isdir(python_site_packages_path_313 / "numpy")
assert not os.path.isdir(python_site_packages_path_313 / "boltons")
assert not os.path.isdir(python_site_packages_path_313t / "numpy")
assert not os.path.isdir(python_site_packages_path_313t / "boltons")
@pytest.mark.parametrize("output_flag", ["", "--json", "--quiet"])
def test_install_check_logs(tmp_home, tmp_root_prefix, output_flag):
    # Check each output mode: --json reports success, --quiet prints nothing,
    # and the default text output omits the activation hint.
    env_name = "env-install-check-logs"
    helpers.create("-n", env_name)
    res = helpers.install("-n", env_name, "xtensor", output_flag)

    if output_flag == "--json":
        assert res["success"]
    elif output_flag == "--quiet":
        assert res == ""
    else:
        assert "To activate this environment, use:" not in res
def test_install_local_package(tmp_home, tmp_root_prefix):
    """Attempts to install a .tar.bz2 package from a local directory."""
    # FIX: the docstring used to sit after the first statements (where it was a
    # no-op string expression), and a bare `tmp_root_prefix / "envs" / env_name`
    # expression computed a path that was never used; both corrected.
    env_name = "myenv"
    helpers.create("-n", env_name, default_channel=False)

    file_path = Path(__file__).parent / "data" / "cph_test_data-0.0.1-0.tar.bz2"
    res = helpers.install("-n", env_name, file_path, "--json", default_channel=False)

    assert len(res["actions"]["LINK"]) == 1
    pkg = res["actions"]["LINK"][0]
    assert pkg["name"] == "cph_test_data"
    assert pkg["version"] == "0.0.1"
    assert pkg["fn"] == "cph_test_data-0.0.1-0.tar.bz2"
    assert pkg["channel"].startswith("file:///")
    assert pkg["channel"].endswith("data")
    assert pkg["url"].startswith("file:///")
    assert pkg["url"].endswith("cph_test_data-0.0.1-0.tar.bz2")
@pytest.mark.skipif(
sys.platform == "darwin" and platform.machine() == "arm64",
reason="Python 3.7.9 not available",
)
def test_track_features(tmp_home, tmp_root_prefix):
env_name = "myenv"
tmp_root_prefix / "envs" / env_name
# should install CPython since PyPy has track features
version = "3.7.9"
helpers.create("-n", env_name, default_channel=False, no_rc=False)
helpers.install(
"-n",
env_name,
"-q",
f"python={version}",
"--strict-channel-priority",
no_rc=False,
)
res = helpers.umamba_run("-n", env_name, "python", "-c", "import sys; print(sys.version)")
if helpers.platform.system() == "Windows":
assert res.strip().startswith(version)
assert "[MSC v." in res.strip()
elif helpers.platform.system() == "Linux":
assert res.strip().startswith(version)
assert "[GCC" in res.strip()
else:
assert res.strip().startswith(version)
assert "[Clang" in res.strip()
if helpers.platform.system() == "Linux":
# now force PyPy install
helpers.install(
"-n",
env_name,
"-q",
f"python={version}=*pypy",
"--strict-channel-priority",
no_rc=False,
)
res = helpers.umamba_run("-n", env_name, "python", "-c", "import sys; print(sys.version)")
assert res.strip().startswith(version)
assert "[PyPy" in res.strip()
def test_reinstall_with_new_version(tmp_home, tmp_root_prefix):
env_name = "myenv"
tmp_root_prefix / "envs" / env_name
version = "3.8"
helpers.create("-n", env_name, default_channel=False, no_rc=False)
helpers.install(
"-n",
env_name,
"-q",
f"python={version}",
"pip",
no_rc=False,
)
res = helpers.umamba_run("-n", env_name, "python", "-c", "import sys; print(sys.version)")
assert version in res
res = helpers.umamba_run("-n", env_name, "python", "-c", "import pip; print(pip.__version__)")
assert len(res)
# Update python version
version = "3.9"
helpers.install(
"-n",
env_name,
"-q",
f"python={version}",
no_rc=False,
)
res = helpers.umamba_run("-n", env_name, "python", "-c", "import sys; print(sys.version)")
assert version in res
res = helpers.umamba_run("-n", env_name, "python", "-c", "import pip; print(pip.__version__)")
assert len(res)
env_yaml_content_to_install_empty_base = """
channels:
- conda-forge
dependencies:
- python
- xtensor
"""
def test_install_empty_base(tmp_home, tmp_root_prefix, tmp_path):
env_prefix = tmp_path / "env-install-empty-base"
os.environ["MAMBA_ROOT_PREFIX"] = str(env_prefix)
env_file_yml = tmp_path / "test_install_env_empty_base.yaml"
env_file_yml.write_text(env_yaml_content_to_install_empty_base)
cmd = ["-p", env_prefix, f"--file={env_file_yml}", "-y", "--json"]
res = helpers.install(*cmd)
assert res["success"]
packages = helpers.umamba_list("-p", env_prefix, "--json")
assert any(package["name"] == "xtensor" for package in packages)
assert any(package["name"] == "python" for package in packages)
env_specific_pip = """
channels:
- conda-forge
dependencies:
- python
- pip:
- numpy
"""
# Test that dry runs works if package are specified for the `pip:` section
def test_dry_run_pip_section(tmp_home, tmp_root_prefix, tmp_path):
env_prefix = tmp_path / "env-specific-pip"
env_file_yml = tmp_path / "test_install_env_specific_pip.yaml"
env_file_yml.write_text(env_specific_pip)
res = helpers.create("-p", env_prefix, "--json", "pip")
assert res["success"]
packages_at_creation = helpers.umamba_list("-p", env_prefix, "--json")
# Install from the environment file
res = helpers.install("-p", env_prefix, "-f", env_file_yml, "--json", "--dry-run")
assert res["success"]
assert res["dry_run"]
packages = helpers.umamba_list("-p", env_prefix, "--json")
assert packages == packages_at_creation
# Check that the packages are not installed using `pip`
res = helpers.umamba_run("-p", env_prefix, "pip", "list")
assert "numpy" not in res
def test_install_revision(tmp_home, tmp_root_prefix):
    # History built below: rev 1 installs xtl 0.7.2 + nlohmann_json, rev 2
    # updates xtl, rev 3 removes nlohmann_json.  Rolling back with
    # --revision 1 must restore xtl 0.7.2 and bring nlohmann_json back.
    env_name = "myenv"
    helpers.create("-n", env_name, "python=3.8")
    helpers.install("-n", env_name, "xtl=0.7.2", "nlohmann_json=3.12.0")
    helpers.update("-n", env_name, "xtl")
    helpers.uninstall("-n", env_name, "nlohmann_json")
    helpers.install("-n", env_name, "--revision", "1")
    res = helpers.umamba_list(
        "-n",
        env_name,
    )
    xtl_regex = re.compile(r"xtl\s+0.7.2")
    assert xtl_regex.search(res)
    assert "nlohmann_json" in res
| TestInstall |
python | tiangolo__fastapi | docs_src/query_param_models/tutorial002_an.py | {
"start": 166,
"end": 518
} | class ____(BaseModel):
model_config = {"extra": "forbid"}
limit: int = Field(100, gt=0, le=100)
offset: int = Field(0, ge=0)
order_by: Literal["created_at", "updated_at"] = "created_at"
tags: List[str] = []
@app.get("/items/")
async def read_items(filter_query: Annotated[FilterParams, Query()]):
return filter_query
| FilterParams |
python | FactoryBoy__factory_boy | tests/test_django.py | {
"start": 17053,
"end": 17533
} | class ____(django_test.TestCase):
def test_build(self):
    # build(): the factory stores a hash in `pw` that validates against PASSWORD.
    u = WithPasswordFactory.build()
    self.assertTrue(check_password(PASSWORD, u.pw))
def test_build_with_kwargs(self):
    # An explicit password (including non-ASCII chars) overrides the default.
    password = 'V3R¥.S€C®€T'
    u = WithPasswordFactory.build(pw=password)
    self.assertTrue(check_password(password, u.pw))
def test_create(self):
    # Same hashing behaviour when the instance is persisted via create().
    u = WithPasswordFactory.create()
    self.assertTrue(check_password(PASSWORD, u.pw))
| DjangoPasswordTestCase |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_kubernetes_engine.py | {
"start": 14074,
"end": 23493
} | class ____:
def setup_method(self):
    # Build a fresh operator per test so mutations cannot leak between tests.
    self.operator = GKECreateClusterOperator(
        task_id=TEST_TASK_ID,
        project_id=TEST_PROJECT_ID,
        location=TEST_LOCATION,
        body=GKE_CLUSTER_CREATE_BODY_DICT,
        gcp_conn_id=TEST_CONN_ID,
        impersonation_chain=TEST_IMPERSONATION_CHAIN,
    )
def test_template_fields(self):
expected_template_fields = {"body", "api_version", "deferrable", "poll_interval"} | set(
GKEOperatorMixin.template_fields
)
assert set(GKECreateClusterOperator.template_fields) == expected_template_fields
@pytest.mark.parametrize("body", [GKE_CLUSTER_CREATE_BODY_DICT, GKE_CLUSTER_CREATE_BODY_OBJECT])
def test_body(self, body):
op = GKECreateClusterOperator(
task_id=TEST_TASK_ID,
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
body=body,
gcp_conn_id=TEST_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
assert op.cluster_name == GKE_CLUSTER_NAME
@pytest.mark.parametrize(
    "body",
    [
        None,
        {"missing_name": "test-name", "initial_node_count": 1},
        {
            "name": "test-name",
            "initial_node_count": 1,
            "node_pools": [{"name": "a_node_pool", "initial_node_count": 1}],
        },
        {"missing_name": "test-name", "node_pools": [{"name": "a_node_pool", "initial_node_count": 1}]},
        {
            "name": "test-name",
            "missing_initial_node_count": 1,
            "missing_node_pools": [{"name": "a_node_pool", "initial_node_count": 1}],
        },
        type("Cluster", (object,), {"missing_name": "test-name", "initial_node_count": 1})(),
        type(
            "Cluster",
            (object,),
            {
                "missing_name": "test-name",
                "node_pools": [{"name": "a_node_pool", "initial_node_count": 1}],
            },
        )(),
        type(
            "Cluster",
            (object,),
            {
                "name": "test-name",
                "missing_initial_node_count": 1,
                "missing_node_pools": [{"name": "a_node_pool", "initial_node_count": 1}],
            },
        )(),
        type(
            "Cluster",
            (object,),
            {
                "name": "test-name",
                "initial_node_count": 1,
                "node_pools": [{"name": "a_node_pool", "initial_node_count": 1}],
            },
        )(),
    ],
)
def test_body_error(self, body):
    """Invalid bodies raise; deprecated fields trigger the deprecation error instead."""
    deprecated_fields = {"initial_node_count", "node_config", "zone", "instance_group_urls"}
    # FIX: was initialized to `{}` (an empty dict) although every reassignment
    # and the truthiness test below treat it as a set.
    used_deprecated_fields = set()
    if body:
        if isinstance(body, dict):
            used_deprecated_fields = set(body.keys()).intersection(deprecated_fields)
        else:
            used_deprecated_fields = {getattr(body, field, None) for field in deprecated_fields}
            used_deprecated_fields = {field for field in used_deprecated_fields if field}

    if used_deprecated_fields:
        with pytest.raises(AirflowProviderDeprecationWarning):
            GKECreateClusterOperator(
                project_id=TEST_PROJECT_ID, location=TEST_LOCATION, body=body, task_id=TEST_TASK_ID
            )
    else:
        with pytest.raises(AirflowException):
            GKECreateClusterOperator(
                project_id=TEST_PROJECT_ID, location=TEST_LOCATION, body=body, task_id=TEST_TASK_ID
            )
@pytest.mark.parametrize(
("deprecated_field_name", "deprecated_field_value"),
[
("initial_node_count", 1),
("node_config", mock.MagicMock()),
("zone", mock.MagicMock()),
("instance_group_urls", mock.MagicMock()),
],
)
def test_alert_deprecated_body_fields(self, deprecated_field_name, deprecated_field_value):
body = deepcopy(GKE_CLUSTER_CREATE_BODY_DICT)
body[deprecated_field_name] = deprecated_field_value
with pytest.raises(AirflowProviderDeprecationWarning):
GKECreateClusterOperator(
project_id=TEST_PROJECT_ID, location=TEST_LOCATION, body=body, task_id=TEST_TASK_ID
)
@mock.patch(GKE_OPERATORS_PATH.format("KubernetesEngineClusterLink"))
@mock.patch(GKE_OPERATORS_PATH.format("GKEHook"))
def test_execute(self, mock_cluster_hook, mock_link):
mock_create_cluster = mock_cluster_hook.return_value.create_cluster
mock_operation = mock_create_cluster.return_value
mock_operation.target_link = TEST_TARGET_LINK
mock_context = mock.MagicMock()
result = self.operator.execute(context=mock_context)
mock_link.persist.assert_called_once_with(context=mock_context, cluster=GKE_CLUSTER_CREATE_BODY_DICT)
mock_create_cluster.assert_called_once_with(
cluster=GKE_CLUSTER_CREATE_BODY_DICT,
project_id=TEST_PROJECT_ID,
wait_to_complete=True,
)
assert result == TEST_TARGET_LINK
@mock.patch(GKE_OPERATORS_PATH.format("GKECreateClusterOperator.log"))
@mock.patch(GKE_OPERATORS_PATH.format("KubernetesEngineClusterLink"))
@mock.patch(GKE_OPERATORS_PATH.format("GKEHook"))
def test_execute_error(self, mock_cluster_hook, mock_link, mock_log):
mock_create_cluster = mock_cluster_hook.return_value.create_cluster
expected_error_message = "test-message"
mock_create_cluster.side_effect = AlreadyExists(message=expected_error_message)
mock_get_cluster = mock_cluster_hook.return_value.get_cluster
mock_get_cluster.return_value.self_link = TEST_SELF_LINK
mock_context = mock.MagicMock()
result = self.operator.execute(context=mock_context)
mock_link.persist.assert_called_once_with(context=mock_context, cluster=GKE_CLUSTER_CREATE_BODY_DICT)
mock_create_cluster.assert_called_once_with(
cluster=GKE_CLUSTER_CREATE_BODY_DICT,
project_id=TEST_PROJECT_ID,
wait_to_complete=True,
)
mock_get_cluster.assert_called_once_with(
name=GKE_CLUSTER_NAME,
project_id=TEST_PROJECT_ID,
)
mock_log.info.assert_called_once_with("Assuming Success: %s", expected_error_message)
assert result == TEST_SELF_LINK
@mock.patch(GKE_OPERATORS_PATH.format("GKEOperationTrigger"))
@mock.patch(GKE_OPERATORS_PATH.format("KubernetesEngineClusterLink"))
@mock.patch(GKE_OPERATORS_PATH.format("GKECreateClusterOperator.defer"))
@mock.patch(GKE_OPERATORS_PATH.format("GKEHook"))
def test_deferrable(self, mock_cluster_hook, mock_defer, mock_link, mock_trigger):
mock_create_cluster = mock_cluster_hook.return_value.create_cluster
mock_operation = mock_create_cluster.return_value
mock_operation.name = TEST_OPERATION_NAME
mock_trigger_instance = mock_trigger.return_value
mock_context = mock.MagicMock()
self.operator.deferrable = True
self.operator.execute(context=mock_context)
mock_link.persist.assert_called_once_with(context=mock_context, cluster=GKE_CLUSTER_CREATE_BODY_DICT)
mock_create_cluster.assert_called_once_with(
cluster=GKE_CLUSTER_CREATE_BODY_DICT,
project_id=TEST_PROJECT_ID,
wait_to_complete=False,
)
mock_trigger.assert_called_once_with(
operation_name=TEST_OPERATION_NAME,
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
gcp_conn_id=TEST_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
poll_interval=10,
)
mock_defer.assert_called_once_with(
trigger=mock_trigger_instance,
method_name="execute_complete",
)
@mock.patch(GKE_OPERATORS_PATH.format("GKECreateClusterOperator.log"))
@mock.patch(GKE_OPERATORS_PATH.format("GKEHook"))
def test_execute_complete(self, cluster_hook, mock_log):
mock_get_operation = cluster_hook.return_value.get_operation
mock_get_operation.return_value.target_link = TEST_TARGET_LINK
expected_status, expected_message = "success", "test-message"
event = dict(status=expected_status, message=expected_message, operation_name=TEST_OPERATION_NAME)
result = self.operator.execute_complete(context=mock.MagicMock(), event=event)
mock_log.info.assert_called_once_with(expected_message)
mock_get_operation.assert_called_once_with(operation_name=TEST_OPERATION_NAME)
assert result == TEST_TARGET_LINK
@pytest.mark.parametrize("status", ["failed", "error"])
@mock.patch(GKE_OPERATORS_PATH.format("GKECreateClusterOperator.log"))
def test_execute_complete_error(self, mock_log, status):
expected_message = "test-message"
event = dict(status=status, message=expected_message)
with pytest.raises(AirflowException):
self.operator.execute_complete(context=mock.MagicMock(), event=event)
mock_log.exception.assert_called_once_with("Trigger ended with one of the failed statuses.")
| TestGKECreateClusterOperator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constrainedTypeVar18.py | {
"start": 317,
"end": 387
} | class ____:
def fn(self, returnable: T1) -> Awaitable[T1]: ...
| Async |
python | django__django | tests/db_functions/math/test_asin.py | {
"start": 269,
"end": 2344
} | class ____(TestCase):
def test_null(self):
IntegerModel.objects.create()
obj = IntegerModel.objects.annotate(null_asin=ASin("normal")).first()
self.assertIsNone(obj.null_asin)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal("0.9"), n2=Decimal("0.6"))
obj = DecimalModel.objects.annotate(
n1_asin=ASin("n1"), n2_asin=ASin("n2")
).first()
self.assertIsInstance(obj.n1_asin, Decimal)
self.assertIsInstance(obj.n2_asin, Decimal)
self.assertAlmostEqual(obj.n1_asin, Decimal(math.asin(obj.n1)))
self.assertAlmostEqual(obj.n2_asin, Decimal(math.asin(obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-0.5, f2=0.87)
obj = FloatModel.objects.annotate(
f1_asin=ASin("f1"), f2_asin=ASin("f2")
).first()
self.assertIsInstance(obj.f1_asin, float)
self.assertIsInstance(obj.f2_asin, float)
self.assertAlmostEqual(obj.f1_asin, math.asin(obj.f1))
self.assertAlmostEqual(obj.f2_asin, math.asin(obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=0, normal=1, big=-1)
obj = IntegerModel.objects.annotate(
small_asin=ASin("small"),
normal_asin=ASin("normal"),
big_asin=ASin("big"),
).first()
self.assertIsInstance(obj.small_asin, float)
self.assertIsInstance(obj.normal_asin, float)
self.assertIsInstance(obj.big_asin, float)
self.assertAlmostEqual(obj.small_asin, math.asin(obj.small))
self.assertAlmostEqual(obj.normal_asin, math.asin(obj.normal))
self.assertAlmostEqual(obj.big_asin, math.asin(obj.big))
def test_transform(self):
with register_lookup(DecimalField, ASin):
DecimalModel.objects.create(n1=Decimal("0.1"), n2=Decimal("0"))
DecimalModel.objects.create(n1=Decimal("1.0"), n2=Decimal("0"))
obj = DecimalModel.objects.filter(n1__asin__gt=1).get()
self.assertEqual(obj.n1, Decimal("1.0"))
| ASinTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1067639,
"end": 1068829
} | class ____(sgqlc.types.Type, Node):
"""Represents a 'base_ref_force_pushed' event on a given pull
request.
"""
__schema__ = github_schema
__field_names__ = ("actor", "after_commit", "before_commit", "created_at", "pull_request", "ref")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
after_commit = sgqlc.types.Field("Commit", graphql_name="afterCommit")
"""Identifies the after commit SHA for the 'base_ref_force_pushed'
event.
"""
before_commit = sgqlc.types.Field("Commit", graphql_name="beforeCommit")
"""Identifies the before commit SHA for the 'base_ref_force_pushed'
event.
"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
pull_request = sgqlc.types.Field(sgqlc.types.non_null("PullRequest"), graphql_name="pullRequest")
"""PullRequest referenced by event."""
ref = sgqlc.types.Field("Ref", graphql_name="ref")
"""Identifies the fully qualified ref name for the
'base_ref_force_pushed' event.
"""
| BaseRefForcePushedEvent |
python | pappasam__jedi-language-server | tests/test_data/completion/completion_test_class_self.py | {
"start": 0,
"end": 131
} | class ____:
def some_method(self, x):
"""Great method."""
return x
instance = SomeClass()
instance.some
| SomeClass |
python | chroma-core__chroma | chromadb/types.py | {
"start": 9308,
"end": 9444
} | class ____(TypedDict):
"""A KNN/ANN query result"""
id: str
distance: float
embedding: Optional[Vector]
| VectorQueryResult |
python | Pylons__pyramid | tests/test_exceptions.py | {
"start": 1165,
"end": 1797
} | class ____(unittest.TestCase):
def _makeOne(self, message):
from pyramid.exceptions import NotFound
return NotFound(message)
def test_it(self):
from pyramid.interfaces import IExceptionResponse
e = self._makeOne('notfound')
self.assertTrue(IExceptionResponse.providedBy(e))
self.assertEqual(e.status, '404 Not Found')
self.assertEqual(e.message, 'notfound')
def test_response_equivalence(self):
from pyramid.exceptions import NotFound
from pyramid.httpexceptions import HTTPNotFound
self.assertTrue(NotFound is HTTPNotFound)
| TestNotFound |
python | getsentry__sentry | tests/sentry/integrations/bitbucket/test_installed.py | {
"start": 923,
"end": 10160
} | class ____(APITestCase):
def setUp(self) -> None:
self.provider = "bitbucket"
self.path = "/extensions/bitbucket/installed/"
self.username = "sentryuser"
self.client_key = "connection:123"
self.public_key = "123abcDEFg"
self.shared_secret = "G12332434SDfsjkdfgsd"
self.base_api_url = "https://api.bitbucket.org"
self.base_url = "https://bitbucket.org"
self.domain_name = "bitbucket.org/sentryuser"
self.user_display_name = "Sentry User"
self.team_display_name = self.username
self.icon = "https://bitbucket.org/account/sentryuser/avatar/32/"
self.team_data = {
"username": self.username,
"display_name": self.team_display_name,
"account_id": "123456t256371u",
"links": {
"self": {"href": "https://api.bitbucket.org/2.0/users/sentryuser/"},
"html": {
"href": "https://bitbucket.org/%8Cde3c29fa-c919-4b59-8c43-59febd16a8e7%7D/"
},
"avatar": {"href": "https://bitbucket.org/account/sentryuser/avatar/32/"},
},
"created_on": "2018-04-18T00:46:37.374621+00:00",
"type": "team",
"uuid": "{e123-f456-g78910}",
}
self.user_data = self.team_data.copy()
self.user_data["type"] = "user"
self.user_data["display_name"] = self.user_display_name
self.metadata = {
"public_key": self.public_key,
"shared_secret": self.shared_secret,
"base_url": self.base_api_url,
"domain_name": self.domain_name,
"icon": self.icon,
"scopes": list(scopes),
"type": self.team_data["type"],
"uuid": self.team_data["uuid"],
}
self.user_metadata = self.metadata.copy()
self.user_metadata["type"] = self.user_data["type"]
self.user_metadata["domain_name"] = self.user_display_name
self.team_data_from_bitbucket: dict[str, Any] = {
"key": "sentry-bitbucket",
"eventType": "installed",
"baseUrl": self.base_url,
"sharedSecret": self.shared_secret,
"publicKey": self.public_key,
"user": self.team_data,
"productType": "bitbucket",
"baseApiUrl": self.base_api_url,
"clientKey": self.client_key,
"principal": self.team_data,
}
self.user_data_from_bitbucket = self.team_data_from_bitbucket.copy()
self.user_data_from_bitbucket["principal"] = self.user_data
self.data_without_public_key = {"identity": {"bitbucket_client_id": self.client_key}}
plugins.register(BitbucketPlugin)
def tearDown(self) -> None:
plugins.unregister(BitbucketPlugin)
super().tearDown()
def test_default_permissions(self) -> None:
# Permissions must be empty so that it will be accessible to bitbucket.
assert BitbucketInstalledEndpoint.authentication_classes == ()
assert BitbucketInstalledEndpoint.permission_classes == ()
def test_installed_with_public_key(self) -> None:
response = self.client.post(self.path, data=self.team_data_from_bitbucket)
assert response.status_code == 200
integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)
assert integration.name == self.username
del integration.metadata["webhook_secret"]
assert integration.metadata == self.metadata
def test_installed_without_public_key(self) -> None:
integration, created = Integration.objects.get_or_create(
provider=self.provider,
external_id=self.client_key,
defaults={"name": self.user_display_name, "metadata": self.user_metadata},
)
del self.user_data_from_bitbucket["principal"]["username"]
response = self.client.post(self.path, data=self.user_data_from_bitbucket)
assert response.status_code == 200
# assert no changes have been made to the integration
integration_after = Integration.objects.get(
provider=self.provider, external_id=self.client_key
)
assert integration.name == integration_after.name
del integration_after.metadata["webhook_secret"]
assert integration.metadata == integration_after.metadata
def test_installed_without_username(self) -> None:
"""Test a user (not team) installation where the user has hidden their username from public view"""
# Remove username to simulate privacy mode
del self.user_data_from_bitbucket["principal"]["username"]
response = self.client.post(self.path, data=self.user_data_from_bitbucket)
assert response.status_code == 200
integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)
assert integration.name == self.user_display_name
del integration.metadata["webhook_secret"]
assert integration.metadata == self.user_metadata
@mock.patch("sentry.integrations.bitbucket.integration.generate_token", return_value="0" * 64)
def test_installed_with_secret(self, mock_generate_token: mock.MagicMock) -> None:
response = self.client.post(self.path, data=self.team_data_from_bitbucket)
assert mock_generate_token.called
assert response.status_code == 200
integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)
assert integration.name == self.username
assert integration.metadata["webhook_secret"] == "0" * 64
@responses.activate
def test_plugin_migration(self) -> None:
with assume_test_silo_mode(SiloMode.REGION):
accessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name="sentryuser/repo",
url="https://bitbucket.org/sentryuser/repo",
provider="bitbucket",
external_id="123456",
config={"name": "sentryuser/repo"},
)
inaccessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name="otheruser/otherrepo",
url="https://bitbucket.org/otheruser/otherrepo",
provider="bitbucket",
external_id="654321",
config={"name": "otheruser/otherrepo"},
)
self.client.post(self.path, data=self.team_data_from_bitbucket)
integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)
responses.add(
responses.GET,
f"https://api.bitbucket.org/2.0/repositories/{accessible_repo.name}/hooks",
json={"values": [{"description": "sentry-bitbucket-repo-hook"}]},
)
with self.tasks():
with assume_test_silo_mode(SiloMode.REGION):
org = serialize_rpc_organization(self.organization)
BitbucketIntegrationProvider().post_install(
integration=integration, organization=org, extra={}
)
with assume_test_silo_mode(SiloMode.REGION):
assert (
Repository.objects.get(id=accessible_repo.id).integration_id == integration.id
)
assert (
Repository.objects.get(id=accessible_repo.id).provider
== "integrations:bitbucket"
)
assert Repository.objects.get(id=inaccessible_repo.id).integration_id is None
@responses.activate
def test_disable_plugin_when_fully_migrated(self) -> None:
with assume_test_silo_mode(SiloMode.REGION):
project = Project.objects.create(organization_id=self.organization.id)
plugin = plugins.get("bitbucket")
plugin.enable(project)
# Accessible to new Integration
Repository.objects.create(
organization_id=self.organization.id,
name="sentryuser/repo",
url="https://bitbucket.org/sentryuser/repo",
provider="bitbucket",
external_id="123456",
config={"name": "sentryuser/repo"},
)
self.client.post(self.path, data=self.team_data_from_bitbucket)
integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)
responses.add(
responses.GET,
"https://api.bitbucket.org/2.0/repositories/sentryuser/repo/hooks",
json={"values": [{"description": "sentry-bitbucket-repo-hook"}]},
)
assert "bitbucket" in [p.slug for p in plugins.for_project(project)]
with self.tasks():
with assume_test_silo_mode(SiloMode.REGION):
org = serialize_rpc_organization(self.organization)
BitbucketIntegrationProvider().post_install(
integration=integration, organization=org, extra={}
)
assert "bitbucket" not in [p.slug for p in plugins.for_project(project)]
| BitbucketInstalledEndpointTest |
python | kamyu104__LeetCode-Solutions | Python/find-center-of-star-graph.py | {
"start": 29,
"end": 216
} | class ____(object):
def findCenter(self, edges):
"""
:type edges: List[List[int]]
:rtype: int
"""
return edges[0][edges[0][1] in edges[1]]
| Solution |
python | openai__openai-python | src/openai/types/batch_usage.py | {
"start": 521,
"end": 938
} | class ____(BaseModel):
input_tokens: int
"""The number of input tokens."""
input_tokens_details: InputTokensDetails
"""A detailed breakdown of the input tokens."""
output_tokens: int
"""The number of output tokens."""
output_tokens_details: OutputTokensDetails
"""A detailed breakdown of the output tokens."""
total_tokens: int
"""The total number of tokens used."""
| BatchUsage |
python | vyperlang__vyper | tests/functional/grammar/test_grammar.py | {
"start": 1247,
"end": 3352
} | class ____(LarkStrategy):
def __init__(self, grammar, start, explicit_strategies):
super().__init__(grammar, start, explicit_strategies, alphabet=ALLOWED_CHARS)
self.terminal_strategies = {
k: v.map(fix_terminal) for k, v in self.terminal_strategies.items() # type: ignore
}
def draw_symbol(self, data, symbol, draw_state): # type: ignore
count = len(draw_state)
super().draw_symbol(data, symbol, draw_state)
try:
compile(
source="".join(draw_state[count:])
.replace("contract", "class")
.replace("struct", "class"), # HACK: Python ast.parse
filename="<string>",
mode="exec",
)
except SyntaxError:
# Python's grammar doesn't actually fully describe the behaviour of the
# CPython parser and AST-post-processor, so we just filter out errors.
assume(False)
def from_grammar() -> st.SearchStrategy[str]:
"""
Generate syntactically-valid Python source code based on the grammar.
"""
grammar = vyper_grammar()
explicit_strategies = dict(
_INDENT=st.just(" " * 4),
_DEDENT=st.just(""),
NAME=st.from_regex(r"[a-z_A-Z]+", fullmatch=True).filter(str.isidentifier),
)
return GrammarStrategy(grammar, "module", explicit_strategies)
# Avoid examples with *only* single or double quote docstrings
# because they trigger a trivial parser bug
SINGLE_QUOTE_DOCSTRING = re.compile(r"^'''.*'''$")
DOUBLE_QUOTE_DOCSTRING = re.compile(r'^""".*"""$')
def has_no_docstrings(c):
return not (SINGLE_QUOTE_DOCSTRING.match(c) or DOUBLE_QUOTE_DOCSTRING.match(c))
@pytest.mark.fuzzing
@given(code=from_grammar())
@hypothesis.settings(
max_examples=500, suppress_health_check=[HealthCheck.too_slow, HealthCheck.filter_too_much]
)
def test_grammar_bruteforce(code):
pre_parser = PreParser(is_interface=False)
pre_parser.parse(code + "\n")
tree = parse_to_ast(pre_parser.reformatted_code)
assert isinstance(tree, Module)
| GrammarStrategy |
python | django__django | django/utils/feedgenerator.py | {
"start": 8496,
"end": 8728
} | class ____:
"""An RSS enclosure"""
def __init__(self, url, length, mime_type):
"All args are expected to be strings"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
| Enclosure |
python | django-guardian__django-guardian | guardian/testapp/models.py | {
"start": 3532,
"end": 3636
} | class ____(AbstractUser, GuardianUserMixin):
custom_id = models.AutoField(primary_key=True)
| CustomUser |
python | django__django | tests/cache/failing_cache.py | {
"start": 60,
"end": 290
} | class ____(LocMemCache):
def set(self, *args, **kwargs):
raise Exception("Faked exception saving to cache")
async def aset(self, *args, **kwargs):
raise Exception("Faked exception saving to cache")
| CacheClass |
python | doocs__leetcode | solution/0200-0299/0296.Best Meeting Point/Solution.py | {
"start": 0,
"end": 474
} | class ____:
def minTotalDistance(self, grid: List[List[int]]) -> int:
def f(arr, x):
return sum(abs(v - x) for v in arr)
rows, cols = [], []
for i, row in enumerate(grid):
for j, v in enumerate(row):
if v:
rows.append(i)
cols.append(j)
cols.sort()
i = rows[len(rows) >> 1]
j = cols[len(cols) >> 1]
return f(rows, i) + f(cols, j)
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py | {
"start": 10487,
"end": 10889
} | class ____(RawDataMixin, IncrementalAppsflyerStream):
cursor_field = "event_time"
additional_fields = additional_fields.uninstall_events
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return f"raw-data/export/app/{self.app_id}/uninstall_events_report/v5"
| UninstallEvents |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-code-hierarchy/llama_index/packs/code_hierarchy/query_engine.py | {
"start": 777,
"end": 6172
} | class ____(CustomQueryEngine):
"""A keyword table made specifically to work with the code hierarchy node parser."""
nodes: Sequence[BaseNode]
node_dict: Optional[Dict[str, Tuple[int, BaseNode]]] = None
repo_map_depth: int = -1
include_repo_map: bool = True
repo_map: Optional[Tuple[Dict[str, Any], str]] = None
tool_instructions: str = DEFAULT_TOOL_INSTRUCTIONS
def _setup_node_dict(self) -> None:
"""Initialize the index."""
self.node_dict = {}
for node in self.nodes:
keys = self._extract_keywords_from_node(node)
for key in keys:
self.node_dict[key] = (node.metadata["start_byte"], node.text)
self.repo_map = CodeHierarchyNodeParser.get_code_hierarchy_from_nodes(
self.nodes, max_depth=self.repo_map_depth
)
def _extract_keywords_from_node(self, node: BaseNode) -> Set[str]:
"""Determine the keywords associated with the node in the index."""
keywords = self._extract_uuid_from_node(node)
keywords |= self._extract_module_from_node(node)
keywords |= self._extract_name_from_node(node)
return keywords
def _extract_uuid_from_node(self, node: BaseNode) -> Set[str]:
"""Extract the uuid from the node."""
return {node.id_}
def _extract_module_from_node(self, node: BaseNode) -> Set[str]:
"""Extract the module name from the node."""
keywords = set()
if not node.metadata["inclusive_scopes"]:
path = Path(node.metadata["filepath"])
name = path.name
name = re.sub(r"\..*$", "", name)
if name in self.node_dict:
its_start_byte, _ = self.node_dict[name]
if node.metadata["start_byte"] < its_start_byte:
keywords.add(name)
else:
keywords.add(name)
return keywords
def _extract_name_from_node(self, node: BaseNode) -> Set[str]:
"""Extract the name and signature from the node."""
keywords = set()
if node.metadata["inclusive_scopes"]:
name = node.metadata["inclusive_scopes"][-1]["name"]
start_byte = node.metadata["start_byte"]
if name in self.node_dict:
its_start_byte, _ = self.node_dict[name]
if start_byte < its_start_byte:
keywords.add(name)
else:
keywords.add(name)
return keywords
def custom_query(self, query: str) -> str:
"""
Query the index. Only use exact matches.
If there is no exact match, but there is one for a parent, returns the parent.
"""
if self.node_dict is None or self.repo_map is None:
self._setup_node_dict()
def get_all_dict_recursive(inp: Dict[str, Any]) -> Set[str]:
"""Get all keys and values from a dictionary of dictionaries recursively."""
kvs = set()
for key, value in inp.items():
kvs.add(key)
if isinstance(value, dict):
kvs |= get_all_dict_recursive(value)
else:
kvs.add(value)
return kvs
def get_parent_dict_recursive(inp: Dict[str, Any], query: str) -> str:
"""Get the parent of a key in a dictionary of dictionaries recursively."""
for key, value in inp.items():
if isinstance(value, dict):
if query in value:
return key
else:
parent = get_parent_dict_recursive(value, query)
if parent is not None:
return parent
return None
if query in self.node_dict:
return self.node_dict[query][1]
kvs = get_all_dict_recursive(self.repo_map[0])
if query not in kvs:
return None
parent_query = query
while parent_query not in self.node_dict:
parent_query = get_parent_dict_recursive(self.repo_map[0], parent_query)
if parent_query is None:
return "None"
# After finding the parent_query, ensure it's in self.node_dict before accessing
if parent_query in self.node_dict:
return self.node_dict[parent_query][1]
else:
return "None"
def get_tool_instructions(self) -> str:
"""Get the tool instructions."""
if self.node_dict is None or self.repo_map is None:
self._setup_node_dict()
return self.tool_instructions.format(
repo_map=self.repo_map[1] if self.include_repo_map else ""
)
def as_langchain_tool(
self,
**tool_kwargs: Any,
) -> "LlamaIndexTool":
"""
Return the index as a langchain tool.
Set a repo map depth of -1 to include all nodes.
otherwise set the depth to the desired max depth.
"""
from llama_index.core.langchain_helpers.agents import LlamaIndexTool
if self.node_dict is None or self.repo_map is None:
self._setup_node_dict()
return LlamaIndexTool(
name="Code Search",
description=self.get_tool_instructions(),
query_engine=self,
**tool_kwargs,
)
| CodeHierarchyKeywordQueryEngine |
python | mlflow__mlflow | mlflow/data/evaluation_dataset_source.py | {
"start": 79,
"end": 1797
} | class ____(DatasetSource):
"""
Represents the source of an evaluation dataset stored in MLflow's tracking store.
"""
def __init__(self, dataset_id: str):
"""
Args:
dataset_id: The ID of the evaluation dataset.
"""
self._dataset_id = dataset_id
@staticmethod
def _get_source_type() -> str:
return "mlflow_evaluation_dataset"
def load(self) -> Any:
"""
Loads the evaluation dataset from the tracking store using current tracking URI.
Returns:
The EvaluationDataset entity.
"""
from mlflow.tracking._tracking_service.utils import _get_store
store = _get_store()
return store.get_evaluation_dataset(self._dataset_id)
@staticmethod
def _can_resolve(raw_source: Any) -> bool:
"""
Determines if the raw source is an evaluation dataset ID.
"""
if isinstance(raw_source, str):
return raw_source.startswith("d-") and len(raw_source) == 34
return False
@classmethod
def _resolve(cls, raw_source: Any) -> "EvaluationDatasetSource":
"""
Creates an EvaluationDatasetSource from a dataset ID.
"""
if not cls._can_resolve(raw_source):
raise ValueError(f"Cannot resolve {raw_source} as an evaluation dataset ID")
return cls(dataset_id=raw_source)
def to_dict(self) -> dict[str, Any]:
return {
"dataset_id": self._dataset_id,
}
@classmethod
def from_dict(cls, source_dict: dict[Any, Any]) -> "EvaluationDatasetSource":
return cls(
dataset_id=source_dict["dataset_id"],
)
| EvaluationDatasetSource |
python | kamyu104__LeetCode-Solutions | Python/maximum-length-substring-with-two-occurrences.py | {
"start": 754,
"end": 1227
} | class ____(object):
def maximumLengthSubstring(self, s):
"""
:type s: str
:rtype: int
"""
COUNT = 2
result = 0
cnt = [0]*26
left = 0
for right, x in enumerate(s):
cnt[ord(x)-ord('a')] += 1
while cnt[ord(x)-ord('a')] > COUNT:
cnt[ord(s[left])-ord('a')] -= 1
left += 1
result = max(result, right-left+1)
return result
| Solution2 |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_eks.py | {
"start": 2675,
"end": 2763
} | class ____(TypedDict):
nodegroup_name: str
nodegroup_role_arn: str
| NodeGroupParams |
python | catalyst-team__catalyst | catalyst/contrib/datasets/imagecar.py | {
"start": 1169,
"end": 4023
} | class ____(Dataset):
"""
The dataset contains images of cars and the corresponding binary masks for them
"""
def __init__(
self,
root: str,
train: bool = True,
download: bool = False,
transforms: Optional[Callable] = None,
):
"""
Args:
root: str: root directory of dataset where
``CarvanaOneCarDataset/`` exist.
train: (bool, optional): If True, creates dataset from
training part, otherwise from test part
download: (bool, optional): If true, downloads the dataset from
the internet and puts it in root directory. If dataset
is already downloaded, it is not downloaded again.
transforms: (callable, optional): A function/transform that
takes in an image and returns a transformed version.
Raises:
RuntimeError: If ``download is False`` and the dataset not found.
Examples:
>>> from catalyst.contrib.datasets import CarvanaOneCarDataset
>>> dataset = CarvanaOneCarDataset(root='./',
>>> train=True,
>>> download=True,
>>> transforms=None)
>>> image = dataset[0]['image']
>>> mask = dataset[0]['mask']
"""
directory = Path(root) / "CarvanaOneCarDataset"
if download and not directory.exists():
_download_file_from_google_drive(
DATASET_IDX, f"{root}/CarvanaOneCarDataset.zip"
)
_extract_archive(f"{root}/CarvanaOneCarDataset.zip", f"{root}/", True)
if not directory.exists():
raise RuntimeError(
"Dataset not found. You can use download=True to download it"
)
split = "train" if train else "test"
mask_path = directory / f"{split}_masks"
image_path = directory / f"{split}_images"
self.image_paths = sorted(image_path.glob("*.jpg"))
self.mask_paths = sorted(mask_path.glob("*.png"))
self.transforms = transforms
def __len__(self) -> int:
"""
Returns:
int, dataset length
"""
return len(self.image_paths)
def __getitem__(self, idx: int) -> dict:
"""
Args:
idx: Index
Returns:
Dict with 2 fields: ``image`` and ``mask``
"""
image_path = str(self.image_paths[idx])
mask_path = str(self.mask_paths[idx])
result = {"image": cv2.imread(image_path), "mask": cv2.imread(mask_path, 2)}
if self.transforms is not None:
result = self.transforms(**result)
return result
__all__ = ["CarvanaOneCarDataset"]
| CarvanaOneCarDataset |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/conv.py | {
"start": 967,
"end": 12078
} | class ____(WeightedQuantizedModule):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
device=None,
dtype=None,
):
# All subclasses have this signature - See PR #49702s
raise NotImplementedError
def _init(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
bias,
padding_mode="zeros",
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
if in_channels % groups != 0:
raise ValueError("in_channels must be divisible by groups")
if out_channels % groups != 0:
raise ValueError("out_channels must be divisible by groups")
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if padding_mode not in _SUPPORTED_PADDING:
raise ValueError(
f"'padding_mode' {padding_mode} is not supported by quantized convolution"
)
self.padding_mode = padding_mode
# Initialize as NCHW. set_weight will internally transpose to NHWC.
if self.transposed:
weight_shape = [in_channels, out_channels // self.groups]
else:
weight_shape = [out_channels, in_channels // self.groups]
qweight = torch._empty_affine_quantized(
weight_shape + list(kernel_size),
scale=1,
zero_point=0,
dtype=torch.qint8,
**{k: v for k, v in factory_kwargs.items() if k != "dtype"},
)
bias_float = (
torch.zeros(
out_channels,
dtype=torch.float,
**{k: v for k, v in factory_kwargs.items() if k != "dtype"},
)
if bias
else None
)
self.set_weight_bias(qweight, bias_float)
self.scale = 1.0
self.zero_point = 0
def set_weight_bias(self, qweight, bias_float):
raise NotImplementedError
def bias(self):
raise NotImplementedError
def _weight_bias(self):
raise NotImplementedError
def extra_repr(self):
s = (
"{in_channels}, {out_channels}, kernel_size={kernel_size}"
", stride={stride}, scale={scale}, zero_point={zero_point}"
)
if self.padding != (0,) * len(self.padding):
s += ", padding={padding}"
if self.dilation != (1,) * len(self.dilation):
s += ", dilation={dilation}"
if self.output_padding != (0,) * len(self.output_padding):
s += ", output_padding={output_padding}"
if self.groups != 1:
s += ", groups={groups}"
if self.bias() is None:
s += ", bias=False"
return s.format(**self.__dict__)
# ===== Serialization methods =====
# The special consideration here is that we have to unpack the weights into
# their regular QTensor form for serialization. Packed weights should not
# live outside the process in which they were created, rather they should be
# derived from the QTensor weight.
# self
# |--- weight : Tensor
# |--- bias : Tensor
#
# TODO: maybe change to this when https://github.com/pytorch/pytorch/pull/32958 is landed
# self
# |--- _packed_params : Conv2dPackedParamsBase or Conv3dPackedParamsBase
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
(w, b) = self._weight_bias()
destination[prefix + "weight"] = w
destination[prefix + "bias"] = b
destination[prefix + "scale"] = torch.tensor(self.scale)
destination[prefix + "zero_point"] = torch.tensor(self.zero_point)
@torch.jit.export
def __getstate__(self):
(w, b) = self._weight_bias()
return (
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.padding,
self.dilation,
self.transposed,
self.output_padding,
self.groups,
self.padding_mode,
w,
b,
self.scale,
self.zero_point,
self.training,
)
# ===== Deserialization methods =====
# Counterpart to the serialization methods, we must pack the serialized
# QTensor weight into its packed format for use by the FBGEMM ops.
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
self.set_weight_bias(state_dict[prefix + "weight"], state_dict[prefix + "bias"])
state_dict.pop(prefix + "weight")
state_dict.pop(prefix + "bias")
self.scale = float(state_dict[prefix + "scale"])
state_dict.pop(prefix + "scale")
self.zero_point = int(state_dict[prefix + "zero_point"])
state_dict.pop(prefix + "zero_point")
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
@torch.jit.export
def __setstate__(self, state):
self.in_channels = state[0]
self.out_channels = state[1]
self.kernel_size = state[2]
self.stride = state[3]
self.padding = state[4]
self.dilation = state[5]
self.transposed = state[6]
self.output_padding = state[7]
self.groups = state[8]
self.padding_mode = state[9]
self.set_weight_bias(state[10], state[11])
self.scale = state[12]
self.zero_point = state[13]
self.training = state[14]
def __deepcopy__(self, memo):
new_instance = type(self).__new__(type(self))
torch.nn.Module.__init__(new_instance)
state = self.__getstate__()
new_instance.__setstate__(state)
return new_instance
def __copy__(self):
return self.__deepcopy__({})
@classmethod
def get_qconv(cls, mod, activation_post_process, weight_post_process=None):
r"""Creates a qconv object and returns it."""
if weight_post_process is None:
weight_post_process = mod.qconfig.weight()
weight_post_process(mod.weight)
assert weight_post_process.dtype == torch.qint8, (
"Weight observer must have a dtype of qint8"
)
qweight = _quantize_weight(mod.weight.float(), weight_post_process)
# the __init__ call used is the one from derived classes and not the one from _ConvNd
qconv = cls(
mod.in_channels,
mod.out_channels,
mod.kernel_size,
mod.stride,
mod.padding,
mod.dilation,
mod.groups,
mod.bias is not None,
mod.padding_mode,
)
qconv.set_weight_bias(qweight, mod.bias)
if (
activation_post_process is None
or activation_post_process.dtype == torch.float
):
return qconv # dynamic quantization doesn't need scale/zero_point
else:
act_scale, act_zp = activation_post_process.calculate_qparams()
qconv.scale = float(act_scale)
qconv.zero_point = int(act_zp)
return qconv
@staticmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
if hasattr(mod, "weight_fake_quant"):
# assert type(mod) is cls.__QAT_MODULE, " nnq." + cls.__name__ + \
# ".from_float only works for " + cls.__QAT_MODULE.__name__
if type(mod) is cls._NNIQAT_CONV_BN_MODULE:
mod.weight, mod.bias = fuse_conv_bn_weights(
mod.weight,
mod.bias,
mod.bn.running_mean,
mod.bn.running_var,
mod.bn.eps,
mod.bn.weight,
mod.bn.bias,
)
assert hasattr(mod, "activation_post_process"), (
"Input QAT module must have observer attached"
)
weight_post_process = mod.weight_fake_quant
activation_post_process = mod.activation_post_process
else:
assert type(mod) is cls._FLOAT_MODULE, (
" nnq."
+ cls.__name__
+ ".from_float only works for "
+ cls._FLOAT_MODULE.__name__
+ " but got:"
+ str(type(mod))
)
assert hasattr(mod, "qconfig"), (
"Input float module must have qconfig defined."
)
activation_post_process = (
None
if not hasattr(mod, "activation_post_process")
else mod.activation_post_process
)
if type(mod) in [
cls._NNI_CONV_RELU_MODULE,
cls._NNI_CONV_ADD_MODULE,
cls._NNI_CONV_ADD_RELU_MODULE,
]:
mod = mod[0]
weight_post_process = mod.qconfig.weight()
return cls.get_qconv(mod, activation_post_process, weight_post_process)
@classmethod
def from_reference(cls, ref_qconv, output_scale, output_zero_point):
r"""Create a (fbgemm/qnnpack) quantized module from a reference quantized module
Args:
ref_qconv (Module): a reference quantized module, either produced by torch.ao.quantization
utilities or provided by the user
output_scale (float): scale for output Tensor
output_zero_point (int): zero point for output Tensor
"""
qconv = cls(
ref_qconv.in_channels,
ref_qconv.out_channels,
ref_qconv.kernel_size, # type: ignore[arg-type]
ref_qconv.stride, # type: ignore[arg-type]
ref_qconv.padding, # type: ignore[arg-type]
ref_qconv.dilation, # type: ignore[arg-type]
ref_qconv.groups,
ref_qconv.bias is not None, # type: ignore[arg-type]
ref_qconv.padding_mode,
device=ref_qconv.weight.device,
dtype=ref_qconv.weight.dtype,
)
qweight = ref_qconv.get_quantized_weight()
qconv.set_weight_bias(qweight, ref_qconv.bias)
qconv.scale = float(output_scale)
qconv.zero_point = int(output_zero_point)
return qconv
| _ConvNd |
python | marshmallow-code__marshmallow | tests/test_deserialization.py | {
"start": 939,
"end": 2493
} | class ____:
@pytest.mark.parametrize("FieldClass", ALL_FIELDS)
def test_fields_allow_none_deserialize_to_none(self, FieldClass):
field = FieldClass(allow_none=True)
assert field.deserialize(None) is None
# https://github.com/marshmallow-code/marshmallow/issues/111
@pytest.mark.parametrize("FieldClass", ALL_FIELDS)
def test_fields_dont_allow_none_by_default(self, FieldClass):
field = FieldClass()
with pytest.raises(ValidationError, match="Field may not be null."):
field.deserialize(None)
def test_allow_none_is_true_if_missing_is_true(self):
field = fields.Raw(load_default=None)
assert field.allow_none is True
assert field.deserialize(None) is None
def test_list_field_deserialize_none_to_none(self):
field = fields.List(fields.String(allow_none=True), allow_none=True)
assert field.deserialize(None) is None
def test_tuple_field_deserialize_none_to_none(self):
field = fields.Tuple([fields.String()], allow_none=True)
assert field.deserialize(None) is None
def test_list_of_nested_allow_none_deserialize_none_to_none(self):
field = fields.List(fields.Nested(Schema(), allow_none=True))
assert field.deserialize([None]) == [None]
def test_list_of_nested_non_allow_none_deserialize_none_to_validation_error(self):
field = fields.List(fields.Nested(Schema(), allow_none=False))
with pytest.raises(ValidationError):
field.deserialize([None])
| TestDeserializingNone |
python | Lightning-AI__lightning | src/lightning/pytorch/demos/boring_classes.py | {
"start": 1979,
"end": 2340
} | class ____(IterableDataset):
"""
.. warning:: This is meant for testing/debugging and is experimental.
"""
def __init__(self, size: int, count: int):
self.count = count
self.size = size
def __iter__(self) -> Iterator[Tensor]:
for _ in range(self.count):
yield torch.randn(self.size)
| RandomIterableDataset |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.