language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/loops/progress.py
|
{
"start": 6159,
"end": 7134
}
|
class ____(_Progress):
"""Tracks batch progress.
These counters are local to a trainer rank. By default, they are not globally synced across all ranks.
Args:
total: Tracks the total batch progress.
current: Tracks the current batch progress.
is_last_batch: Whether the batch is the last one. This is useful for iterable datasets.
"""
is_last_batch: bool = False
@override
def reset(self) -> None:
super().reset()
self.is_last_batch = False
@override
def reset_on_run(self) -> None:
super().reset_on_run()
self.is_last_batch = False
def increment_by(self, n: int, is_last_batch: bool = False) -> None:
super().increment_by(n)
self.is_last_batch = is_last_batch
@override
def load_state_dict(self, state_dict: dict) -> None:
super().load_state_dict(state_dict)
self.is_last_batch = state_dict["is_last_batch"]
@dataclass
|
_BatchProgress
|
python
|
getsentry__sentry
|
tests/sentry/api/test_base.py
|
{
"start": 26415,
"end": 27479
}
|
class ____(APITestCase):
"""Tests for ensuring request.access is properly set before being accessed."""
def setUp(self) -> None:
super().setUp()
self.org = self.create_organization()
self.user = self.create_user()
self.create_member(user=self.user, organization=self.org)
self.request = self.make_request(user=self.user, method="GET")
def test_access_property_set_before_convert_args(self) -> None:
"""Test that request.access is available during convert_args"""
class AccessUsingEndpoint(Endpoint):
permission_classes = ()
def convert_args(self, request, *args, **kwargs):
# This should not raise an AttributeError
assert request.access is not None
return (args, kwargs)
def get(self, request):
return Response({"ok": True})
response = AccessUsingEndpoint.as_view()(self.request)
assert response.status_code == 200
assert response.data == {"ok": True}
|
RequestAccessTest
|
python
|
vyperlang__vyper
|
tests/unit/ast/test_annotate_and_optimize_ast.py
|
{
"start": 87,
"end": 1729
}
|
class ____(python_ast.NodeVisitor):
def assert_about_node(self, node):
raise AssertionError()
def generic_visit(self, node):
self.assert_about_node(node)
super().generic_visit(node)
TEST_CONTRACT_SOURCE_CODE = """
struct S:
a: bool
b: int128
interface ERC20Contract:
def name() -> String[64]: view
@external
def foo() -> int128:
return -(-(-1))
"""
def get_contract_info(source_code):
pre_parser = PreParser(is_interface=False)
pre_parser.parse(source_code)
py_ast = python_ast.parse(pre_parser.reformatted_code)
annotate_python_ast(py_ast, pre_parser.reformatted_code, pre_parser)
return py_ast, pre_parser.reformatted_code
def test_it_annotates_ast_with_source_code():
contract_ast, reformatted_code = get_contract_info(TEST_CONTRACT_SOURCE_CODE)
class AssertSourceCodePresent(AssertionVisitor):
def assert_about_node(self, node):
assert node.full_source_code is reformatted_code
AssertSourceCodePresent().visit(contract_ast)
def test_it_annotates_ast_with_class_types():
contract_ast, _ = get_contract_info(TEST_CONTRACT_SOURCE_CODE)
struct_def = contract_ast.body[0]
contract_def = contract_ast.body[1]
assert struct_def.ast_type == "StructDef"
assert contract_def.ast_type == "InterfaceDef"
def test_it_rewrites_unary_subtractions():
contract_ast, _ = get_contract_info(TEST_CONTRACT_SOURCE_CODE)
function_def = contract_ast.body[2]
return_stmt = function_def.body[0]
assert isinstance(return_stmt.value, python_ast.Constant)
assert return_stmt.value.value == -1
|
AssertionVisitor
|
python
|
automl__auto-sklearn
|
test/test_pipeline/components/classification/test_base.py
|
{
"start": 369,
"end": 12655
}
|
class ____(unittest.TestCase):
# Magic command to not run tests on base class
__test__ = False
res = None
module = None
sk_module = None
# Hyperparameter which is increased by iterative_fit
step_hyperparameter = None
def test_default_iris(self):
if self.__class__ == BaseClassificationComponentTest:
return
for i in range(2):
predictions, targets, n_calls = _test_classifier(
dataset="iris", classifier=self.module
)
self.assertAlmostEqual(
self.res["default_iris"],
sklearn.metrics.accuracy_score(targets, predictions),
places=self.res.get("default_iris_places", 7),
)
if self.res.get("iris_n_calls"):
self.assertEqual(self.res["iris_n_calls"], n_calls)
def test_get_max_iter(self):
if self.__class__ == BaseClassificationComponentTest:
return
if not hasattr(self.module, "iterative_fit"):
return
self.module.get_max_iter()
def test_default_iris_iterative_fit(self):
if self.__class__ == BaseClassificationComponentTest:
return
if not hasattr(self.module, "iterative_fit"):
return
for i in range(2):
predictions, targets, classifier = _test_classifier_iterative_fit(
dataset="iris", classifier=self.module
)
self.assertAlmostEqual(
self.res["default_iris_iterative"],
sklearn.metrics.accuracy_score(targets, predictions),
places=self.res.get("default_iris_iterative_places", 7),
)
if self.step_hyperparameter is not None:
self.assertEqual(
getattr(classifier.estimator, self.step_hyperparameter["name"]),
self.res.get(
"iris_iterative_n_iter", self.step_hyperparameter["value"]
),
)
def test_default_iris_predict_proba(self):
if self.__class__ == BaseClassificationComponentTest:
return
for _ in range(2):
predictions, targets = _test_classifier_predict_proba(
dataset="iris", classifier=self.module
)
self.assertAlmostEqual(
self.res["default_iris_proba"],
sklearn.metrics.log_loss(targets, predictions),
places=self.res.get("default_iris_proba_places", 7),
)
def test_default_iris_sparse(self):
if self.__class__ == BaseClassificationComponentTest:
return
if SPARSE not in self.module.get_properties()["input"]:
return
for i in range(2):
predictions, targets, _ = _test_classifier(
dataset="iris", classifier=self.module, sparse=True
)
self.assertAlmostEqual(
self.res["default_iris_sparse"],
sklearn.metrics.accuracy_score(targets, predictions),
places=self.res.get("default_iris_sparse_places", 7),
)
def test_default_digits_binary(self):
if self.__class__ == BaseClassificationComponentTest:
return
for i in range(2):
predictions, targets, _ = _test_classifier(
classifier=self.module, dataset="digits", sparse=False, make_binary=True
)
self.assertAlmostEqual(
self.res["default_digits_binary"],
sklearn.metrics.accuracy_score(targets, predictions),
places=self.res.get("default_digits_binary_places", 7),
)
def test_default_digits(self):
if self.__class__ == BaseClassificationComponentTest:
return
for i in range(2):
predictions, targets, n_calls = _test_classifier(
dataset="digits", classifier=self.module
)
self.assertAlmostEqual(
self.res["default_digits"],
sklearn.metrics.accuracy_score(targets, predictions),
places=self.res.get("default_digits_places", 7),
)
if self.res.get("digits_n_calls"):
self.assertEqual(self.res["digits_n_calls"], n_calls)
def test_default_digits_iterative_fit(self):
if self.__class__ == BaseClassificationComponentTest:
return
if not hasattr(self.module, "iterative_fit"):
return
for i in range(2):
predictions, targets, classifier = _test_classifier_iterative_fit(
dataset="digits", classifier=self.module
)
self.assertAlmostEqual(
self.res["default_digits_iterative"],
sklearn.metrics.accuracy_score(targets, predictions),
places=self.res.get("default_digits_iterative_places", 7),
)
if self.step_hyperparameter is not None:
self.assertEqual(
getattr(classifier.estimator, self.step_hyperparameter["name"]),
self.res.get(
"digits_iterative_n_iter", self.step_hyperparameter["value"]
),
)
def test_default_digits_multilabel(self):
if self.__class__ == BaseClassificationComponentTest:
return
if not self.module.get_properties()["handles_multilabel"]:
return
for _ in range(2):
predictions, targets, _ = _test_classifier(
classifier=self.module, dataset="digits", make_multilabel=True
)
score = sklearn.metrics.precision_score(
targets, predictions, average="macro", zero_division=0
)
self.assertAlmostEqual(
self.res["default_digits_multilabel"],
score,
places=self.res.get("default_digits_multilabel_places", 7),
)
def test_default_digits_multilabel_predict_proba(self):
if self.__class__ == BaseClassificationComponentTest:
return
if not self.module.get_properties()["handles_multilabel"]:
return
for i in range(2):
predictions, targets = _test_classifier_predict_proba(
classifier=self.module, make_multilabel=True
)
self.assertEqual(predictions.shape, ((50, 3)))
self.assertAlmostEqual(
self.res["default_digits_multilabel_proba"],
sklearn.metrics.roc_auc_score(targets, predictions, average="macro"),
places=self.res.get("default_digits_multilabel_proba_places", 7),
)
def test_target_algorithm_multioutput_multiclass_support(self):
if self.__class__ == BaseClassificationComponentTest:
return
if not self.module.get_properties()["handles_multiclass"]:
return
elif self.sk_module is not None:
cls = self.sk_module
X = np.random.random((10, 10))
y = np.random.randint(0, 1, size=(10, 10))
self.assertRaisesRegex(
ValueError, "bad input shape \\(10, 10\\)", cls.fit, X, y
)
else:
return
def test_module_idempotent(self):
"""Fitting twice with the same config gives the same model params.
This is only valid when the random_state passed is an int. If a
RandomState object is passed then repeated calls to fit will have
different results. See the section on "Controlling Randomness" in the
sklearn docs.
https://scikit-learn.org/0.24/common_pitfalls.html#controlling-randomness
"""
if self.__class__ == BaseClassificationComponentTest:
return
classifier_cls = self.module
X = np.array(
[
[0, 0],
[0, 1],
[1, 0],
[1, 1],
[0, 0],
[0, 1],
[1, 0],
[1, 1],
[0, 0],
[0, 1],
[1, 0],
[1, 1],
[0, 0],
[0, 1],
[1, 0],
[1, 1],
]
)
y = np.array(
[
0,
1,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
0,
1,
1,
0,
]
)
# There are certain errors we ignore so we wrap this in a function
def fitted_params(model) -> Optional[Dict]:
"""
Returns the params if fitted successfully, else None if an
acceptable error occurs
"""
# We are okay with Numerical in Quadractic disciminant analysis
def is_QDA_error(err):
return "Numerical problems in QDA" in err.args[0]
# We are okay if the BaseClassifier in AdaBoostClassifier is worse
# than random so no ensemble can be fit
def is_AdaBoostClassifier_error(err):
return (
"BaseClassifier in AdaBoostClassifier ensemble is worse"
+ " than random, ensemble can not be fit."
in err.args[0]
)
def is_unset_param_raw_predictions_val_error(err):
return (
"local variable 'raw_predictions_val' referenced before"
+ " assignment"
in err.args[0]
)
try:
with ignore_warnings(classifier_warnings):
model.fit(X.copy(), y.copy())
except ValueError as e:
if is_AdaBoostClassifier_error(e) or is_QDA_error(e):
return None
except UnboundLocalError as e:
if is_unset_param_raw_predictions_val_error(e):
return None
return model.estimator.get_params()
# We ignore certain keys when comparing
param_keys_ignored = ["base_estimator"]
# We use the default config + sampled ones
configuration_space = classifier_cls.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
sampled = [configuration_space.sample_configuration() for _ in range(2)]
for seed, config in enumerate([default] + sampled):
model_args = {"random_state": seed, **config}
classifier = classifier_cls(**model_args)
# Get the parameters on the first and second fit with config params
params_first = fitted_params(classifier)
if hasattr(classifier.estimator, "random_state"):
rs_1 = classifier.random_state
rs_estimator_1 = classifier.estimator.random_state
params_second = fitted_params(classifier)
if hasattr(classifier.estimator, "random_state"):
rs_2 = classifier.random_state
rs_estimator_2 = classifier.estimator.random_state
# An acceptable error occured, skip to next sample
if params_first is None or params_second is None:
continue
# Remove keys we don't wish to include in the comparison
for params in [params_first, params_second]:
for key in param_keys_ignored:
if key in params:
del params[key]
# They should have equal parameters
self.assertEqual(
params_first, params_second, f"Failed with model args {model_args}"
)
if hasattr(classifier.estimator, "random_state"):
assert all(
[
seed == random_state
for random_state in [rs_1, rs_estimator_1, rs_2, rs_estimator_2]
]
)
|
BaseClassificationComponentTest
|
python
|
mwaskom__seaborn
|
tests/_marks/test_text.py
|
{
"start": 231,
"end": 4344
}
|
class ____:
def get_texts(self, ax):
if ax.texts:
return list(ax.texts)
else:
# Compatibility with matplotlib < 3.5 (I think)
return [a for a in ax.artists if isinstance(a, MPLText)]
def test_simple(self):
x = y = [1, 2, 3]
s = list("abc")
p = Plot(x, y, text=s).add(Text()).plot()
ax = p._figure.axes[0]
for i, text in enumerate(self.get_texts(ax)):
x_, y_ = text.get_position()
assert x_ == x[i]
assert y_ == y[i]
assert text.get_text() == s[i]
assert text.get_horizontalalignment() == "center"
assert text.get_verticalalignment() == "center_baseline"
def test_set_properties(self):
x = y = [1, 2, 3]
s = list("abc")
color = "red"
alpha = .6
fontsize = 6
valign = "bottom"
m = Text(color=color, alpha=alpha, fontsize=fontsize, valign=valign)
p = Plot(x, y, text=s).add(m).plot()
ax = p._figure.axes[0]
for i, text in enumerate(self.get_texts(ax)):
assert text.get_text() == s[i]
assert text.get_color() == to_rgba(m.color, m.alpha)
assert text.get_fontsize() == m.fontsize
assert text.get_verticalalignment() == m.valign
def test_mapped_properties(self):
x = y = [1, 2, 3]
s = list("abc")
color = list("aab")
fontsize = [1, 2, 4]
p = Plot(x, y, color=color, fontsize=fontsize, text=s).add(Text()).plot()
ax = p._figure.axes[0]
texts = self.get_texts(ax)
assert texts[0].get_color() == texts[1].get_color()
assert texts[0].get_color() != texts[2].get_color()
assert (
texts[0].get_fontsize()
< texts[1].get_fontsize()
< texts[2].get_fontsize()
)
def test_mapped_alignment(self):
x = [1, 2]
p = Plot(x=x, y=x, halign=x, valign=x, text=x).add(Text()).plot()
ax = p._figure.axes[0]
t1, t2 = self.get_texts(ax)
assert t1.get_horizontalalignment() == "left"
assert t2.get_horizontalalignment() == "right"
assert t1.get_verticalalignment() == "top"
assert t2.get_verticalalignment() == "bottom"
def test_identity_fontsize(self):
x = y = [1, 2, 3]
s = list("abc")
fs = [5, 8, 12]
p = Plot(x, y, text=s, fontsize=fs).add(Text()).scale(fontsize=None).plot()
ax = p._figure.axes[0]
for i, text in enumerate(self.get_texts(ax)):
assert text.get_fontsize() == fs[i]
def test_offset_centered(self):
x = y = [1, 2, 3]
s = list("abc")
p = Plot(x, y, text=s).add(Text()).plot()
ax = p._figure.axes[0]
ax_trans = ax.transData.get_matrix()
for text in self.get_texts(ax):
assert_array_almost_equal(text.get_transform().get_matrix(), ax_trans)
def test_offset_valign(self):
x = y = [1, 2, 3]
s = list("abc")
m = Text(valign="bottom", fontsize=5, offset=.1)
p = Plot(x, y, text=s).add(m).plot()
ax = p._figure.axes[0]
expected_shift_matrix = np.zeros((3, 3))
expected_shift_matrix[1, -1] = m.offset * ax.figure.dpi / 72
ax_trans = ax.transData.get_matrix()
for text in self.get_texts(ax):
shift_matrix = text.get_transform().get_matrix() - ax_trans
assert_array_almost_equal(shift_matrix, expected_shift_matrix)
def test_offset_halign(self):
x = y = [1, 2, 3]
s = list("abc")
m = Text(halign="right", fontsize=10, offset=.5)
p = Plot(x, y, text=s).add(m).plot()
ax = p._figure.axes[0]
expected_shift_matrix = np.zeros((3, 3))
expected_shift_matrix[0, -1] = -m.offset * ax.figure.dpi / 72
ax_trans = ax.transData.get_matrix()
for text in self.get_texts(ax):
shift_matrix = text.get_transform().get_matrix() - ax_trans
assert_array_almost_equal(shift_matrix, expected_shift_matrix)
|
TestText
|
python
|
getsentry__sentry
|
tests/sentry/core/endpoints/test_organization_member_index.py
|
{
"start": 34581,
"end": 40977
}
|
class ____(OrganizationMemberListTestBase, HybridCloudTestMixin):
method = "post"
def test_forbid_qq(self) -> None:
data = {"email": "1234@qq.com", "role": "member", "teams": [self.team.slug]}
response = self.get_error_response(self.organization.slug, **data, status_code=400)
assert response.data["email"][0] == "Enter a valid email address."
@patch.object(OrganizationMember, "send_invite_email")
def test_simple(self, mock_send_invite_email: MagicMock) -> None:
data = {"email": "jane@gmail.com", "role": "member", "teams": [self.team.slug]}
response = self.get_success_response(self.organization.slug, **data)
om = OrganizationMember.objects.get(id=response.data["id"])
assert om.user_id is None
assert om.email == "jane@gmail.com"
assert om.role == "member"
assert list(om.teams.all()) == [self.team]
assert om.inviter_id == self.user.id
self.assert_org_member_mapping(org_member=om)
mock_send_invite_email.assert_called_once()
def test_no_teams(self) -> None:
data = {"email": "jane@gmail.com", "role": "member"}
response = self.get_success_response(self.organization.slug, **data)
om = OrganizationMember.objects.get(id=response.data["id"])
assert om.user_id is None
assert om.email == "jane@gmail.com"
assert om.role == "member"
assert list(om.teams.all()) == []
assert om.inviter_id == self.user.id
self.assert_org_member_mapping(org_member=om)
@patch.object(OrganizationMember, "send_invite_email")
def test_no_email(self, mock_send_invite_email: MagicMock) -> None:
data = {
"email": "jane@gmail.com",
"role": "member",
"teams": [self.team.slug],
"sendInvite": False,
}
response = self.get_success_response(self.organization.slug, **data)
om = OrganizationMember.objects.get(id=response.data["id"])
assert om.user_id is None
assert om.email == "jane@gmail.com"
assert om.role == "member"
assert list(om.teams.all()) == [self.team]
assert om.inviter_id == self.user.id
self.assert_org_member_mapping(org_member=om)
assert not mock_send_invite_email.mock_calls
@patch.object(OrganizationMember, "send_invite_email")
def test_referrer_param(self, mock_send_invite_email: MagicMock) -> None:
data = {
"email": "jane@gmail.com",
"role": "member",
"teams": [self.team.slug],
}
response = self.get_success_response(
self.organization.slug, **data, qs_params={"referrer": "test_referrer"}
)
om = OrganizationMember.objects.get(id=response.data["id"])
assert om.user_id is None
assert om.email == "jane@gmail.com"
assert om.role == "member"
assert list(om.teams.all()) == [self.team]
assert om.inviter_id == self.user.id
self.assert_org_member_mapping(org_member=om)
mock_send_invite_email.assert_called_with("test_referrer")
@patch.object(OrganizationMember, "send_invite_email")
def test_internal_integration_token_can_only_invite_member_role(
self, mock_send_invite_email: MagicMock
) -> None:
internal_integration = self.create_internal_integration(
name="Internal App", organization=self.organization, scopes=["member:write"]
)
token = self.create_internal_integration_token(
user=self.user, internal_integration=internal_integration
)
err_message = (
"Integration tokens are restricted to inviting new members with the member role only."
)
data = {"email": "jane@gmail.com", "role": "owner", "teams": [self.team.slug]}
response = self.get_error_response(
self.organization.slug,
**data,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token.token}"},
status_code=400,
)
assert response.data[0] == err_message
data = {"email": "jane@gmail.com", "role": "manager", "teams": [self.team.slug]}
response = self.get_error_response(
self.organization.slug,
**data,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token.token}"},
status_code=400,
)
assert response.data[0] == err_message
data = {"email": "jane@gmail.com", "role": "member", "teams": [self.team.slug]}
response = self.get_success_response(
self.organization.slug,
**data,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token.token}"},
status_code=201,
)
om = OrganizationMember.objects.get(id=response.data["id"])
assert om.user_id is None
assert om.email == "jane@gmail.com"
assert om.role == "member"
assert list(om.teams.all()) == [self.team]
self.assert_org_member_mapping(org_member=om)
mock_send_invite_email.assert_called_once()
@patch("sentry.ratelimits.for_organization_member_invite")
def test_rate_limited(self, mock_rate_limit: MagicMock) -> None:
mock_rate_limit.return_value = True
data = {"email": "jane@gmail.com", "role": "member"}
self.get_error_response(self.organization.slug, **data, status_code=429)
assert not OrganizationMember.objects.filter(email="jane@gmail.com").exists()
@patch(
"sentry.roles.organization_roles.get",
wraps=mock_organization_roles_get_factory(organization_roles.get),
)
def test_cannot_add_to_team_when_team_roles_disabled(self, mock_get: MagicMock) -> None:
owner_user = self.create_user("owner@localhost")
self.owner = self.create_member(
user=owner_user, organization=self.organization, role="owner"
)
self.login_as(user=owner_user)
data = {
"email": "eric@localhost",
"orgRole": "member",
"teamRoles": [{"teamSlug": self.team.slug, "role": None}],
}
response = self.get_error_response(self.organization.slug, **data, status_code=400)
assert (
response.data["email"]
== "The user with a 'member' role cannot have team-level permissions."
)
|
OrganizationMemberListPostTest
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/local_compute_log_manager.py
|
{
"start": 13061,
"end": 13792
}
|
class ____(PatternMatchingEventHandler):
def __init__(self, manager, log_key, update_paths, complete_paths):
self.manager = manager
self.log_key = log_key
self.update_paths = update_paths
self.complete_paths = complete_paths
patterns = update_paths + complete_paths
super().__init__(patterns=patterns)
def on_created(self, event):
if event.src_path in self.complete_paths:
self.manager.remove_all_subscriptions(self.log_key)
self.manager.unwatch(self.log_key, self)
def on_modified(self, event):
if event.src_path in self.update_paths:
self.manager.notify_subscriptions(self.log_key)
|
LocalComputeLogFilesystemEventHandler
|
python
|
getsentry__sentry
|
src/sentry/snuba/metrics/extraction.py
|
{
"start": 11675,
"end": 12668
}
|
class ____(TypedDict):
"""
Specification for a metric to extract from some data.
The metric type is given as part of the MRI (metric reference identifier)
which must follow the form: `<type>:<namespace>/<name>@<unit>`.
How the metric's value is obtained depends on the metric type:
- Counter metrics are a special case, since the default product counters do
not count any specific field but rather the occurrence of the event. As
such, there is no value expression, and the field is set to `None`.
Semantics of specifying remain undefined at this point.
- Distribution metrics require a numeric value.
- Set metrics require a string value, which is then emitted into the set as
unique value. Insertion of numbers and other types is undefined.
"""
category: Literal["transaction"]
mri: str
field: NotRequired[str | None]
condition: NotRequired[RuleCondition]
tags: NotRequired[Sequence[TagSpec]]
|
MetricSpec
|
python
|
django__django
|
django/contrib/admin/exceptions.py
|
{
"start": 57,
"end": 194
}
|
class ____(SuspiciousOperation):
"""Invalid filter was passed to admin view via URL querystring"""
pass
|
DisallowedModelAdminLookup
|
python
|
sympy__sympy
|
sympy/integrals/manualintegrate.py
|
{
"start": 11177,
"end": 11398
}
|
class ____(Rule):
"""Leave the integral as is."""
def eval(self) -> Expr:
return Integral(self.integrand, self.variable)
def contains_dont_know(self) -> bool:
return True
@dataclass
|
DontKnowRule
|
python
|
getsentry__sentry
|
tests/sentry/incidents/endpoints/validators/test_validators.py
|
{
"start": 11145,
"end": 24618
}
|
class ____(TestMetricAlertsDetectorValidator):
@mock.patch("sentry.incidents.metric_issue_detector.schedule_update_project_config")
@mock.patch("sentry.workflow_engine.endpoints.validators.base.detector.create_audit_entry")
def test_create_with_valid_data(
self, mock_audit: mock.MagicMock, mock_schedule_update_project_config
) -> None:
detector = self.create_static_detector()
# Verify audit log
mock_audit.assert_called_once_with(
request=self.context["request"],
organization=self.project.organization,
target_object=detector.id,
event=audit_log.get_event_id("DETECTOR_ADD"),
data=detector.get_audit_log_data(),
)
mock_schedule_update_project_config.assert_called_once_with(detector)
@mock.patch(
"sentry.seer.anomaly_detection.store_data_workflow_engine.seer_anomaly_detection_connection_pool.urlopen"
)
@mock.patch("sentry.workflow_engine.endpoints.validators.base.detector.create_audit_entry")
def test_anomaly_detection(
self, mock_audit: mock.MagicMock, mock_seer_request: mock.MagicMock
) -> None:
seer_return_value: StoreDataResponse = {"success": True}
mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
detector = self.create_dynamic_detector()
# Verify detector in DB
self.assert_validated(detector)
assert mock_seer_request.call_count == 1
# Verify audit log
mock_audit.assert_called_once_with(
request=self.context["request"],
organization=self.project.organization,
target_object=detector.id,
event=audit_log.get_event_id("DETECTOR_ADD"),
data=detector.get_audit_log_data(),
)
def test_anomaly_detection__invalid_comparison(self) -> None:
data = {
**self.valid_data,
"conditionGroup": {
"id": self.data_condition_group.id,
"organizationId": self.organization.id,
"logicType": self.data_condition_group.logic_type,
"conditions": [
{
"type": Condition.ANOMALY_DETECTION,
"comparison": {
"sensitivity": "super sensitive",
"seasonality": AnomalyDetectionSeasonality.AUTO,
"threshold_type": AnomalyDetectionThresholdType.ABOVE_AND_BELOW,
},
"conditionResult": DetectorPriorityLevel.HIGH,
"conditionGroupId": self.data_condition_group.id,
},
],
},
"config": {
"threshold_period": 1,
"detection_type": AlertRuleDetectionType.DYNAMIC.value,
},
}
validator = MetricIssueDetectorValidator(
data=data,
context=self.context,
)
assert not validator.is_valid()
@mock.patch(
"sentry.seer.anomaly_detection.store_data_workflow_engine.seer_anomaly_detection_connection_pool.urlopen"
)
def test_anomaly_detection__send_historical_data_fails(
self, mock_seer_request: mock.MagicMock
) -> None:
"""
Test that if the call to Seer fails that we do not create the detector, dcg, and data condition
"""
from django.core.exceptions import ValidationError
mock_seer_request.side_effect = TimeoutError
assert not DataCondition.objects.filter(type=Condition.ANOMALY_DETECTION).exists()
DataConditionGroup.objects.all().delete()
validator = MetricIssueDetectorValidator(
data=self.valid_anomaly_detection_data,
context=self.context,
)
assert validator.is_valid(), validator.errors
detector = None
with self.tasks(), pytest.raises(ValidationError):
detector = validator.save()
assert not detector
assert not DataCondition.objects.filter(type=Condition.ANOMALY_DETECTION).exists()
assert DataConditionGroup.objects.all().count() == 0
mock_seer_request.side_effect = MaxRetryError(
seer_anomaly_detection_connection_pool, SEER_ANOMALY_DETECTION_STORE_DATA_URL
)
validator = MetricIssueDetectorValidator(
data=self.valid_anomaly_detection_data,
context=self.context,
)
assert validator.is_valid(), validator.errors
with self.tasks(), pytest.raises(ValidationError):
detector = validator.save()
assert not detector
assert not DataCondition.objects.filter(type=Condition.ANOMALY_DETECTION).exists()
assert DataConditionGroup.objects.all().count() == 0
def test_invalid_detector_type(self) -> None:
data = {**self.valid_data, "type": "invalid_type"}
validator = MetricIssueDetectorValidator(data=data, context=self.context)
assert not validator.is_valid()
assert validator.errors.get("type") == [
ErrorDetail(
string=get_unknown_detector_type_error("invalid_type", self.organization),
code="invalid",
)
]
def test_no_resolution_condition(self) -> None:
data = {
**self.valid_data,
"conditionGroup": {
"id": self.data_condition_group.id,
"organizationId": self.organization.id,
"logicType": self.data_condition_group.logic_type,
"conditions": [
{
"type": Condition.GREATER,
"comparison": 100,
"conditionResult": DetectorPriorityLevel.HIGH,
"conditionGroupId": self.data_condition_group.id,
},
],
},
}
validator = MetricIssueDetectorValidator(data=data, context=self.context)
assert not validator.is_valid()
assert validator.errors.get("conditionGroup", {}).get("conditions") == [
ErrorDetail(
string="Resolution condition required for metric issue detector.", code="invalid"
)
]
def test_too_many_conditions(self) -> None:
data = {
**self.valid_data,
"conditionGroup": {
"id": self.data_condition_group.id,
"organizationId": self.organization.id,
"logicType": self.data_condition_group.logic_type,
"conditions": [
{
"type": Condition.GREATER,
"comparison": 100,
"conditionResult": DetectorPriorityLevel.HIGH,
"conditionGroupId": self.data_condition_group.id,
},
{
"type": Condition.GREATER,
"comparison": 200,
"conditionResult": DetectorPriorityLevel.HIGH,
"conditionGroupId": self.data_condition_group.id,
},
{
"type": Condition.GREATER,
"comparison": 300,
"conditionResult": DetectorPriorityLevel.HIGH,
"conditionGroupId": self.data_condition_group.id,
},
{
"type": Condition.LESS_OR_EQUAL,
"comparison": 100,
"conditionResult": DetectorPriorityLevel.OK,
"conditionGroupId": self.data_condition_group.id,
},
],
},
}
validator = MetricIssueDetectorValidator(data=data, context=self.context)
assert not validator.is_valid()
assert validator.errors.get("nonFieldErrors") == [
ErrorDetail(string="Too many conditions", code="invalid")
]
@mock.patch("sentry.quotas.backend.get_metric_detector_limit")
def test_enforce_quota_feature_disabled(self, mock_get_limit: mock.MagicMock) -> None:
mock_get_limit.return_value = 0
validator = MetricIssueDetectorValidator(data=self.valid_data, context=self.context)
assert validator.is_valid()
assert validator.save()
@mock.patch("sentry.quotas.backend.get_metric_detector_limit")
@with_feature("organizations:workflow-engine-metric-detector-limit")
def test_enforce_quota_within_limit(self, mock_get_limit: mock.MagicMock) -> None:
mock_get_limit.return_value = 1
# Create a not-metric detector
self.create_detector(
project=self.project,
name="Error Detector",
status=ObjectStatus.ACTIVE,
)
# Create 3 inactive detectors
for status in [
ObjectStatus.DISABLED,
ObjectStatus.PENDING_DELETION,
ObjectStatus.DELETION_IN_PROGRESS,
]:
self.create_detector(
project_id=self.project.id,
name=f"Inactive Detector {status}",
type=MetricIssue.slug,
status=status,
)
validator = MetricIssueDetectorValidator(data=self.valid_data, context=self.context)
assert validator.is_valid()
assert validator.save()
mock_get_limit.assert_called_once_with(self.project.organization.id)
validator = MetricIssueDetectorValidator(data=self.valid_data, context=self.context)
validator.is_valid()
with self.assertRaisesMessage(
ValidationError,
expected_message="Used 1/1 of allowed metric_issue monitors.",
):
validator.save()
@with_feature("organizations:discover-saved-queries-deprecation")
def test_transaction_dataset_deprecation_transactions(self) -> None:
data = {
**self.valid_data,
"dataSources": [
{
"queryType": SnubaQuery.Type.PERFORMANCE.value,
"dataset": Dataset.Transactions.value,
"query": "test query",
"aggregate": "count()",
"timeWindow": 3600,
"environment": self.environment.name,
"eventTypes": [SnubaQueryEventType.EventType.TRANSACTION.name.lower()],
}
],
}
validator = MetricIssueDetectorValidator(data=data, context=self.context)
assert validator.is_valid(), validator.errors
with self.assertRaisesMessage(
ValidationError,
expected_message="Creation of transaction-based alerts is disabled, as we migrate to the span dataset. Create span-based alerts (dataset: events_analytics_platform) with the is_transaction:true filter instead.",
):
validator.save()
@with_feature("organizations:discover-saved-queries-deprecation")
@with_feature("organizations:mep-rollout-flag")
def test_transaction_dataset_deprecation_generic_metrics(self) -> None:
data = {
**self.valid_data,
"dataSources": [
{
"queryType": SnubaQuery.Type.PERFORMANCE.value,
"dataset": Dataset.PerformanceMetrics.value,
"query": "test query",
"aggregate": "count()",
"timeWindow": 3600,
"environment": self.environment.name,
"eventTypes": [SnubaQueryEventType.EventType.TRANSACTION.name.lower()],
}
],
}
validator = MetricIssueDetectorValidator(data=data, context=self.context)
assert validator.is_valid(), validator.errors
with self.assertRaisesMessage(
ValidationError,
expected_message="Creation of transaction-based alerts is disabled, as we migrate to the span dataset. Create span-based alerts (dataset: events_analytics_platform) with the is_transaction:true filter instead.",
):
validator.save()
@with_feature("organizations:discover-saved-queries-deprecation")
def test_transaction_dataset_deprecation_multiple_data_sources(self) -> None:
data = {
**self.valid_data,
"dataSources": [
{
"queryType": SnubaQuery.Type.PERFORMANCE.value,
"dataset": Dataset.Transactions.value,
"query": "test query",
"aggregate": "count()",
"timeWindow": 3600,
"environment": self.environment.name,
"eventTypes": [SnubaQueryEventType.EventType.TRANSACTION.name.lower()],
},
],
}
validator = MetricIssueDetectorValidator(data=data, context=self.context)
assert validator.is_valid(), validator.errors
with self.assertRaisesMessage(
ValidationError,
expected_message="Creation of transaction-based alerts is disabled, as we migrate to the span dataset. Create span-based alerts (dataset: events_analytics_platform) with the is_transaction:true filter instead.",
):
validator.save()
|
TestMetricAlertsCreateDetectorValidator
|
python
|
kamyu104__LeetCode-Solutions
|
Python/power-of-three.py
|
{
"start": 356,
"end": 483
}
|
class ____(object):
def isPowerOfThree(self, n):
return n > 0 and (math.log10(n)/math.log10(3)).is_integer()
|
Solution2
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_eager_relations.py
|
{
"start": 204195,
"end": 207168
}
|
class ____(fixtures.DeclarativeMappedTest):
"""test for [ticket:3431]"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
b_id = Column(ForeignKey("b.id"))
c_id = Column(ForeignKey("c.id"))
b = relationship("B")
c = relationship("C")
class B(Base):
__tablename__ = "b"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
c_id = Column(ForeignKey("c.id"))
c = relationship("C")
class C(Base):
__tablename__ = "c"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
d_id = Column(ForeignKey("d.id"))
d = relationship("D")
class D(Base):
__tablename__ = "d"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", ForeignKey("b.id")),
)
def test_multi_path_load(self):
A, B, C, D = self.classes("A", "B", "C", "D")
s = fixture_session()
c = C(d=D())
s.add(A(b=B(c=c), c=c))
s.commit()
c_alias_1 = aliased(C)
c_alias_2 = aliased(C)
q = s.query(A)
q = q.join(A.b).join(c_alias_1, B.c).join(c_alias_1.d)
q = q.options(
contains_eager(A.b)
.contains_eager(B.c, alias=c_alias_1)
.contains_eager(C.d)
)
q = q.join(c_alias_2, A.c)
q = q.options(contains_eager(A.c, alias=c_alias_2))
a1 = q.all()[0]
# ensure 'd' key was populated in dict. Varies based on
# PYTHONHASHSEED
in_("d", a1.c.__dict__)
def test_multi_path_load_of_type(self):
A, B, C, D = self.classes("A", "B", "C", "D")
s = fixture_session()
c = C(d=D())
s.add(A(b=B(c=c), c=c))
s.commit()
c_alias_1 = aliased(C)
c_alias_2 = aliased(C)
q = s.query(A)
q = q.join(A.b).join(B.c.of_type(c_alias_1)).join(c_alias_1.d)
q = q.options(
contains_eager(A.b)
.contains_eager(B.c.of_type(c_alias_1))
.contains_eager(c_alias_1.d)
)
q = q.join(A.c.of_type(c_alias_2))
q = q.options(contains_eager(A.c.of_type(c_alias_2)))
a1 = q.all()[0]
# ensure 'd' key was populated in dict. Varies based on
# PYTHONHASHSEED
in_("d", a1.c.__dict__)
|
EntityViaMultiplePathTestOne
|
python
|
jina-ai__jina
|
tests/unit/orchestrate/flow/flow-construct/test_flow.py
|
{
"start": 8060,
"end": 12481
}
|
class ____(BaseExecutor):
"""Class used in Flow YAML"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pod/pod-specific
assert 'key1' not in os.environ
assert 'key2' not in os.environ
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
def test_flow_with_pod_envs():
f = Flow.load_config(os.path.join(cur_dir, 'yaml/flow-with-envs.yml'))
with f:
_validate_flow(f)
@pytest.mark.slow
@pytest.mark.parametrize('protocol', ['websocket', 'grpc', 'http'])
@pytest.mark.parametrize('on_done', [None, lambda x: x])
def test_return_results_sync_flow(protocol, on_done):
with Flow(protocol=protocol).add() as f:
da = f.index(
from_ndarray(np.random.random([10, 2])),
on_done=on_done,
)
if on_done is None:
assert isinstance(da, DocumentArray)
assert len(da) == 10
for doc in da:
assert isinstance(doc, Document)
else:
assert da is None
_validate_flow(f)
@pytest.mark.parametrize(
'input,expected_host,expected_port',
[
('0.0.0.0', ['0.0.0.0'], None),
('0.0.0.0:12345', ['0.0.0.0'], ['12345']),
('123.124.125.0:45678', ['123.124.125.0'], ['45678']),
('api.jina.ai:45678', ['api.jina.ai'], ['45678']),
(['api.jina.ai', '123.124.125.0'], ['api.jina.ai', '123.124.125.0'], None),
(
['api.jina.ai:12345', '123.124.125.0:45678'],
['api.jina.ai', '123.124.125.0'],
['12345', '45678'],
),
],
)
def test_flow_host_expose_shortcut(input, expected_host, expected_port):
f = Flow().add(host=input).build()
assert f['executor0'].args.host == expected_host
if expected_port:
assert f['executor0'].args.port == expected_port
def test_flow_workspace_id():
f = Flow().add().add().add().build()
assert len(f.workspace_id) == 4
assert len(set(f.workspace_id.values())) == 4
with pytest.raises(ValueError):
f.workspace_id = 'hello'
new_id = random_identity()
f.workspace_id = new_id
assert len(set(f.workspace_id.values())) == 1
assert list(f.workspace_id.values())[0] == new_id
@pytest.mark.skipif(
'GITHUB_WORKFLOW' in os.environ,
reason='not stable in gh action',
)
@pytest.mark.slow
def test_bad_pod_graceful_termination():
def asset_bad_flow(f):
with pytest.raises(RuntimeFailToStart):
with f:
assert f._build_level == FlowBuildLevel.EMPTY
# bad local pod
asset_bad_flow(Flow().add(name='exec2', uses='hello-there'))
# bad local pod at second
asset_bad_flow(Flow().add().add(name='exec3', uses='hello-there'))
# bad local pod at second, with correct pod at last
asset_bad_flow(Flow().add().add(name='exec5', uses='hello-there').add())
def test_socket_types_2_remote_one_local():
f = (
Flow()
.add(name='executor1', host='0.0.0.1')
.add(name='executor2', shards=2, host='0.0.0.2')
.add(name='executor3', shards=2, host='1.2.3.4', needs=['gateway'])
.needs(name='join', needs=['executor2', 'executor3'])
)
f.build()
_validate_flow(f)
def test_socket_types_2_remote_one_local_input_socket_pull_connect_from_remote():
f = (
Flow()
.add(name='executor1', host='0.0.0.1')
.add(name='executor2', shards=2, host='0.0.0.2')
.add(name='executor3', shards=2, host='1.2.3.4', needs=['gateway'])
.needs(name='join', needs=['executor2', 'executor3'])
)
f.build()
_validate_flow(f)
def test_single_document_flow_index():
d = Document()
with Flow().add() as f:
f.index(d)
f.index(lambda: d)
def test_flow_equalities():
f1 = (
Flow()
.add(name='executor0')
.add(name='executor1', needs='gateway')
.needs_all(name='joiner')
)
f2 = (
Flow()
.add(name='executor0')
.add(name='executor1', needs='gateway')
.add(name='joiner', needs=['executor0', 'executor1'])
)
assert f1 == f2
f2 = f2.add(name='executor0')
assert f1 != f2
def test_flow_get_item():
f1 = Flow().add().add(needs='gateway').needs_all(name='joiner')
assert isinstance(f1[1], Deployment)
assert isinstance(f1['executor0'], Deployment)
|
EnvChecker2
|
python
|
crytic__slither
|
slither/tools/upgradeability/checks/constant.py
|
{
"start": 195,
"end": 3058
}
|
class ____(AbstractCheck):
ARGUMENT = "were-constant"
IMPACT = CheckClassification.HIGH
HELP = "Variables that should be constant"
WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#variables-that-should-be-constant"
WIKI_TITLE = "Variables that should be constant"
# region wiki_description
WIKI_DESCRIPTION = """
Detect state variables that should be `constant̀`.
"""
# endregion wiki_description
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Contract{
uint variable1;
uint constant variable2;
uint variable3;
}
contract ContractV2{
uint variable1;
uint variable2;
uint variable3;
}
```
Because `variable2` is not anymore a `constant`, the storage location of `variable3` will be different.
As a result, `ContractV2` will have a corrupted storage layout.
"""
# endregion wiki_exploit_scenario
# region wiki_recommendation
WIKI_RECOMMENDATION = """
Do not remove `constant` from a state variables during an update.
"""
# endregion wiki_recommendation
REQUIRE_CONTRACT = True
REQUIRE_CONTRACT_V2 = True
def _check(self) -> List[Output]:
contract_v1 = self.contract
contract_v2 = self.contract_v2
if contract_v2 is None:
raise Exception("were-constant requires a V2 contract")
state_variables_v1 = contract_v1.state_variables
state_variables_v2 = contract_v2.state_variables
v2_additional_variables = len(state_variables_v2) - len(state_variables_v1)
v2_additional_variables = max(v2_additional_variables, 0)
# We keep two index, because we need to have them out of sync if v2
# has additional non constant variables
idx_v1 = 0
idx_v2 = 0
results = []
while idx_v1 < len(state_variables_v1):
state_v1 = contract_v1.state_variables[idx_v1]
if len(state_variables_v2) <= idx_v2:
break
state_v2 = contract_v2.state_variables[idx_v2]
if state_v2:
if state_v1.is_constant:
if not state_v2.is_constant:
# If v2 has additional non constant variables, we need to skip them
if (
state_v1.name != state_v2.name or state_v1.type != state_v2.type
) and v2_additional_variables > 0:
v2_additional_variables -= 1
idx_v2 += 1
continue
info: CHECK_INFO = [state_v1, " was constant, but ", state_v2, "is not.\n"]
json = self.generate_result(info)
results.append(json)
idx_v1 += 1
idx_v2 += 1
return results
|
WereConstant
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_eager_relations.py
|
{
"start": 210619,
"end": 217238
}
|
class ____(fixtures.DeclarativeMappedTest):
"""test for [ticket:3963]"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
bs = relationship("B")
class B(Base):
__tablename__ = "b"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
a_id = Column(ForeignKey("a.id"))
cs = relationship("C")
class C(Base):
__tablename__ = "c"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
b_id = Column(ForeignKey("b.id"))
@classmethod
def insert_data(cls, connection):
A, B, C = cls.classes("A", "B", "C")
s = Session(connection)
s.add(A(id=1, bs=[B(cs=[C()])]))
s.add(A(id=2))
s.commit()
def _run_tests(self, query, expected):
def go():
for a, _ in query:
for b in a.bs:
b.cs
self.assert_sql_count(testing.db, go, expected)
def test_lazyload_aliased_abs_bcs_one(self):
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
q = (
s.query(aa, A)
.filter(aa.id == 1)
.filter(A.id == 2)
.filter(aa.id != A.id)
.options(joinedload(A.bs).joinedload(B.cs))
)
self._run_tests(q, 3)
def test_lazyload_aliased_abs_bcs_two(self):
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
q = (
s.query(aa, A)
.filter(aa.id == 1)
.filter(A.id == 2)
.filter(aa.id != A.id)
.options(defaultload(A.bs).joinedload(B.cs))
)
self._run_tests(q, 3)
def test_pathed_lazyload_aliased_abs_bcs(self):
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
opt = Load(A).joinedload(A.bs).joinedload(B.cs)
q = (
s.query(aa, A)
.filter(aa.id == 1)
.filter(A.id == 2)
.filter(aa.id != A.id)
.options(opt)
)
self._run_tests(q, 3)
def test_pathed_lazyload_plus_joined_aliased_abs_bcs(self):
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
opt = Load(aa).defaultload(aa.bs).joinedload(B.cs)
q = (
s.query(aa, A)
.filter(aa.id == 1)
.filter(A.id == 2)
.filter(aa.id != A.id)
.options(opt)
)
self._run_tests(q, 2)
def test_pathed_joinedload_aliased_abs_bcs(self):
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
opt = Load(aa).joinedload(aa.bs).joinedload(B.cs)
q = (
s.query(aa, A)
.filter(aa.id == 1)
.filter(A.id == 2)
.filter(aa.id != A.id)
.options(opt)
)
self._run_tests(q, 1)
def test_lazyload_plus_joined_aliased_abs_bcs(self):
"""by running the test twice, this test includes a test
for #7447 to ensure cached queries apply the cached option objects
to the InstanceState which line up with the cached current_path."""
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
q = (
s.query(aa, A)
.filter(aa.id == 1)
.filter(A.id == 2)
.filter(aa.id != A.id)
.options(defaultload(aa.bs).joinedload(B.cs))
)
self._run_tests(q, 2)
def test_joinedload_aliased_abs_bcs(self):
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
q = (
s.query(aa, A)
.filter(aa.id == 1)
.filter(A.id == 2)
.filter(aa.id != A.id)
.options(joinedload(aa.bs).joinedload(B.cs))
)
self._run_tests(q, 1)
def test_lazyload_unaliased_abs_bcs_one(self):
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
q = (
s.query(A, aa)
.filter(aa.id == 2)
.filter(A.id == 1)
.filter(aa.id != A.id)
.options(joinedload(aa.bs).joinedload(B.cs))
)
self._run_tests(q, 3)
def test_lazyload_unaliased_abs_bcs_two(self):
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
q = (
s.query(A, aa)
.filter(aa.id == 2)
.filter(A.id == 1)
.filter(aa.id != A.id)
.options(defaultload(aa.bs).joinedload(B.cs))
)
self._run_tests(q, 3)
def test_lazyload_plus_joined_unaliased_abs_bcs(self):
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
q = (
s.query(A, aa)
.filter(aa.id == 2)
.filter(A.id == 1)
.filter(aa.id != A.id)
.options(defaultload(A.bs).joinedload(B.cs))
)
self._run_tests(q, 2)
def test_joinedload_unaliased_abs_bcs(self):
A, B, C = self.classes("A", "B", "C")
for i in range(2):
s = fixture_session()
aa = aliased(A)
q = (
s.query(A, aa)
.filter(aa.id == 2)
.filter(A.id == 1)
.filter(aa.id != A.id)
.options(joinedload(A.bs).joinedload(B.cs))
)
self._run_tests(q, 1)
|
LazyLoadOptSpecificityTest
|
python
|
ansible__ansible
|
lib/ansible/module_utils/_internal/_json/_profiles/_module_modern_m2c.py
|
{
"start": 212,
"end": 1025
}
|
class ____(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
encode_strings_as_utf8 = True
@classmethod
def post_init(cls) -> None:
cls.allowed_ansible_serializable_types = _profiles._common_module_types | _profiles._common_module_response_types
cls.serialize_map = {
# The bytes type is not supported, use str instead (future module profiles may support a bytes wrapper distinct from `bytes`).
set: cls.serialize_as_list, # legacy _json_encode_fallback behavior
tuple: cls.serialize_as_list, # JSONEncoder built-in behavior
_datetime.date: _datatag.AnsibleSerializableDate,
_datetime.time: _datatag.AnsibleSerializableTime,
_datetime.datetime: _datatag.AnsibleSerializableDateTime,
}
|
_Profile
|
python
|
neetcode-gh__leetcode
|
python/0016-3sum-closest.py
|
{
"start": 0,
"end": 857
}
|
class ____:
def threeSumClosest(self, nums: List[int], target: int) -> int:
nums.sort()
best = float('inf')
for i in range(len(nums) - 2):
val = nums[i]
left = i + 1
right = len(nums) - 1
while left < right:
currentGap = abs(target - (val + nums[left] + nums[right]))
if abs(best - target) > currentGap:
best = val + nums[left] + nums[right]
if val + nums[left] + nums[right] < target:
left += 1
elif val + nums[left] + nums[right] > target:
right -= 1
else: #closest it can get
return target
return best
|
Solution
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/operators/bedrock.py
|
{
"start": 25471,
"end": 28148
}
|
class ____(AwsBaseOperator[BedrockAgentHook]):
"""
Set up an Amazon Bedrock Data Source to be added to an Amazon Bedrock Knowledge Base.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BedrockCreateDataSourceOperator`
:param name: name for the Amazon Bedrock Data Source being created. (templated).
:param bucket_name: The name of the Amazon S3 bucket to use for data source storage. (templated)
:param knowledge_base_id: The unique identifier of the knowledge base to which to add the data source. (templated)
:param create_data_source_kwargs: Any additional optional parameters to pass to the API call. (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = BedrockAgentHook
template_fields: Sequence[str] = aws_template_fields(
"name",
"bucket_name",
"knowledge_base_id",
"create_data_source_kwargs",
)
def __init__(
self,
name: str,
knowledge_base_id: str,
bucket_name: str | None = None,
create_data_source_kwargs: dict[str, Any] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.name = name
self.knowledge_base_id = knowledge_base_id
self.bucket_name = bucket_name
self.create_data_source_kwargs = create_data_source_kwargs or {}
def execute(self, context: Context) -> str:
create_ds_response = self.hook.conn.create_data_source(
name=self.name,
knowledgeBaseId=self.knowledge_base_id,
dataSourceConfiguration={
"type": "S3",
"s3Configuration": {"bucketArn": f"arn:aws:s3:::{self.bucket_name}"},
},
**self.create_data_source_kwargs,
)
return create_ds_response["dataSource"]["dataSourceId"]
|
BedrockCreateDataSourceOperator
|
python
|
Textualize__textual
|
examples/mother.py
|
{
"start": 743,
"end": 809
}
|
class ____(Markdown):
"""Markdown for the user prompt."""
|
Prompt
|
python
|
wandb__wandb
|
wandb/apis/public/registries/_utils.py
|
{
"start": 272,
"end": 4244
}
|
class ____(str, Enum):
# names are what users see/pass into Python methods
# values are what's expected by backend API
organization = "PRIVATE"
restricted = "RESTRICTED"
@classmethod
def _missing_(cls, value: object) -> Any:
# Allow instantiation from enum names too (e.g. "organization" or "restricted")
return cls.__members__.get(value)
@classmethod
def from_gql(cls, value: str) -> Visibility:
"""Convert a GraphQL `visibility` value to a Visibility enum."""
try:
return cls(value)
except ValueError:
expected = ",".join(repr(e.value) for e in cls)
raise ValueError(
f"Invalid visibility {value!r} from backend. Expected one of: {expected}"
) from None
@classmethod
def from_python(cls, name: str) -> Visibility:
"""Convert a visibility string to a `Visibility` enum."""
try:
return cls(name)
except ValueError:
expected = ",".join(repr(e.name) for e in cls)
raise ValueError(
f"Invalid visibility {name!r}. Expected one of: {expected}"
) from None
def prepare_artifact_types_input(
artifact_types: Collection[str] | None,
) -> list[dict[str, str]] | None:
"""Format the artifact types for the GQL input.
Args:
artifact_types: The artifact types to add to the registry.
Returns:
The artifact types for the GQL input.
"""
from wandb.sdk.artifacts._validators import validate_artifact_types
if artifact_types:
return [{"name": typ} for typ in validate_artifact_types(artifact_types)]
return None
def ensure_registry_prefix_on_names(query: Any, in_name: bool = False) -> Any:
"""Recursively the registry prefix to values under "name" keys, excluding regex ops.
- in_name: True if we are under a "name" key (or propagating from one).
EX: {"name": "model"} -> {"name": "wandb-registry-model"}
"""
from wandb.sdk.artifacts._validators import REGISTRY_PREFIX
if isinstance((txt := query), str):
return ensureprefix(txt, REGISTRY_PREFIX) if in_name else txt
if isinstance((dct := query), dict):
new_dict = {}
for key, obj in dct.items():
if key == "$regex":
# For regex operator, we skip transformation of its value.
new_dict[key] = obj
elif key == "name":
new_dict[key] = ensure_registry_prefix_on_names(obj, in_name=True)
else:
# For any other key, propagate flags as-is.
new_dict[key] = ensure_registry_prefix_on_names(obj, in_name=in_name)
return new_dict
if isinstance((seq := query), (list, tuple)):
return list(map(partial(ensure_registry_prefix_on_names, in_name=in_name), seq))
return query
@lru_cache(maxsize=10)
def fetch_org_entity_from_organization(client: Client, organization: str) -> str:
"""Fetch the org entity from the organization.
Args:
client (Client): Graphql client.
organization (str): The organization to fetch the org entity for.
"""
query = gql(
"""
query FetchOrgEntityFromOrganization($organization: String!) {
organization(name: $organization) {
orgEntity {
name
}
}
}
"""
)
try:
response = client.execute(query, variable_values={"organization": organization})
except Exception as e:
raise ValueError(
f"Error fetching org entity for organization: {organization!r}"
) from e
if (
not (org := response["organization"])
or not (org_entity := org["orgEntity"])
or not (org_name := org_entity["name"])
):
raise ValueError(f"Organization entity for {organization!r} not found.")
return org_name
|
Visibility
|
python
|
has2k1__plotnine
|
plotnine/themes/themeable.py
|
{
"start": 42103,
"end": 42655
}
|
class ____(themeable):
"""
Frame around colorbar
Parameters
----------
theme_element : element_rect
"""
_omit = ["facecolor"]
def apply_figure(self, figure: Figure, targets: ThemeTargets):
super().apply_figure(figure, targets)
if rect := targets.legend_frame:
rect.set(**self.properties)
def blank_figure(self, figure: Figure, targets: ThemeTargets):
super().blank_figure(figure, targets)
if rect := targets.legend_frame:
rect.set_visible(False)
|
legend_frame
|
python
|
django-mptt__django-mptt
|
tests/myapp/tests.py
|
{
"start": 33263,
"end": 34727
}
|
class ____(TestCase):
def test_insert_unordered_stuff(self):
root = OrderedInsertion.objects.create(name="")
# "b" gets inserted first,
b = OrderedInsertion.objects.create(name="b", parent=root)
# "a" gets inserted later,
a = OrderedInsertion.objects.create(name="a", parent=root)
# ... but specifying OrderedInsertion.MPTTMeta.order_insertion_by
# tells django-mptt to order added items by the name. So basically
# instance "a", added later, will get the first place in the
# tree. So what's exactly seems to be the problem?
#
# The problem is, item "b" will not get refreshed in any
# way. We need to reload it manually or else there will be problems
# like the one demonstrated below:
self.assertIn(a, a.get_ancestors(include_self=True))
# This will raise an AssertionError, unless we reload the item from
# the database. As long as we won't come up with a sensible way
# of reloading all Django instances pointing to a given row in the
# database...
# self.assertIn(b, b.get_ancestors(include_self=True)))
self.assertRaises(
AssertionError, self.assertIn, b, b.get_ancestors(include_self=True)
)
# ... we need to reload it properly ourselves:
b.refresh_from_db()
self.assertIn(b, b.get_ancestors(include_self=True))
|
OrderedInsertionSortingTestCase
|
python
|
astropy__astropy
|
astropy/time/core.py
|
{
"start": 106882,
"end": 107005
}
|
class ____(AstropyDeprecationWarning):
"""Warning for missing unit or format in TimeDelta."""
|
TimeDeltaMissingUnitWarning
|
python
|
huggingface__transformers
|
src/transformers/models/afmoe/modeling_afmoe.py
|
{
"start": 23803,
"end": 27524
}
|
class ____(AfmoePreTrainedModel):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`AfmoeDecoderLayer`]
Args:
config: AfmoeConfig
"""
def __init__(self, config: AfmoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[AfmoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = AfmoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = AfmoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.post_init()
@auto_docstring
@check_model_inputs()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache()
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens,
past_seen_tokens + inputs_embeds.shape[1],
device=inputs_embeds.device,
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
}
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
"sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
}
hidden_states = inputs_embeds
# Apply muP input scaling if enabled
if self.config.mup_enabled:
hidden_states = hidden_states * (self.config.hidden_size**0.5)
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_ids=position_ids,
past_key_value=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
@auto_docstring
|
AfmoeModel
|
python
|
pytorch__pytorch
|
torch/_dynamo/precompile_context.py
|
{
"start": 1578,
"end": 1705
}
|
class ____(BackendCacheArtifact[Any]):
def after_deserialization(self) -> Any:
return self.content
|
EagerCacheArtifact
|
python
|
gevent__gevent
|
src/greentest/3.12/test_interpreters.py
|
{
"start": 17445,
"end": 18133
}
|
class ____(TestBase):
# In these tests we generally want a lot of interpreters,
# but not so many that any test takes too long.
@support.requires_resource('cpu')
def test_create_many_sequential(self):
alive = []
for _ in range(100):
interp = interpreters.create()
alive.append(interp)
@support.requires_resource('cpu')
def test_create_many_threaded(self):
alive = []
def task():
interp = interpreters.create()
alive.append(interp)
threads = (threading.Thread(target=task) for _ in range(200))
with threading_helper.start_threads(threads):
pass
|
StressTests
|
python
|
huggingface__transformers
|
src/transformers/models/deepseek_v2/modeling_deepseek_v2.py
|
{
"start": 4020,
"end": 6732
}
|
class ____(nn.Module):
def __init__(self, config: DeepseekV2Config):
super().__init__()
self.config = config
self.experts = DeepseekV2Experts(config)
self.gate = nn.Linear(config.hidden_size, config.n_routed_experts, bias=False)
if config.n_shared_experts is not None:
intermediate_size = config.moe_intermediate_size * config.n_shared_experts
self.shared_experts = DeepseekV2MLP(config=config, intermediate_size=intermediate_size)
self.routed_scaling_factor = config.routed_scaling_factor
self.topk_method = config.topk_method
self.num_group = config.n_group
self.top_k = config.num_experts_per_tok
self.topk_group = config.topk_group
def route_tokens_to_experts(self, router_logits):
batch_size, seq_len, hidden_dim = router_logits.shape
router_logits = router_logits.view(-1, hidden_dim)
router_logits = router_logits.softmax(dim=-1, dtype=torch.float32)
if self.topk_method == "greedy":
topk_weight, topk_idx = torch.topk(router_logits, k=self.top_k, dim=-1, sorted=False)
elif self.topk_method == "group_limited_greedy":
group_scores = router_logits.view(batch_size * seq_len, self.num_group, -1).max(dim=-1).values
group_idx = torch.topk(group_scores, k=self.topk_group, dim=-1, sorted=False)[1]
group_mask = torch.zeros_like(group_scores)
group_mask.scatter_(1, group_idx, 1)
score_mask = (
group_mask.unsqueeze(-1)
.expand(batch_size * seq_len, self.num_group, self.num_experts // self.num_group)
.reshape(batch_size * seq_len, -1)
)
tmp_scores = router_logits.masked_fill(~score_mask.bool(), 0.0)
topk_weight, topk_idx = torch.topk(tmp_scores, k=self.top_k, dim=-1, sorted=False)
topk_weight = topk_weight * self.routed_scaling_factor
topk_weight = torch.zeros_like(router_logits).scatter_(1, topk_idx, topk_weight)
return topk_idx, topk_weight
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
residuals = hidden_states
orig_shape = hidden_states.shape
router_logits = nn.functional.linear(hidden_states.type(torch.float32), self.gate.weight.type(torch.float32))
topk_indices, topk_weights = self.route_tokens_to_experts(router_logits)
hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
hidden_states = self.experts(hidden_states, topk_indices, topk_weights).view(*orig_shape)
hidden_states = hidden_states + self.shared_experts(residuals)
return hidden_states
|
DeepseekV2Moe
|
python
|
cherrypy__cherrypy
|
cherrypy/_cplogging.py
|
{
"start": 5238,
"end": 15525
}
|
class ____(object):
"""An object to assist both simple and advanced logging.
``cherrypy.log`` is an instance of this class.
"""
appid = None
"""The id() of the Application object which owns this log manager.
If this is a global log manager, appid is None.
"""
error_log = None
"""The actual :class:`logging.Logger` instance for error messages."""
access_log = None
"""The actual :class:`logging.Logger` instance for access messages."""
access_log_format = '{h} {l} {u} {t} "{r}" {s} {b} "{f}" "{a}"'
logger_root = None
"""The "top-level" logger name.
This string will be used as the first segment in the Logger names.
The default is "cherrypy", for example, in which case the Logger names
will be of the form::
cherrypy.error.<appid>
cherrypy.access.<appid>
"""
def __init__(self, appid=None, logger_root='cherrypy'):
"""Initialize a CherryPy log manager."""
self.logger_root = logger_root
self.appid = appid
if appid is None:
self.error_log = logging.getLogger('%s.error' % logger_root)
self.access_log = logging.getLogger('%s.access' % logger_root)
else:
self.error_log = logging.getLogger(
'%s.error.%s' % (logger_root, appid),
)
self.access_log = logging.getLogger(
'%s.access.%s' % (logger_root, appid),
)
self.error_log.setLevel(logging.INFO)
self.access_log.setLevel(logging.INFO)
# Silence the no-handlers "warning" (stderr write!) in stdlib logging
self.error_log.addHandler(NullHandler())
self.access_log.addHandler(NullHandler())
cherrypy.engine.subscribe('graceful', self.reopen_files)
def reopen_files(self):
"""Close and reopen all file handlers."""
for log in (self.error_log, self.access_log):
for h in log.handlers:
if isinstance(h, logging.FileHandler):
h.acquire()
h.stream.close()
h.stream = open(h.baseFilename, h.mode)
h.release()
def error(
self,
msg='',
context='',
severity=logging.INFO,
traceback=False,
):
"""Write the given ``msg`` to the error log.
This is not just for errors! Applications may call this at any time
to log application-specific information.
If ``traceback`` is True, the traceback of the current exception
(if any) will be appended to ``msg``.
"""
exc_info = None
if traceback:
exc_info = _cperror._exc_info()
self.error_log.log(
severity,
' '.join((self.time(), context, msg)),
exc_info=exc_info,
)
def __call__(self, *args, **kwargs):
"""Record an error log entry."""
return self.error(*args, **kwargs)
def access(self):
r"""Write to the access log (in Apache/NCSA Combined Log format).
See the
`apache documentation
<http://httpd.apache.org/docs/current/logs.html#combined>`_
for format details.
CherryPy calls this automatically for you. Note there are no arguments;
it collects the data itself from
:class:`cherrypy.request<cherrypy._cprequest.Request>`.
Like Apache started doing in 2.0.46, non-printable and other special
characters in %r (and we expand that to all parts) are escaped using
\\xhh sequences, where hh stands for the hexadecimal representation
of the raw byte. Exceptions from this rule are " and \\, which are
escaped by prepending a backslash, and all whitespace characters,
which are written in their C-style notation (\\n, \\t, etc).
"""
request = cherrypy.serving.request
remote = request.remote
response = cherrypy.serving.response
outheaders = response.headers
inheaders = request.headers
if response.output_status is None:
status = '-'
else:
status = response.output_status.split(b' ', 1)[0]
status = status.decode('ISO-8859-1')
atoms = {
'h': remote.name or remote.ip,
'l': '-',
'u': getattr(request, 'login', None) or '-',
't': self.time(),
'r': request.request_line,
's': status,
'b': dict.get(outheaders, 'Content-Length', '') or '-',
'f': dict.get(inheaders, 'Referer', ''),
'a': dict.get(inheaders, 'User-Agent', ''),
'o': dict.get(inheaders, 'Host', '-'),
'i': request.unique_id,
'z': LazyRfc3339UtcTime(),
}
for k, v in atoms.items():
if not isinstance(v, str):
v = str(v)
v = v.replace('"', '\\"').encode('utf8')
# Fortunately, repr(str) escapes unprintable chars, \n, \t, etc
# and backslash for us. All we have to do is strip the quotes.
v = repr(v)[2:-1]
# in python 3.0 the repr of bytes (as returned by encode)
# uses double \'s. But then the logger escapes them yet, again
# resulting in quadruple slashes. Remove the extra one here.
v = v.replace('\\\\', '\\')
# Escape double-quote.
atoms[k] = v
try:
self.access_log.log(
logging.INFO,
self.access_log_format.format(**atoms),
)
except Exception:
self(traceback=True)
def time(self):
"""Return now() in Apache Common Log Format (no timezone)."""
now = datetime.datetime.now()
monthnames = [
'jan',
'feb',
'mar',
'apr',
'may',
'jun',
'jul',
'aug',
'sep',
'oct',
'nov',
'dec',
]
month = monthnames[now.month - 1].capitalize()
return '[%02d/%s/%04d:%02d:%02d:%02d]' % (
now.day,
month,
now.year,
now.hour,
now.minute,
now.second,
)
def _get_builtin_handler(self, log, key):
for h in log.handlers:
if getattr(h, '_cpbuiltin', None) == key:
return h
# ------------------------- Screen handlers ------------------------- #
def _set_screen_handler(self, log, enable, stream=None):
h = self._get_builtin_handler(log, 'screen')
if enable:
if not h:
if stream is None:
stream = sys.stderr
h = logging.StreamHandler(stream)
h.setFormatter(logfmt)
h._cpbuiltin = 'screen'
log.addHandler(h)
elif h:
log.handlers.remove(h)
@property
def screen(self):
"""Turn stderr/stdout logging on or off.
If you set this to True, it'll add the appropriate StreamHandler
for you. If you set it to False, it will remove the handler.
"""
h = self._get_builtin_handler
has_h = h(self.error_log, 'screen') or h(self.access_log, 'screen')
return bool(has_h)
@screen.setter
def screen(self, newvalue):
self._set_screen_handler(self.error_log, newvalue, stream=sys.stderr)
self._set_screen_handler(self.access_log, newvalue, stream=sys.stdout)
# -------------------------- File handlers -------------------------- #
def _add_builtin_file_handler(self, log, fname):
h = logging.FileHandler(fname)
h.setFormatter(logfmt)
h._cpbuiltin = 'file'
log.addHandler(h)
def _set_file_handler(self, log, filename):
h = self._get_builtin_handler(log, 'file')
if filename:
if h:
if h.baseFilename != os.path.abspath(filename):
h.close()
log.handlers.remove(h)
self._add_builtin_file_handler(log, filename)
else:
self._add_builtin_file_handler(log, filename)
else:
if h:
h.close()
log.handlers.remove(h)
@property
def error_file(self):
"""The filename for self.error_log.
If you set this to a string, it'll add the appropriate FileHandler for
you. If you set it to ``None`` or ``''``, it will remove the handler.
"""
h = self._get_builtin_handler(self.error_log, 'file')
if h:
return h.baseFilename
return ''
@error_file.setter
def error_file(self, newvalue):
self._set_file_handler(self.error_log, newvalue)
@property
def access_file(self):
"""The filename for self.access_log.
If you set this to a string, it'll add the appropriate FileHandler for
you. If you set it to ``None`` or ``''``, it will remove the handler.
"""
h = self._get_builtin_handler(self.access_log, 'file')
if h:
return h.baseFilename
return ''
@access_file.setter
def access_file(self, newvalue):
self._set_file_handler(self.access_log, newvalue)
# ------------------------- WSGI handlers ------------------------- #
def _set_wsgi_handler(self, log, enable):
h = self._get_builtin_handler(log, 'wsgi')
if enable:
if not h:
h = WSGIErrorHandler()
h.setFormatter(logfmt)
h._cpbuiltin = 'wsgi'
log.addHandler(h)
elif h:
log.handlers.remove(h)
@property
def wsgi(self):
"""Write errors to wsgi.errors.
If you set this to True, it'll add the appropriate
:class:`WSGIErrorHandler<cherrypy._cplogging.WSGIErrorHandler>` for you
(which writes errors to ``wsgi.errors``).
If you set it to False, it will remove the handler.
"""
return bool(self._get_builtin_handler(self.error_log, 'wsgi'))
@wsgi.setter
def wsgi(self, newvalue):
self._set_wsgi_handler(self.error_log, newvalue)
|
LogManager
|
python
|
spack__spack
|
lib/spack/spack/vendor/ruamel/yaml/tokens.py
|
{
"start": 8453,
"end": 8527
}
|
class ____(Token):
__slots__ = ()
id = '<stream end>'
|
StreamEndToken
|
python
|
facelessuser__pymdown-extensions
|
pymdownx/highlight.py
|
{
"start": 20090,
"end": 21969
}
|
class ____(Extension):
"""Configure highlight settings globally."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = copy.deepcopy(DEFAULT_CONFIG)
super().__init__(*args, **kwargs)
def get_pymdownx_highlight_settings(self):
"""Get the specified extension."""
target = None
if self.enabled:
target = self.getConfigs()
if target is None:
target = {}
config_clone = copy.deepcopy(DEFAULT_CONFIG)
for k in config_clone.keys():
target[k] = config_clone[k][0]
return target
def get_pymdownx_highlighter(self):
"""Get the highlighter."""
return Highlight
def extendMarkdown(self, md):
"""Add support for code highlighting."""
config = self.getConfigs()
self.pygments_code_block = -1
self.md = md
self.enabled = config.get("_enabled", False)
if self.enabled:
ht = HighlightTreeprocessor(self.md, self)
ht.config = self.getConfigs()
self.md.treeprocessors.register(ht, "indent-highlight", 30)
index = 0
register = None
for ext in self.md.registeredExtensions:
if isinstance(ext, HighlightExtension):
register = not ext.enabled and self.enabled
break
index += 1
if register is None:
register = True
index = -1
if register:
if index == -1:
self.md.registerExtension(self)
else:
self.md.registeredExtensions[index] = self
def reset(self):
"""Reset."""
self.pygments_code_block = -1
def makeExtension(*args, **kwargs):
"""Return extension."""
return HighlightExtension(*args, **kwargs)
|
HighlightExtension
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_scalar_methods.py
|
{
"start": 5363,
"end": 7215
}
|
class ____(TestCase):
@parametrize(
"cls",
[
np.number,
np.integer,
np.inexact,
np.unsignedinteger,
np.signedinteger,
np.floating,
],
)
def test_abc(self, cls: type[np.number]) -> None:
alias = cls[Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is cls
def test_abc_complexfloating(self) -> None:
alias = np.complexfloating[Any, Any]
assert isinstance(alias, types.GenericAlias)
assert alias.__origin__ is np.complexfloating
@parametrize("arg_len", range(4))
def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len in (1, 2):
assert np.complexfloating[arg_tup]
else:
match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
with pytest.raises(TypeError, match=match):
np.complexfloating[arg_tup]
@parametrize("cls", [np.generic])
def test_abc_non_numeric(self, cls: type[np.generic]) -> None:
with pytest.raises(TypeError):
cls[Any]
@parametrize("code", np.typecodes["All"])
def test_concrete(self, code: str) -> None:
cls = np.dtype(code).type
with pytest.raises(TypeError):
cls[Any]
@parametrize("arg_len", range(4))
def test_subscript_tuple(self, arg_len: int) -> None:
arg_tup = (Any,) * arg_len
if arg_len == 1:
assert np.number[arg_tup]
else:
with pytest.raises(TypeError):
np.number[arg_tup]
def test_subscript_scalar(self) -> None:
assert np.number[Any]
@skip(reason="scalartype(...).bit_count() not implemented")
@instantiate_parametrized_tests
|
TestClassGetItem
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/components/regression/sgd.py
|
{
"start": 600,
"end": 8916
}
|
class ____(
IterativeComponent,
AutoSklearnRegressionAlgorithm,
):
def __init__(
self,
loss,
penalty,
alpha,
fit_intercept,
tol,
learning_rate,
l1_ratio=0.15,
epsilon=0.1,
eta0=0.01,
power_t=0.5,
average=False,
random_state=None,
):
self.max_iter = self.get_max_iter()
self.loss = loss
self.penalty = penalty
self.alpha = alpha
self.fit_intercept = fit_intercept
self.tol = tol
self.learning_rate = learning_rate
self.l1_ratio = l1_ratio
self.epsilon = epsilon
self.eta0 = eta0
self.power_t = power_t
self.random_state = random_state
self.average = average
self.estimator = None
self.scaler = None
@staticmethod
def get_max_iter():
return 1024
def get_current_iter(self):
return self.n_iter_
def iterative_fit(self, X, y, n_iter=2, refit=False):
import sklearn.preprocessing
from sklearn.linear_model import SGDRegressor
# Need to fit at least two iterations, otherwise early stopping will not
# work because we cannot determine whether the algorithm actually
# converged. The only way of finding this out is if the sgd spends less
# iterations than max_iter. If max_iter == 1, it has to spend at least
# one iteration and will always spend at least one iteration, so we
# cannot know about convergence.
n_iter = max(n_iter, 2)
if refit:
self.estimator = None
self.scaler = None
if self.estimator is None:
self.fully_fit_ = False
self.alpha = float(self.alpha)
self.l1_ratio = float(self.l1_ratio) if self.l1_ratio is not None else 0.15
self.epsilon = float(self.epsilon) if self.epsilon is not None else 0.1
self.eta0 = float(self.eta0)
self.power_t = float(self.power_t) if self.power_t is not None else 0.25
self.average = check_for_bool(self.average)
self.fit_intercept = check_for_bool(self.fit_intercept)
self.tol = float(self.tol)
self.estimator = SGDRegressor(
loss=self.loss,
penalty=self.penalty,
alpha=self.alpha,
fit_intercept=self.fit_intercept,
max_iter=n_iter,
tol=self.tol,
learning_rate=self.learning_rate,
l1_ratio=self.l1_ratio,
epsilon=self.epsilon,
eta0=self.eta0,
power_t=self.power_t,
shuffle=True,
average=self.average,
random_state=self.random_state,
warm_start=True,
)
self.scaler = sklearn.preprocessing.StandardScaler(copy=True)
if y.ndim == 1:
y = y.reshape((-1, 1))
y_scaled = self.scaler.fit_transform(y)
# Flatten: [[0], [0], [0]] -> [0, 0, 0]
if y_scaled.ndim == 2 and y_scaled.shape[1] == 1:
y_scaled = y_scaled.flatten()
self.estimator.fit(X, y_scaled)
self.n_iter_ = self.estimator.n_iter_
else:
self.estimator.max_iter += n_iter
self.estimator.max_iter = min(self.estimator.max_iter, self.max_iter)
# Convert y to be at least 2d for the scaler
# [1,1,1] -> [[1], [1], [1]]
if y.ndim == 1:
y = y.reshape((-1, 1))
y_scaled = self.scaler.transform(y)
# Flatten: [[0], [0], [0]] -> [0, 0, 0]
if y_scaled.ndim == 2 and y_scaled.shape[1] == 1:
y_scaled = y_scaled.flatten()
self.estimator._validate_params()
self.estimator._partial_fit(
X,
y_scaled,
alpha=self.estimator.alpha,
C=1.0,
loss=self.estimator.loss,
learning_rate=self.estimator.learning_rate,
max_iter=n_iter,
sample_weight=None,
coef_init=None,
intercept_init=None,
)
self.n_iter_ += self.estimator.n_iter_
if (
self.estimator.max_iter >= self.max_iter
or self.estimator.max_iter > self.n_iter_
):
self.fully_fit_ = True
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
elif not hasattr(self, "fully_fit_"):
return False
else:
return self.fully_fit_
def predict(self, X):
if self.estimator is None:
raise NotImplementedError()
Y_pred = self.estimator.predict(X)
return self.scaler.inverse_transform(Y_pred)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "SGD Regressor",
"name": "Stochastic Gradient Descent Regressor",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": False,
"is_deterministic": True,
"handles_sparse": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
loss = CategoricalHyperparameter(
"loss",
[
"squared_loss",
"huber",
"epsilon_insensitive",
"squared_epsilon_insensitive",
],
default_value="squared_loss",
)
penalty = CategoricalHyperparameter(
"penalty", ["l1", "l2", "elasticnet"], default_value="l2"
)
alpha = UniformFloatHyperparameter(
"alpha", 1e-7, 1e-1, log=True, default_value=0.0001
)
l1_ratio = UniformFloatHyperparameter(
"l1_ratio", 1e-9, 1.0, log=True, default_value=0.15
)
fit_intercept = UnParametrizedHyperparameter("fit_intercept", "True")
tol = UniformFloatHyperparameter(
"tol", 1e-5, 1e-1, default_value=1e-4, log=True
)
epsilon = UniformFloatHyperparameter(
"epsilon", 1e-5, 1e-1, default_value=0.1, log=True
)
learning_rate = CategoricalHyperparameter(
"learning_rate",
["optimal", "invscaling", "constant"],
default_value="invscaling",
)
eta0 = UniformFloatHyperparameter(
"eta0", 1e-7, 1e-1, default_value=0.01, log=True
)
power_t = UniformFloatHyperparameter("power_t", 1e-5, 1, default_value=0.25)
average = CategoricalHyperparameter(
"average", ["False", "True"], default_value="False"
)
cs.add_hyperparameters(
[
loss,
penalty,
alpha,
l1_ratio,
fit_intercept,
tol,
epsilon,
learning_rate,
eta0,
power_t,
average,
]
)
# TODO add passive/aggressive here, although not properly documented?
elasticnet = EqualsCondition(l1_ratio, penalty, "elasticnet")
epsilon_condition = InCondition(
epsilon,
loss,
["huber", "epsilon_insensitive", "squared_epsilon_insensitive"],
)
# eta0 is only relevant if learning_rate!='optimal' according to code
# https://github.com/scikit-learn/scikit-learn/blob/0.19.X/sklearn/
# linear_model/sgd_fast.pyx#L603
eta0_in_inv_con = InCondition(eta0, learning_rate, ["invscaling", "constant"])
power_t_condition = EqualsCondition(power_t, learning_rate, "invscaling")
cs.add_conditions(
[elasticnet, epsilon_condition, power_t_condition, eta0_in_inv_con]
)
return cs
|
SGD
|
python
|
kamyu104__LeetCode-Solutions
|
Python/design-a-file-sharing-system.py
|
{
"start": 1905,
"end": 3320
}
|
class ____(object):
def __init__(self, m):
"""
:type m: int
"""
self.__users = []
self.__lookup = set()
self.__chunks = collections.defaultdict(set)
self.__min_heap = []
def join(self, ownedChunks):
"""
:type ownedChunks: List[int]
:rtype: int
"""
if self.__min_heap:
userID = heapq.heappop(self.__min_heap)
else:
userID = len(self.__users)+1
self.__users.append(set())
self.__users[userID-1] = set(ownedChunks)
self.__lookup.add(userID)
for c in ownedChunks:
self.__chunks[c].add(userID)
return userID
def leave(self, userID):
"""
:type userID: int
:rtype: None
"""
if userID not in self.__lookup:
return
for c in self.__users[userID-1]:
self.__chunks[c].remove(userID)
self.__lookup.remove(userID)
self.__users[userID-1] = []
heapq.heappush(self.__min_heap, userID)
def request(self, userID, chunkID):
"""
:type userID: int
:type chunkID: int
:rtype: List[int]
"""
result = sorted(self.__chunks[chunkID])
if not result:
return
self.__users[userID-1].add(chunkID)
self.__chunks[chunkID].add(userID)
return result
|
FileSharing2
|
python
|
python-excel__xlrd
|
tests/test_ignore_workbook_corruption_error.py
|
{
"start": 79,
"end": 451
}
|
class ____(TestCase):
def test_not_corrupted(self):
with self.assertRaises(Exception) as context:
xlrd.open_workbook(from_sample('corrupted_error.xls'))
self.assertTrue('Workbook corruption' in str(context.exception))
xlrd.open_workbook(from_sample('corrupted_error.xls'), ignore_workbook_corruption=True)
|
TestIgnoreWorkbookCorruption
|
python
|
pytorch__pytorch
|
benchmarks/operator_benchmark/pt/qlinear_test.py
|
{
"start": 1075,
"end": 1299
}
|
class ____(_QLinearBenchmarkBase):
def init(self, N, IN, OUT, device):
super().init(N, IN, OUT, nnq.Linear(IN, OUT))
self.inputs = {"input": self.qX}
self.set_module_name("QLinear")
|
QLinearBenchmark
|
python
|
dateutil__dateutil
|
src/dateutil/parser/isoparser.py
|
{
"start": 1051,
"end": 13230
}
|
class ____(object):
def __init__(self, sep=None):
"""
:param sep:
A single character that separates date and time portions. If
``None``, the parser will accept any single character.
For strict ISO-8601 adherence, pass ``'T'``.
"""
if sep is not None:
if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
raise ValueError('Separator must be a single, non-numeric ' +
'ASCII character')
sep = sep.encode('ascii')
self._sep = sep
@_takes_ascii
def isoparse(self, dt_str):
"""
Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.
An ISO-8601 datetime string consists of a date portion, followed
optionally by a time portion - the date and time portions are separated
by a single character separator, which is ``T`` in the official
standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
combined with a time portion.
Supported date formats are:
Common:
- ``YYYY``
- ``YYYY-MM``
- ``YYYY-MM-DD`` or ``YYYYMMDD``
Uncommon:
- ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0)
- ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day
The ISO week and day numbering follows the same logic as
:func:`datetime.date.isocalendar`.
Supported time formats are:
- ``hh``
- ``hh:mm`` or ``hhmm``
- ``hh:mm:ss`` or ``hhmmss``
- ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits)
Midnight is a special case for `hh`, as the standard supports both
00:00 and 24:00 as a representation. The decimal separator can be
either a dot or a comma.
.. caution::
Support for fractional components other than seconds is part of the
ISO-8601 standard, but is not currently implemented in this parser.
Supported time zone offset formats are:
- `Z` (UTC)
- `±HH:MM`
- `±HHMM`
- `±HH`
Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
with the exception of UTC, which will be represented as
:class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.
:param dt_str:
A string or stream containing only an ISO-8601 datetime string
:return:
Returns a :class:`datetime.datetime` representing the string.
Unspecified components default to their lowest value.
.. warning::
As of version 2.7.0, the strictness of the parser should not be
considered a stable part of the contract. Any valid ISO-8601 string
that parses correctly with the default settings will continue to
parse correctly in future versions, but invalid strings that
currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
guaranteed to continue failing in future versions if they encode
a valid date.
.. versionadded:: 2.7.0
"""
components, pos = self._parse_isodate(dt_str)
if len(dt_str) > pos:
if self._sep is None or dt_str[pos:pos + 1] == self._sep:
components += self._parse_isotime(dt_str[pos + 1:])
else:
raise ValueError('String contains unknown ISO components')
if len(components) > 3 and components[3] == 24:
components[3] = 0
return datetime(*components) + timedelta(days=1)
return datetime(*components)
@_takes_ascii
def parse_isodate(self, datestr):
"""
Parse the date portion of an ISO string.
:param datestr:
The string portion of an ISO string, without a separator
:return:
Returns a :class:`datetime.date` object
"""
components, pos = self._parse_isodate(datestr)
if pos < len(datestr):
raise ValueError('String contains unknown ISO ' +
'components: {!r}'.format(datestr.decode('ascii')))
return date(*components)
@_takes_ascii
def parse_isotime(self, timestr):
"""
Parse the time portion of an ISO string.
:param timestr:
The time portion of an ISO string, without a separator
:return:
Returns a :class:`datetime.time` object
"""
components = self._parse_isotime(timestr)
if components[0] == 24:
components[0] = 0
return time(*components)
@_takes_ascii
def parse_tzstr(self, tzstr, zero_as_utc=True):
"""
Parse a valid ISO time zone string.
See :func:`isoparser.isoparse` for details on supported formats.
:param tzstr:
A string representing an ISO time zone offset
:param zero_as_utc:
Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones
:return:
Returns :class:`dateutil.tz.tzoffset` for offsets and
:class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
specified) offsets equivalent to UTC.
"""
return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
# Constants
_DATE_SEP = b'-'
_TIME_SEP = b':'
_FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)')
def _parse_isodate(self, dt_str):
try:
return self._parse_isodate_common(dt_str)
except ValueError:
return self._parse_isodate_uncommon(dt_str)
def _parse_isodate_common(self, dt_str):
len_str = len(dt_str)
components = [1, 1, 1]
if len_str < 4:
raise ValueError('ISO string too short')
# Year
components[0] = int(dt_str[0:4])
pos = 4
if pos >= len_str:
return components, pos
has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
if has_sep:
pos += 1
# Month
if len_str - pos < 2:
raise ValueError('Invalid common month')
components[1] = int(dt_str[pos:pos + 2])
pos += 2
if pos >= len_str:
if has_sep:
return components, pos
else:
raise ValueError('Invalid ISO format')
if has_sep:
if dt_str[pos:pos + 1] != self._DATE_SEP:
raise ValueError('Invalid separator in ISO string')
pos += 1
# Day
if len_str - pos < 2:
raise ValueError('Invalid common day')
components[2] = int(dt_str[pos:pos + 2])
return components, pos + 2
def _parse_isodate_uncommon(self, dt_str):
if len(dt_str) < 4:
raise ValueError('ISO string too short')
# All ISO formats start with the year
year = int(dt_str[0:4])
has_sep = dt_str[4:5] == self._DATE_SEP
pos = 4 + has_sep # Skip '-' if it's there
if dt_str[pos:pos + 1] == b'W':
# YYYY-?Www-?D?
pos += 1
weekno = int(dt_str[pos:pos + 2])
pos += 2
dayno = 1
if len(dt_str) > pos:
if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
raise ValueError('Inconsistent use of dash separator')
pos += has_sep
dayno = int(dt_str[pos:pos + 1])
pos += 1
base_date = self._calculate_weekdate(year, weekno, dayno)
else:
# YYYYDDD or YYYY-DDD
if len(dt_str) - pos < 3:
raise ValueError('Invalid ordinal day')
ordinal_day = int(dt_str[pos:pos + 3])
pos += 3
if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
raise ValueError('Invalid ordinal day' +
' {} for year {}'.format(ordinal_day, year))
base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)
components = [base_date.year, base_date.month, base_date.day]
return components, pos
def _calculate_weekdate(self, year, week, day):
"""
Calculate the day of corresponding to the ISO year-week-day calendar.
This function is effectively the inverse of
:func:`datetime.date.isocalendar`.
:param year:
The year in the ISO calendar
:param week:
The week in the ISO calendar - range is [1, 53]
:param day:
The day in the ISO calendar - range is [1 (MON), 7 (SUN)]
:return:
Returns a :class:`datetime.date`
"""
if not 0 < week < 54:
raise ValueError('Invalid week: {}'.format(week))
if not 0 < day < 8: # Range is 1-7
raise ValueError('Invalid weekday: {}'.format(day))
# Get week 1 for the specific year:
jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it
week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)
# Now add the specific number of weeks and days to get what we want
week_offset = (week - 1) * 7 + (day - 1)
return week_1 + timedelta(days=week_offset)
def _parse_isotime(self, timestr):
len_str = len(timestr)
components = [0, 0, 0, 0, None]
pos = 0
comp = -1
if len_str < 2:
raise ValueError('ISO time too short')
has_sep = False
while pos < len_str and comp < 5:
comp += 1
if timestr[pos:pos + 1] in b'-+Zz':
# Detect time zone boundary
components[-1] = self._parse_tzstr(timestr[pos:])
pos = len_str
break
if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP:
has_sep = True
pos += 1
elif comp == 2 and has_sep:
if timestr[pos:pos+1] != self._TIME_SEP:
raise ValueError('Inconsistent use of colon separator')
pos += 1
if comp < 3:
# Hour, minute, second
components[comp] = int(timestr[pos:pos + 2])
pos += 2
if comp == 3:
# Fraction of a second
frac = self._FRACTION_REGEX.match(timestr[pos:])
if not frac:
continue
us_str = frac.group(1)[:6] # Truncate to microseconds
components[comp] = int(us_str) * 10**(6 - len(us_str))
pos += len(frac.group())
if pos < len_str:
raise ValueError('Unused components in ISO string')
if components[0] == 24:
# Standard supports 00:00 and 24:00 as representations of midnight
if any(component != 0 for component in components[1:4]):
raise ValueError('Hour may only be 24 at 24:00:00.000')
return components
def _parse_tzstr(self, tzstr, zero_as_utc=True):
if tzstr == b'Z' or tzstr == b'z':
return tz.UTC
if len(tzstr) not in {3, 5, 6}:
raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters')
if tzstr[0:1] == b'-':
mult = -1
elif tzstr[0:1] == b'+':
mult = 1
else:
raise ValueError('Time zone offset requires sign')
hours = int(tzstr[1:3])
if len(tzstr) == 3:
minutes = 0
else:
minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])
if zero_as_utc and hours == 0 and minutes == 0:
return tz.UTC
else:
if minutes > 59:
raise ValueError('Invalid minutes in time zone offset')
if hours > 23:
raise ValueError('Invalid hours in time zone offset')
return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)
DEFAULT_ISOPARSER = isoparser()
isoparse = DEFAULT_ISOPARSER.isoparse
|
isoparser
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_str.py
|
{
"start": 588,
"end": 629
}
|
class ____:
def __str__(self): ...
|
Str3
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/execution/stats.py
|
{
"start": 6906,
"end": 14253
}
|
class ____:
run_id: str
step_key_stats: Sequence[RunStepKeyStatsSnapshot]
partial_markers: Optional[Mapping[str, Sequence[RunStepMarker]]]
def build_run_step_stats_from_events(
run_id: str,
entries: Iterable[EventLogEntry],
) -> Sequence[RunStepKeyStatsSnapshot]:
snapshot = build_run_step_stats_snapshot_from_events(run_id, entries)
return snapshot.step_key_stats
def build_run_step_stats_snapshot_from_events(
run_id: str,
entries: Iterable[EventLogEntry],
previous_snapshot: Optional["RunStepStatsSnapshot"] = None,
) -> "RunStepStatsSnapshot":
by_step_key: dict[str, dict[str, Any]] = defaultdict(dict)
attempts = defaultdict(list)
markers: dict[str, dict[str, Any]] = defaultdict(dict)
if previous_snapshot:
for step_stats in previous_snapshot.step_key_stats:
check.invariant(step_stats.run_id == run_id)
by_step_key[step_stats.step_key] = {
"start_time": step_stats.start_time,
"end_time": step_stats.end_time,
"status": step_stats.status,
"materialization_events": step_stats.materialization_events,
"expectation_results": step_stats.expectation_results,
"attempts": step_stats.attempts,
"partial_attempt_start": step_stats.partial_attempt_start,
}
for attempt in step_stats.attempts_list:
attempts[step_stats.step_key].append(attempt)
for marker in step_stats.markers:
assert marker.key
markers[step_stats.step_key][marker.key] = {
"key": marker.key,
"start": marker.start_time,
"end": marker.end_time,
}
# handle the partial markers
if previous_snapshot.partial_markers:
for step_key, partial_markers in previous_snapshot.partial_markers.items():
for marker in partial_markers:
assert marker.key
markers[step_key][marker.key] = {
"key": marker.key,
"start": marker.start_time,
"end": marker.end_time,
}
def _open_attempt(step_key: str, event: EventLogEntry) -> None:
by_step_key[step_key]["attempts"] = int(by_step_key[step_key].get("attempts") or 0) + 1
by_step_key[step_key]["partial_attempt_start"] = event.timestamp
def _close_attempt(step_key: str, event: EventLogEntry) -> None:
start_time = by_step_key[step_key].get("partial_attempt_start")
if start_time is None:
# this should only happen if the step was retried before starting (weird)
by_step_key[step_key]["attempts"] = int(by_step_key[step_key].get("attempts") or 0) + 1
start_time = event.timestamp
attempts[step_key].append(
RunStepMarker(
start_time=start_time,
end_time=event.timestamp,
)
)
by_step_key[step_key]["partial_attempt_start"] = None
for event in entries:
if not event.is_dagster_event:
continue
dagster_event = event.get_dagster_event()
step_key = dagster_event.step_key
if not step_key:
continue
if dagster_event.event_type not in STEP_STATS_EVENT_TYPES:
continue
if dagster_event.event_type == DagsterEventType.STEP_START:
by_step_key[step_key]["status"] = StepEventStatus.IN_PROGRESS
by_step_key[step_key]["start_time"] = event.timestamp
_open_attempt(step_key, event)
if dagster_event.event_type == DagsterEventType.STEP_RESTARTED:
_open_attempt(step_key, event)
if dagster_event.event_type == DagsterEventType.STEP_UP_FOR_RETRY:
_close_attempt(step_key, event)
if dagster_event.event_type == DagsterEventType.STEP_FAILURE:
by_step_key[step_key]["end_time"] = event.timestamp
by_step_key[step_key]["status"] = StepEventStatus.FAILURE
_close_attempt(step_key, event)
if dagster_event.event_type == DagsterEventType.STEP_SUCCESS:
by_step_key[step_key]["end_time"] = event.timestamp
by_step_key[step_key]["status"] = StepEventStatus.SUCCESS
_close_attempt(step_key, event)
if dagster_event.event_type == DagsterEventType.STEP_SKIPPED:
by_step_key[step_key]["end_time"] = event.timestamp
by_step_key[step_key]["status"] = StepEventStatus.SKIPPED
_close_attempt(step_key, event)
if dagster_event.event_type == DagsterEventType.ASSET_MATERIALIZATION:
materialization_events = by_step_key[step_key].get("materialization_events", [])
materialization_events.append(event)
by_step_key[step_key]["materialization_events"] = materialization_events
if dagster_event.event_type == DagsterEventType.STEP_EXPECTATION_RESULT:
expectation_data = cast("StepExpectationResultData", dagster_event.event_specific_data)
expectation_result = expectation_data.expectation_result
step_expectation_results = by_step_key[step_key].get("expectation_results", [])
step_expectation_results.append(expectation_result)
by_step_key[step_key]["expectation_results"] = step_expectation_results
if dagster_event.event_type in MARKER_EVENTS:
if dagster_event.engine_event_data.marker_start:
marker_key = dagster_event.engine_event_data.marker_start
if marker_key not in markers[step_key]:
markers[step_key][marker_key] = {"key": marker_key, "start": event.timestamp}
else:
markers[step_key][marker_key]["start"] = event.timestamp
if dagster_event.engine_event_data.marker_end:
marker_key = dagster_event.engine_event_data.marker_end
if marker_key not in markers[step_key]:
markers[step_key][marker_key] = {"key": marker_key, "end": event.timestamp}
else:
markers[step_key][marker_key]["end"] = event.timestamp
snapshots = []
for step_key, step_stats in by_step_key.items():
snapshots.append(
RunStepKeyStatsSnapshot(
run_id=run_id,
step_key=step_key,
**step_stats,
markers=[
RunStepMarker(
start_time=marker.get("start"),
end_time=marker.get("end"),
key=marker.get("key"),
)
for marker in markers[step_key].values()
],
attempts_list=attempts[step_key],
)
)
return RunStepStatsSnapshot(
run_id=run_id,
step_key_stats=snapshots,
partial_markers={
step_key: [
RunStepMarker(start_time=marker.get("start"), end_time=marker.get("end"), key=key)
for key, marker in markers.items()
]
for step_key, markers in markers.items()
if step_key not in by_step_key
},
)
|
RunStepStatsSnapshot
|
python
|
numba__numba
|
numba/core/ccallback.py
|
{
"start": 415,
"end": 1002
}
|
class ____(_FunctionCompiler):
    """Function-compiler variant that prepares compilation flags for C callbacks."""

    def _customize_flags(self, flags):
        """Adjust *flags* in place for cfunc generation and return them."""
        # Emit only the C-callable wrapper; the CPython wrapper is unwanted.
        flags.no_cfunc_wrapper = False
        flags.no_cpython_wrapper = True
        # Object mode is unsupported for C callbacks: there is no reliable
        # way to obtain the environment from a bare C entry point.
        flags.enable_pyobject = False
        # Defer compiling the IR module until the cfunc wrapper is attached.
        flags.no_compile = True
        if flags.force_pyobject:
            raise NotImplementedError("object mode not allowed in C callbacks")
        return flags
|
_CFuncCompiler
|
python
|
PyCQA__pylint
|
tests/functional/n/none_dunder_protocols.py
|
{
"start": 123,
"end": 171
}
|
class ____(type):
    # Metaclass that sets ``__iter__`` to None, explicitly disabling the
    # iteration protocol on classes that use it as their metaclass.
    __iter__ = None
|
MetaIterable
|
python
|
PyCQA__pylint
|
tests/functional/g/generic_class_syntax.py
|
{
"start": 702,
"end": 783
}
|
class ____(Parent[_T]):
    # Generic subclass parameterized by the same type variable as Parent.
    def func(self):
        # Clears the interval; presumably re-armed elsewhere — TODO confirm.
        self.update_interval = None
|
Child
|
python
|
openai__openai-python
|
src/openai/resources/moderations.py
|
{
"start": 7067,
"end": 7324
}
|
class ____:
    """Raw-response view over an :class:`AsyncModerations` resource.

    Each wrapped method returns the raw HTTP response object instead of
    the parsed model (legacy-response behavior).
    """

    def __init__(self, moderations: AsyncModerations) -> None:
        self._moderations = moderations

        # Re-expose ``create`` wrapped so it yields the raw response.
        self.create = _legacy_response.async_to_raw_response_wrapper(
            moderations.create,
        )
|
AsyncModerationsWithRawResponse
|
python
|
PyCQA__flake8
|
src/flake8/statistics.py
|
{
"start": 3387,
"end": 4357
}
|
class ____:
    """Per-file occurrence record for a single error code.

    A small attribute holder used instead of a bare tuple so call sites
    read naturally and the count can be bumped in place.
    """

    def __init__(
        self, error_code: str, filename: str, message: str, count: int,
    ) -> None:
        """Store the statistic's identifying fields and starting count."""
        self.count = count
        self.error_code = error_code
        self.filename = filename
        self.message = message

    @classmethod
    def create_from(cls, error: Violation) -> Statistic:
        """Build a zero-count statistic from a :class:`flake8.violation.Violation`."""
        return cls(error.code, error.filename, error.text, 0)

    def increment(self) -> None:
        """Record one more occurrence of this error in this file."""
        self.count = self.count + 1
|
Statistic
|
python
|
optuna__optuna
|
optuna/testing/tempfile_pool.py
|
{
"start": 298,
"end": 1274
}
|
class ____:
    """Process-wide singleton pool of named temporary files.

    Files created through :meth:`tempfile` use ``delete=False`` and are
    tracked in a shared pool; they are removed in bulk by :meth:`cleanup`,
    which is registered with :mod:`atexit` so the pool is emptied at
    interpreter shutdown.
    """

    # Shared across all instances (the class is a singleton anyway).
    tempfile_pool: list[IO[Any]] = []

    def __new__(cls, **kwargs: Any) -> "NamedTemporaryFilePool":
        # Classic lazy singleton: build the sole instance once and register
        # its cleanup hook exactly once.  Zero-argument super() avoids
        # naming the class explicitly (the old explicit form referenced a
        # name that need not match the class).
        if not hasattr(cls, "_instance"):
            cls._instance = super().__new__(cls)
            atexit.register(cls._instance.cleanup)
        return cls._instance

    def __init__(self, **kwargs: Any) -> None:
        # Keyword arguments are forwarded verbatim to NamedTemporaryFile.
        self.kwargs = kwargs

    def tempfile(self) -> IO[Any]:
        """Create, track, and return a new named temporary file."""
        # delete=False so the file survives close(); removal happens in cleanup().
        self._tempfile = tempfile.NamedTemporaryFile(delete=False, **self.kwargs)
        self.tempfile_pool.append(self._tempfile)
        return self._tempfile

    def cleanup(self) -> None:
        """Close and remove every file handed out by the pool (best effort)."""
        gc.collect()
        for handle in self.tempfile_pool:
            # Close before unlinking: some platforms (Windows) refuse to
            # remove open files.  close() is idempotent, so already-closed
            # handles are fine.  Tolerate files that were removed already.
            handle.close()
            try:
                os.unlink(handle.name)
            except FileNotFoundError:
                pass

    def __enter__(self) -> IO[Any]:
        return self.tempfile()

    def __exit__(
        self,
        exc_type: type[BaseException],
        exc_val: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        self._tempfile.close()
|
NamedTemporaryFilePool
|
python
|
kamyu104__LeetCode-Solutions
|
Python/path-with-minimum-effort.py
|
{
"start": 3199,
"end": 4747
}
|
class ____(object):
    # LeetCode "Path With Minimum Effort": binary-search the answer (the
    # largest step-height difference allowed) and test each candidate
    # with a bidirectional BFS.  Time: O(m * n * logh), Space: O(m * n).
    def minimumEffortPath(self, heights):
        """
        :type heights: List[List[int]]
        :rtype: int
        """
        directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
        def check(heights, x):  # bi-bfs
            # Reachability test: can the bottom-right cell be reached from
            # the top-left using only moves whose |height difference| <= x?
            lookup = [[False]*len(heights[0]) for _ in xrange(len(heights))]
            left, right = {(0, 0)}, {(len(heights)-1, len(heights[0])-1)}
            while left:
                # Mark the current frontier visited before expanding it.
                for r, c in left:
                    lookup[r][c] = True
                new_left = set()
                for r, c in left:
                    if (r, c) in right:
                        # The two searches met: target reachable under x.
                        return True
                    for dr, dc in directions:
                        nr, nc = r+dr, c+dc
                        if not (0 <= nr < len(heights) and
                                0 <= nc < len(heights[0]) and
                                abs(heights[nr][nc]-heights[r][c]) <= x and
                                not lookup[nr][nc]):
                            continue
                        new_left.add((nr, nc))
                left = new_left
                # Always expand the smaller frontier next (bidirectional BFS).
                if len(left) > len(right):
                    left, right = right, left
            return False

        # Binary search the minimum feasible effort in [0, 10**6].
        left, right = 0, 10**6
        while left <= right:
            mid = left + (right-left)//2
            if check(heights, mid):
                right = mid-1
            else:
                left = mid+1
        return left
# Time: O(m * n * logh)
# Space: O(m * n)
import collections
# bfs solution
|
Solution3
|
python
|
dagster-io__dagster
|
examples/docs_snippets/docs_snippets/docs_beta/guides/automation/asset-sensor-with-config.py
|
{
"start": 23,
"end": 1488
}
|
class ____(dg.Config):
    """Run-config schema consumed by the ``weekly_report`` asset."""

    param1: str


@dg.asset
def daily_sales_data(context: dg.AssetExecutionContext):
    # The asset the sensor below watches; its materialization metadata
    # drives the run config of my_job.
    context.log.info("Asset to watch")
    # highlight-next-line
    yield dg.MaterializeResult(metadata={"specific_property": "value"})


@dg.asset
def weekly_report(context: dg.AssetExecutionContext, config: MyConfig):
    context.log.info(f"Running weekly report with param1: {config.param1}")


# Job targeting weekly_report with a default config value.
my_job = dg.define_asset_job(
    "my_job",
    [weekly_report],
    config=dg.RunConfig(ops={"weekly_report": MyConfig(param1="value")}),
)


@dg.asset_sensor(asset_key=dg.AssetKey("daily_sales_data"), job=my_job)
def daily_sales_data_sensor(context: dg.SensorEvaluationContext, asset_event):
    # Pull the materialization record out of the asset event so its
    # metadata can be inspected.
    materialization: dg.AssetMaterialization = (
        asset_event.dagster_event.event_specific_data.materialization
    )

    # Example custom logic: Check if the dg.asset metadata has a specific property
    # highlight-start
    if "specific_property" in materialization.metadata:
        yield dg.RunRequest(
            run_key=context.cursor,
            run_config=dg.RunConfig(
                ops={
                    "weekly_report": MyConfig(
                        param1=str(materialization.metadata.get("specific_property"))
                    )
                }
            ),
        )
    # highlight-end


defs = dg.Definitions(
    assets=[daily_sales_data, weekly_report],
    jobs=[my_job],
    sensors=[daily_sales_data_sensor],
)
|
MyConfig
|
python
|
huggingface__transformers
|
src/transformers/models/dia/modeling_dia.py
|
{
"start": 24472,
"end": 29199
}
|
class ____(DiaPreTrainedModel):
    """Transformer Decoder Stack using DenseGeneral."""

    def __init__(self, config: DiaDecoderConfig):
        super().__init__(config)
        self.num_channels = config.num_channels
        self.vocab_size = config.vocab_size
        # Embedding over the multi-channel (codebook) token ids.
        self.embeddings = DiaMultiChannelEmbedding(config)
        self.layers = nn.ModuleList(
            [DiaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        # Final RMS norm applied after the last decoder layer.
        self.norm = DiaRMSNorm(config.hidden_size, eps=config.norm_eps)
        self.rotary_emb = DiaRotaryEmbedding(config=config)

    @auto_docstring
    @can_return_tuple
    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        past_key_values: Optional[EncoderDecoderCache] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Union[BaseModelOutputWithPastAndCrossAttentions, tuple]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`):
            The original `decoder_input_ids` in 3D shape to facilitate more efficient computations.

            [What are input IDs?](../glossary#input-ids)
        """
        # input_ids is (batch, seq, num_codebooks); drop the codebook dim.
        batch_size, seq_length = input_ids.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            # Positions of the new tokens within the full (cached + new) sequence.
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=input_ids.device
            )
        if position_ids is None:
            position_ids = cache_position[None, :]

        # RoPE
        hidden_states = self.embeddings(input_ids)

        if attention_mask is None and not is_torchdynamo_compiling():
            # required mask seq length can be calculated via length of past cache
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=input_ids.device)

        # Causal mask for self-attention over cached + current tokens.
        attention_mask = create_causal_mask(
            config=self.config,
            input_embeds=hidden_states,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
        )
        # Bidirectional mask for cross-attention onto the encoder states.
        encoder_attention_mask = create_bidirectional_mask(
            config=self.config,
            input_embeds=hidden_states,
            attention_mask=encoder_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
        )

        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        # Optional collectors, filled only when the respective flag is set.
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = layer(
                hidden_states,
                # Needs to be an arg in order to function properly
                # on inplace operations to be carried (e.g. compile)
                position_embeddings,
                attention_mask,
                encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                past_key_values=past_key_values,
                cache_position=cache_position,
                position_ids=position_ids,
                **kwargs,
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns = all_self_attns + (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )
@auto_docstring(
custom_intro="""
The bare Dia model outputting raw hidden-states without any specific head on top.
"""
)
|
DiaDecoder
|
python
|
keras-team__keras
|
keras/src/layers/rnn/conv_lstm3d.py
|
{
"start": 141,
"end": 8289
}
|
class ____(ConvLSTM):
    """3D Convolutional LSTM.

    Similar to an LSTM layer, but the input transformations
    and recurrent transformations are both convolutional.

    Args:
        filters: int, the dimension of the output space (the number of filters
            in the convolution).
        kernel_size: int or tuple/list of 3 integers, specifying the size of the
            convolution window.
        strides: int or tuple/list of 3 integers, specifying the stride length
            of the convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input such that output has the same
            height/width dimension as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of 3 integers, specifying the dilation
            rate to use for dilated convolution.
        activation: Activation function to use. By default hyperbolic tangent
            activation function is applied (`tanh(x)`).
        recurrent_activation: Activation function to use for the recurrent step.
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
        recurrent_initializer: Initializer for the `recurrent_kernel` weights
            matrix, used for the linear transformation of the recurrent state.
        bias_initializer: Initializer for the bias vector.
        unit_forget_bias: Boolean. If `True`, add 1 to the bias of the forget
            gate at initialization.
            Use in combination with `bias_initializer="zeros"`.
            This is recommended in [Jozefowicz et al., 2015](
            http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
        kernel_regularizer: Regularizer function applied to the `kernel` weights
            matrix.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to.
        kernel_constraint: Constraint function applied to the `kernel` weights
            matrix.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix.
        bias_constraint: Constraint function applied to the bias vector.
        dropout: Float between 0 and 1. Fraction of the units to drop for the
            linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
            for the linear transformation of the recurrent state.
        seed: Random seed for dropout.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence. Default: `False`.
        return_state: Boolean. Whether to return the last state in addition
            to the output. Default: `False`.
        go_backwards: Boolean (default: `False`).
            If `True`, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If `True`, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default: `False`).
            If `True`, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.

    Call arguments:
        inputs: A 6D tensor.
        mask: Binary tensor of shape `(samples, timesteps)` indicating whether a
            given timestep should be masked.
        training: Python boolean indicating whether the layer should behave in
            training mode or in inference mode.
            This is only relevant if `dropout` or `recurrent_dropout` are set.
        initial_state: List of initial state tensors to be passed to the first
            call of the cell.

    Input shape:
        - If `data_format='channels_first'`:
            6D tensor with shape: `(samples, time, channels, *spatial_dims)`
        - If `data_format='channels_last'`:
            6D tensor with shape: `(samples, time, *spatial_dims, channels)`

    Output shape:
        - If `return_state`: a list of tensors. The first tensor is the output.
            The remaining tensors are the last states,
            each 5D tensor with shape: `(samples, filters, *spatial_dims)` if
            `data_format='channels_first'`
            or shape: `(samples, *spatial_dims, filters)` if
            `data_format='channels_last'`.
        - If `return_sequences`: 6D tensor with shape: `(samples, timesteps,
            filters, *spatial_dims)` if data_format='channels_first'
            or shape: `(samples, timesteps, *spatial_dims, filters)` if
            `data_format='channels_last'`.
        - Else, 5D tensor with shape: `(samples, filters, *spatial_dims)` if
            `data_format='channels_first'`
            or shape: `(samples, *spatial_dims, filters)` if
            `data_format='channels_last'`.

    References:
        - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)
            (the current implementation does not include the feedback loop on the
            cells output).
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        **kwargs,
    ):
        # Thin wrapper over the generic ConvLSTM: rank=3 selects three
        # spatial dimensions; everything else is passed through verbatim.
        super().__init__(
            rank=3,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            unit_forget_bias=unit_forget_bias,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            seed=seed,
            **kwargs,
        )
|
ConvLSTM3D
|
python
|
pennersr__django-allauth
|
allauth/account/views.py
|
{
"start": 36013,
"end": 38174
}
|
class ____(NextRedirectMixin, FormView):
    """Base view for reauthentication methods.

    Handles method availability, per-user rate limiting, listing of
    alternative methods, and resuming whatever flow originally required
    reauthentication; subclasses supply the concrete form.
    """

    def dispatch(self, request, *args, **kwargs):
        # Check availability first (may redirect or 403) before spending
        # a rate-limit token on this request.
        resp = self._check_reauthentication_method_available(request)
        if resp:
            return resp
        resp = self._check_ratelimit(request)
        if resp:
            return resp
        return super().dispatch(request, *args, **kwargs)

    def _check_ratelimit(self, request):
        # Returns a 429 response when the per-user "reauthenticate" rate
        # limit is exhausted, else None.
        return ratelimit.consume_or_429(
            self.request,
            action="reauthenticate",
            user=self.request.user,
        )

    def _check_reauthentication_method_available(self, request):
        methods = get_adapter().get_reauthentication_methods(self.request.user)
        if any([m["url"] == request.path for m in methods]):
            # Method is available
            return None
        if not methods:
            # Reauthentication not available
            raise PermissionDenied("Reauthentication not available")
        # Current URL is not a valid method for this user: redirect to the
        # first available method, preserving the "next" parameter.
        url = self.passthrough_next_url(methods[0]["url"])
        return HttpResponseRedirect(url)

    def get_default_success_url(self):
        url = get_adapter(self.request).get_login_redirect_url(self.request)
        return url

    def form_valid(self, form):
        # If a flow was interrupted pending reauthentication, resume it;
        # otherwise fall through to the normal success redirect.
        response = flows.reauthentication.resume_request(self.request)
        if response:
            return response
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        ret = super().get_context_data(**kwargs)
        ret.update(
            {
                "reauthentication_alternatives": self.get_reauthentication_alternatives(),
            }
        )
        return ret

    def get_reauthentication_alternatives(self):
        # Other reauthentication methods the user could use instead of the
        # current one, sorted by description for a stable display order.
        methods = get_adapter().get_reauthentication_methods(self.request.user)
        alts = []
        for method in methods:
            alt = dict(method)
            if self.request.path == alt["url"]:
                continue
            alt["url"] = self.passthrough_next_url(alt["url"])
            alts.append(alt)
        alts = sorted(alts, key=lambda alt: alt["description"])
        return alts
@method_decorator(login_required, name="dispatch")
|
BaseReauthenticateView
|
python
|
getsentry__sentry
|
src/sentry/integrations/slack/utils/threads.py
|
{
"start": 753,
"end": 3357
}
|
class ____:
    """
    Stateless utility class for handling notification action threads.
    This class will be used for the issue and metric alert handlers.
    Eventually, with Notification Platform, we should delete this class.
    """

    @classmethod
    def _save_notification_action_message(
        cls,
        data: NewNotificationActionNotificationMessage,
    ) -> None:
        """Save a notification action message to the repository (best effort)."""
        try:
            action_repository: NotificationActionNotificationMessageRepository = (
                get_default_notification_action_repository()
            )
            action_repository.create_notification_message(data=data)
        except NotificationMessageValidationError as err:
            extra = data.__dict__ if data else None
            _default_logger.info(
                "Validation error for new notification action message", exc_info=err, extra=extra
            )
        except Exception:
            # if there's an error trying to save a notification message, don't let that error block this flow
            # we already log at the repository layer, no need to log again here
            pass

    @classmethod
    def _get_notification_action_for_notification_action(
        cls,
        organization: Organization,
        lifecycle: EventLifecycle,
        action: Action,
        group: Group,
        open_period_start: datetime | None,
        thread_option_default: bool,
    ) -> NotificationActionNotificationMessage | None:
        """Find the thread in which to post a notification action notification as a reply.

        Return None to post the notification as a top-level message.
        """
        # Respect the org-level option: when threading is disabled, always
        # post a new top-level message.
        if not (
            OrganizationOption.objects.get_value(
                organization=organization,
                key="sentry:issue_alerts_thread_flag",
                default=thread_option_default,
            )
        ):
            return None

        parent_notification_message: NotificationActionNotificationMessage | None = None
        try:
            action_repository: NotificationActionNotificationMessageRepository = (
                get_default_notification_action_repository()
            )
            parent_notification_message = action_repository.get_parent_notification_message(
                action=action,
                group=group,
                open_period_start=open_period_start,
            )
        except Exception as e:
            # Record the failure on the lifecycle and degrade to a
            # top-level message instead of failing the notification.
            lifecycle.record_halt(e)
            return None

        return parent_notification_message
|
NotificationActionThreadUtils
|
python
|
getsentry__sentry
|
src/sentry/relocation/services/relocation_export/service.py
|
{
"start": 630,
"end": 903
}
|
class ____(ByRegionName):
    # Routes by the "replying_region_name" argument of the RPC method
    # rather than the default region-name parameter.
    parameter_name: str = "replying_region_name"
# See the comment on /src/sentry/relocation/tasks/process.py::uploading_start for a detailed description of
# how this service fits into the entire SAAS->SAAS relocation workflow.
|
ByReplyingRegionName
|
python
|
tensorflow__tensorflow
|
tensorflow/python/platform/gfile.py
|
{
"start": 4445,
"end": 5065
}
|
class ____(_FileIO):
    """File I/O wrappers without thread locking.

    Note, that this is somewhat like builtin Python file I/O, but
    there are semantic differences to make it more efficient for
    some backing filesystems. For example, a write mode file will
    not be opened until the first write call (to minimize RPC
    invocations in network filesystems).
    """

    @deprecated(None, 'Use tf.gfile.GFile.')
    def __init__(self, name, mode='r'):
        # Deprecated shim: delegates straight to _FileIO.
        super(FastGFile, self).__init__(name=name, mode=mode)

    # Does not alias to Open so that we use our version of GFile to strip
    # 'b' mode.
    Open = GFile
|
FastGFile
|
python
|
ZoranPandovski__al-go-rithms
|
data_structures/doubly_linked_list/python/main.py
|
{
"start": 66,
"end": 234
}
|
class ____:
    """One element of a doubly linked list."""

    def __init__(self, data):
        """Create a detached node holding *data* (no neighbors yet)."""
        self.prev = None
        self.next = None
        self.data = data
# Creation of the main doubly linked list class
|
Node
|
python
|
google__jax
|
jax/_src/hijax.py
|
{
"start": 2239,
"end": 3172
}
|
class ____(core.AbstractValue):
    # Abstract base for high-level ("hijax") types.  Subclasses must
    # override every method below; the asserts make a forgotten override
    # fail loudly at the call site.
    is_high = True
    has_qdd = False  # immutable: no quasi-dynamic data

    # type equality
    def __hash__(self): assert False, "must override"
    def __eq__(self, other): assert False, "must override"

    # lowering from hijax type to lojax types
    def lo_ty(self) -> list[core.AbstractValue]:
        assert False, "must override"

    # define lowering from hijax value to lojax values and back (like pytrees)
    def lower_val(self, hi_val: HiVal) -> list[LoVal]:  # TODO(mattjj); not lovals
        assert False, "must override"
    def raise_val(self, *lo_vals: LoVal) -> HiVal:
        assert False, "must override"

    # autodiff interface
    def to_tangent_aval(self) -> HiType:
        assert False, "must override"
    # the next two are required if this type is itself a tangent type
    def vspace_zero(self) -> HiVal:
        assert False, "must override"
    def vspace_add(self, x: HiVal, y: HiVal) -> HiVal:
        assert False, "must override"
|
HiType
|
python
|
fluentpython__example-code-2e
|
24-class-metaprog/persistent/dblib.py
|
{
"start": 727,
"end": 963
}
|
class ____(Exception):
    """Query returned more than 1 row."""


# Aliases for readability of the mapping below.
SQLType = str
TypeMap = dict[type, SQLType]

# Python field types mapped to the SQLite column types used in generated DDL.
SQL_TYPES: TypeMap = {
    int: 'INTEGER',
    str: 'TEXT',
    float: 'REAL',
    bytes: 'BLOB',
}
|
UnexpectedMultipleResults
|
python
|
python-pillow__Pillow
|
Tests/test_file_avif.py
|
{
"start": 1708,
"end": 2355
}
|
class ____:
    """Behavior of the AVIF plugin when native support is compiled out."""

    def test_unsupported(self, monkeypatch: pytest.MonkeyPatch) -> None:
        # Image.open should warn about missing AVIF support and then fail
        # to identify the file.
        monkeypatch.setattr(AvifImagePlugin, "SUPPORTED", False)
        with pytest.raises(UnidentifiedImageError):
            with pytest.warns(UserWarning, match="AVIF support not installed"):
                with Image.open(TEST_AVIF_FILE):
                    pass

    def test_unsupported_open(self, monkeypatch: pytest.MonkeyPatch) -> None:
        # Constructing the plugin's ImageFile directly (bypassing
        # Image.open dispatch) raises SyntaxError when unsupported.
        monkeypatch.setattr(AvifImagePlugin, "SUPPORTED", False)
        with pytest.raises(SyntaxError):
            AvifImagePlugin.AvifImageFile(TEST_AVIF_FILE)
@skip_unless_feature("avif")
|
TestUnsupportedAvif
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/_client.py
|
{
"start": 20844,
"end": 21875
}
|
class ____:
    """Streaming-response view over an :class:`Anthropic` client.

    Lazily builds and caches a ``*WithStreamingResponse`` wrapper for each
    resource of the underlying client; imports are deferred into the
    properties to avoid import cycles at module load time.
    """

    _client: Anthropic

    def __init__(self, client: Anthropic) -> None:
        self._client = client

    @cached_property
    def completions(self) -> completions.CompletionsWithStreamingResponse:
        from .resources.completions import CompletionsWithStreamingResponse

        return CompletionsWithStreamingResponse(self._client.completions)

    @cached_property
    def messages(self) -> messages.MessagesWithStreamingResponse:
        from .resources.messages import MessagesWithStreamingResponse

        return MessagesWithStreamingResponse(self._client.messages)

    @cached_property
    def models(self) -> models.ModelsWithStreamingResponse:
        from .resources.models import ModelsWithStreamingResponse

        return ModelsWithStreamingResponse(self._client.models)

    @cached_property
    def beta(self) -> beta.BetaWithStreamingResponse:
        from .resources.beta import BetaWithStreamingResponse

        return BetaWithStreamingResponse(self._client.beta)
|
AnthropicWithStreamedResponse
|
python
|
wandb__wandb
|
wandb/vendor/graphql-core-1.1/wandb_graphql/execution/base.py
|
{
"start": 552,
"end": 4091
}
|
class ____(object):
    """Data that must be available at all points during query execution.

    Namely, schema of the type system that is currently executing,
    and the fragments defined in the query document"""

    __slots__ = 'schema', 'fragments', 'root_value', 'operation', 'variable_values', 'errors', 'context_value', \
        'argument_values_cache', 'executor', 'middleware', '_subfields_cache'

    def __init__(self, schema, document_ast, root_value, context_value, variable_values, operation_name, executor, middleware):
        """Constructs a ExecutionContext object from the arguments passed
        to execute, which we will pass throughout the other execution
        methods."""
        errors = []
        operation = None
        fragments = {}

        # Split the document into the operation to run and the fragment
        # definitions it may reference.
        for definition in document_ast.definitions:
            if isinstance(definition, ast.OperationDefinition):
                if not operation_name and operation:
                    raise GraphQLError('Must provide operation name if query contains multiple operations.')

                if not operation_name or definition.name and definition.name.value == operation_name:
                    operation = definition

            elif isinstance(definition, ast.FragmentDefinition):
                fragments[definition.name.value] = definition

            else:
                raise GraphQLError(
                    u'GraphQL cannot execute a request containing a {}.'.format(definition.__class__.__name__),
                    definition
                )

        if not operation:
            if operation_name:
                raise GraphQLError(u'Unknown operation named "{}".'.format(operation_name))

            else:
                raise GraphQLError('Must provide an operation.')

        # Coerce the provided variables against the operation's declared
        # variable definitions.
        variable_values = get_variable_values(schema, operation.variable_definitions or [], variable_values)

        self.schema = schema
        self.fragments = fragments
        self.root_value = root_value
        self.operation = operation
        self.variable_values = variable_values
        self.errors = errors
        self.context_value = context_value
        self.argument_values_cache = {}
        self.executor = executor
        self.middleware = middleware
        self._subfields_cache = {}

    def get_field_resolver(self, field_resolver):
        # Wrap the resolver with the middleware chain, if any.
        if not self.middleware:
            return field_resolver
        return self.middleware.get_field_resolver(field_resolver)

    def get_argument_values(self, field_def, field_ast):
        # Memoized per (field definition, field AST) pair.
        k = field_def, field_ast
        result = self.argument_values_cache.get(k)

        if not result:
            result = self.argument_values_cache[k] = get_argument_values(field_def.args, field_ast.arguments,
                                                                         self.variable_values)

        return result

    def get_sub_fields(self, return_type, field_asts):
        # Memoized collection of the sub-selections of a set of field ASTs.
        k = return_type, tuple(field_asts)
        if k not in self._subfields_cache:
            subfield_asts = DefaultOrderedDict(list)
            visited_fragment_names = set()
            for field_ast in field_asts:
                selection_set = field_ast.selection_set
                if selection_set:
                    subfield_asts = collect_fields(
                        self, return_type, selection_set,
                        subfield_asts, visited_fragment_names
                    )
            self._subfields_cache[k] = subfield_asts
        return self._subfields_cache[k]
|
ExecutionContext
|
python
|
sphinx-doc__sphinx
|
sphinx/domains/cpp/_ast.py
|
{
"start": 37870,
"end": 38815
}
|
class ____(ASTExpression):
    """AST node for the C++ ``noexcept(<expr>)`` operator expression."""

    def __init__(self, expr: ASTExpression) -> None:
        self.expr = expr

    def __eq__(self, other: object) -> bool:
        # Structural equality: same operand expression.
        if not isinstance(other, ASTNoexceptExpr):
            return NotImplemented
        return self.expr == other.expr

    def __hash__(self) -> int:
        return hash(self.expr)

    def _stringify(self, transform: StringifyTransform) -> str:
        return 'noexcept(' + transform(self.expr) + ')'

    def get_id(self, version: int) -> str:
        # 'nx' is the mangling prefix used for noexcept in symbol ids.
        return 'nx' + self.expr.get_id(version)

    def describe_signature(
        self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
    ) -> None:
        # Renders as: noexcept ( <expr> )
        signode += addnodes.desc_sig_keyword('noexcept', 'noexcept')
        signode += addnodes.desc_sig_punctuation('(', '(')
        self.expr.describe_signature(signode, mode, env, symbol)
        signode += addnodes.desc_sig_punctuation(')', ')')
|
ASTNoexceptExpr
|
python
|
plotly__plotly.py
|
plotly/graph_objs/histogram2dcontour/colorbar/_tickformatstop.py
|
{
"start": 233,
"end": 8569
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram2dcontour.colorbar"
_path_str = "histogram2dcontour.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.histogram2dcon
tour.colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram2dcontour.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Tickformatstop
|
python
|
getsentry__sentry
|
src/sentry/hybridcloud/models/orgauthtokenreplica.py
|
{
"start": 521,
"end": 2092
}
|
class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization = FlexibleForeignKey("sentry.Organization", null=False, on_delete=models.CASCADE)
orgauthtoken_id = HybridCloudForeignKey("sentry.OrgAuthToken", null=False, on_delete="cascade")
# The JWT token in hashed form
token_hashed = models.TextField(null=False)
name = models.CharField(max_length=MAX_NAME_LENGTH, null=False, blank=False)
scope_list = ArrayField(models.TextField(), default=list)
created_by_id = HybridCloudForeignKey(
"sentry.User", null=True, blank=True, on_delete="set_null"
)
date_added = models.DateTimeField(default=timezone.now, null=False)
date_deactivated = models.DateTimeField(null=True, blank=True)
class Meta:
app_label = "hybridcloud"
db_table = "hybridcloud_orgauthtokenreplica"
indexes = (models.Index(fields=["token_hashed"]),)
__repr__ = sane_repr("organization_id", "token_hashed")
def __str__(self) -> str:
return force_str(self.token_hashed)
@property
def entity_id(self) -> int:
return self.orgauthtoken_id
def get_audit_log_data(self) -> dict[str, Any]:
return {"name": self.name, "scopes": self.get_scopes()}
def get_allowed_origins(self) -> list[str]:
return []
def get_scopes(self) -> list[str]:
return self.scope_list
def has_scope(self, scope: str) -> bool:
return scope in self.get_scopes()
def is_active(self) -> bool:
return self.date_deactivated is None
|
OrgAuthTokenReplica
|
python
|
pydata__xarray
|
xarray/tests/test_ufuncs.py
|
{
"start": 6411,
"end": 6524
}
|
class ____(DuckArray):
def __array_namespace__(self, *, api_version=None):
return DuckArray2
|
DuckArray2
|
python
|
jina-ai__jina
|
tests/unit/orchestrate/flow/flow-orchestrate/test_ndarray_type.py
|
{
"start": 708,
"end": 2356
}
|
class ____(Executor):
@requests
def check_nparray(self, docs, **kwargs):
embedding_is_nparray = True
tensor_is_nparray = True
for doc in docs:
embedding_is_nparray = embedding_is_nparray and isinstance(
doc.embedding, np.ndarray
)
tensor_is_nparray = tensor_is_nparray and isinstance(doc.tensor, np.ndarray)
for doc in docs:
doc.tags['nparraycheck_embedding'] = embedding_is_nparray
doc.tags['nparraycheck_tensor'] = tensor_is_nparray
@pytest.fixture()
def linear_flow():
f = (
Flow()
.add(uses=MyExec, output_array_type='numpy')
.add(uses=NparrayInEec, output_array_type='list')
.add(uses=ListInExec)
)
return f
def test_array_conversion(linear_flow):
docs = DocumentArray.empty(5)
for doc in docs:
doc.embedding = torch.tensor(np.random.randn(5))
doc.tensor = torch.tensor(np.random.randn(3, 3))
with linear_flow as f:
resp = f.post(on='/foo', inputs=docs)
for doc in resp:
assert doc.tags['nparraycheck_embedding']
assert doc.tags['nparraycheck_tensor']
assert doc.tags['listcheck_embedding']
assert doc.tags['listcheck_tensor']
def test_empty_arrays(linear_flow):
docs = DocumentArray.empty(5)
with linear_flow as f:
resp = f.post(on='/foo', inputs=docs)
for doc in resp:
assert not doc.tags['listcheck_embedding']
assert not doc.tags['listcheck_tensor']
assert not doc.tags['nparraycheck_embedding']
assert not doc.tags['nparraycheck_tensor']
|
NparrayInEec
|
python
|
great-expectations__great_expectations
|
tests/data_context/fixtures/plugins/extended_checkpoint.py
|
{
"start": 579,
"end": 1009
}
|
class ____(LegacyCheckpoint):
def __init__(
self,
name: str,
data_context,
expectation_suite_name: Optional[str] = None,
action_list: Optional[List[dict]] = None,
):
super().__init__(
name=name,
data_context=data_context,
expectation_suite_name=expectation_suite_name,
action_list=action_list,
)
|
ExtendedLegacyCheckpoint
|
python
|
qdrant__qdrant-client
|
tools/async_client_generator/transformers/client/function_def_transformer.py
|
{
"start": 119,
"end": 1052
}
|
class ____(FunctionDefTransformer):
def __init__(
self,
keep_sync: Optional[list[str]] = None,
class_replace_map: Optional[dict[str, str]] = None,
exclude_methods: Optional[list[str]] = None,
async_methods: Optional[list[str]] = None,
):
super().__init__(keep_sync)
self.class_replace_map = class_replace_map if class_replace_map is not None else {}
self.exclude_methods = exclude_methods if exclude_methods is not None else []
self.async_methods = async_methods if async_methods is not None else []
def _keep_sync(self, name: str) -> bool:
return name in self.keep_sync or name not in self.async_methods
def visit_FunctionDef(self, sync_node: ast.FunctionDef) -> Optional[ast.AST]:
if sync_node.name in self.exclude_methods:
return None
return super().visit_FunctionDef(sync_node)
|
ClientFunctionDefTransformer
|
python
|
wandb__wandb
|
wandb/sdk/data_types/video.py
|
{
"start": 2141,
"end": 10482
}
|
class ____(BatchableMedia):
"""A class for logging videos to W&B."""
_log_type = "video-file"
EXTS = ("gif", "mp4", "webm", "ogg")
_width: Optional[int]
_height: Optional[int]
def __init__(
self,
data_or_path: Union[str, pathlib.Path, "np.ndarray", "TextIO", "BytesIO"],
caption: Optional[str] = None,
fps: Optional[int] = None,
format: Optional[Literal["gif", "mp4", "webm", "ogg"]] = None,
):
"""Initialize a W&B Video object.
Args:
data_or_path: Video can be initialized with a path to a file or an io object.
Video can be initialized with a numpy tensor. The numpy tensor
must be either 4 dimensional or 5 dimensional.
The dimensions should be (number of frames, channel, height, width) or
(batch, number of frames, channel, height, width)
The format parameter must be specified with the format argument
when initializing with a numpy array
or io object.
caption: Caption associated with the video for display.
fps: The frame rate to use when encoding raw video frames.
Default value is 4.
This parameter has no effect when data_or_path is a string, or bytes.
format: Format of video, necessary if initializing with a numpy array
or io object. This parameter will be used to determine the format
to use when encoding the video data. Accepted values are "gif",
"mp4", "webm", or "ogg".
If no value is provided, the default format will be "gif".
Examples:
Log a numpy array as a video
```python
import numpy as np
import wandb
with wandb.init() as run:
# axes are (number of frames, channel, height, width)
frames = np.random.randint(
low=0, high=256, size=(10, 3, 100, 100), dtype=np.uint8
)
run.log({"video": wandb.Video(frames, format="mp4", fps=4)})
```
"""
super().__init__(caption=caption)
if format is None:
wandb.termwarn(
"`format` argument was not provided, defaulting to `gif`. "
"This parameter will be required in v0.20.0, "
"please specify the format explicitly."
)
self._format = format or "gif"
self._width = None
self._height = None
self._channels = None
if self._format not in Video.EXTS:
raise ValueError(
"wandb.Video accepts {} formats".format(", ".join(Video.EXTS))
)
if isinstance(data_or_path, (BytesIO, str)) and fps:
msg = (
"`fps` argument does not affect the frame rate of the video "
"when providing a file path or raw bytes."
)
wandb.termwarn(msg)
if isinstance(data_or_path, BytesIO):
filename = os.path.join(
MEDIA_TMP.name, runid.generate_id() + "." + self._format
)
with open(filename, "wb") as f:
f.write(data_or_path.read())
self._set_file(filename, is_tmp=True)
elif isinstance(data_or_path, (str, pathlib.Path)):
data_or_path = str(data_or_path)
_, ext = os.path.splitext(data_or_path)
ext = ext[1:].lower()
if ext not in Video.EXTS:
raise ValueError(
"wandb.Video accepts {} formats".format(", ".join(Video.EXTS))
)
self._set_file(data_or_path, is_tmp=False)
# ffprobe -v error -select_streams v:0 -show_entries stream=width,height -of csv=p=0 data_or_path
else:
if hasattr(data_or_path, "numpy"): # TF data eager tensors
self.data = data_or_path.numpy()
elif util.is_numpy_array(data_or_path):
self.data = data_or_path
else:
raise ValueError(
"wandb.Video accepts a file path or numpy like data as input"
)
fps = fps or 4
if _should_print_spinner():
printer_asyncio.run_async_with_spinner(
printer.new_printer(),
"Encoding video...",
functools.partial(self.encode, fps=fps),
)
else:
self.encode(fps=fps)
def encode(self, fps: int = 4) -> None:
"""Encode the video data to a file.
<!-- lazydoc-ignore: internal -->
"""
# import ImageSequenceClip from the appropriate MoviePy module
mpy = util.get_module(
"moviepy.video.io.ImageSequenceClip",
required='wandb.Video requires moviepy when passing raw data. Install with "pip install wandb[media]"',
)
tensor = self._prepare_video(self.data)
_, self._height, self._width, self._channels = tensor.shape # type: ignore
# encode sequence of images into gif string
clip = mpy.ImageSequenceClip(list(tensor), fps=fps)
filename = os.path.join(
MEDIA_TMP.name, runid.generate_id() + "." + self._format
)
if self._format == "gif":
write_gif_with_image_io(clip, filename)
else:
clip.write_videofile(filename, logger=None)
self._set_file(filename, is_tmp=True)
@classmethod
def get_media_subdir(cls: Type["Video"]) -> str:
"""Get media subdirectory for video files.
<!-- lazydoc-ignore-classmethod: internal -->
"""
return os.path.join("media", "videos")
def to_json(self, run_or_artifact: Union["LocalRun", "Artifact"]) -> dict:
"""Returns the JSON representation expected by the backend.
<!-- lazydoc-ignore: internal -->
"""
json_dict = super().to_json(run_or_artifact)
json_dict["_type"] = self._log_type
if self._width is not None:
json_dict["width"] = self._width
if self._height is not None:
json_dict["height"] = self._height
return json_dict
def _prepare_video(self, video: "np.ndarray") -> "np.ndarray":
"""This logic was mostly taken from tensorboardX."""
np = util.get_module(
"numpy",
required='wandb.Video requires numpy when passing raw data. To get it, run "pip install numpy".',
)
if video.ndim < 4:
raise ValueError(
"Video must be at least 4 dimensions: time, channels, height, width"
)
if video.ndim == 4:
video = video.reshape(1, *video.shape)
b, t, c, h, w = video.shape
if video.dtype != np.uint8:
logging.warning("Converting video data to uint8")
video = video.astype(np.uint8)
def is_power2(num: int) -> bool:
return num != 0 and ((num & (num - 1)) == 0)
# pad to nearest power of 2, all at once
if not is_power2(video.shape[0]):
len_addition = int(2 ** video.shape[0].bit_length() - video.shape[0])
video = np.concatenate(
(video, np.zeros(shape=(len_addition, t, c, h, w))), axis=0
)
n_rows = 2 ** ((b.bit_length() - 1) // 2)
n_cols = video.shape[0] // n_rows
video = video.reshape(n_rows, n_cols, t, c, h, w)
video = np.transpose(video, axes=(2, 0, 4, 1, 5, 3))
video = video.reshape(t, n_rows * h, n_cols * w, c)
return video
@classmethod
def seq_to_json(
cls: Type["Video"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
"""Convert a sequence of Video objects to a JSON representation.
<!-- lazydoc-ignore-classmethod: internal -->
"""
base_path = os.path.join(run.dir, cls.get_media_subdir())
filesystem.mkdir_exists_ok(base_path)
meta = {
"_type": "videos",
"count": len(seq),
"videos": [v.to_json(run) for v in seq],
"captions": Video.captions(seq),
}
return meta
|
Video
|
python
|
huggingface__transformers
|
tests/models/pix2struct/test_modeling_pix2struct.py
|
{
"start": 12824,
"end": 14809
}
|
class ____:
def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
if text_kwargs is None:
text_kwargs = {}
if vision_kwargs is None:
vision_kwargs = {}
self.parent = parent
self.text_model_tester = Pix2StructTextModelTester(parent, **text_kwargs)
self.vision_model_tester = Pix2StructVisionModelTester(parent, **vision_kwargs)
self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test
self.seq_length = self.text_model_tester.seq_length # need seq_length for common tests
self.is_training = is_training
self.max_patches = self.vision_model_tester.max_patches
def prepare_config_and_inputs(self):
text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
vision_config, flattened_patches = self.vision_model_tester.prepare_config_and_inputs()
config = self.get_config(text_config, vision_config)
return config, input_ids, attention_mask, flattened_patches
def get_config(self, text_config, vision_config):
return Pix2StructConfig(
text_config=self.text_model_tester.get_config().to_dict(),
vision_config=self.vision_model_tester.get_config().to_dict(),
projection_dim=64,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, decoder_attention_mask, flattened_patches = config_and_inputs
attention_mask = (flattened_patches.sum(dim=-1) != 0).float()
inputs_dict = {
"decoder_input_ids": input_ids,
"labels": input_ids,
"decoder_attention_mask": decoder_attention_mask,
"flattened_patches": flattened_patches,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
|
Pix2StructModelTester
|
python
|
huggingface__transformers
|
src/transformers/models/glm4v/modeling_glm4v.py
|
{
"start": 31327,
"end": 36091
}
|
class ____(Glm4vPreTrainedModel):
config: Glm4vVisionConfig
input_modalities = ("image", "video")
_no_split_modules = ["Glm4vVisionBlock"]
def __init__(self, config) -> None:
super().__init__(config)
self.spatial_merge_size = config.spatial_merge_size
self.patch_size = config.patch_size
self.embeddings = Glm4vVisionEmbeddings(config)
self.patch_embed = Glm4vVisionPatchEmbed(config)
head_dim = config.hidden_size // config.num_heads
self.rotary_pos_emb = Glm4vVisionRotaryEmbedding(head_dim // 2)
self.blocks = nn.ModuleList([Glm4vVisionBlock(config) for _ in range(config.depth)])
self.merger = Glm4vVisionPatchMerger(
dim=config.out_hidden_size, context_dim=config.intermediate_size, hidden_act=config.hidden_act
)
self.post_conv_layernorm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.downsample = nn.Conv2d(
in_channels=config.hidden_size,
out_channels=config.out_hidden_size,
kernel_size=config.spatial_merge_size,
stride=config.spatial_merge_size,
)
self.post_layernorm = Glm4vRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
self.post_init()
def rot_pos_emb(self, grid_thw):
pos_ids = []
for t, h, w in grid_thw:
hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
hpos_ids = hpos_ids.reshape(
h // self.spatial_merge_size,
self.spatial_merge_size,
w // self.spatial_merge_size,
self.spatial_merge_size,
)
hpos_ids = hpos_ids.permute(0, 2, 1, 3)
hpos_ids = hpos_ids.flatten()
wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
wpos_ids = wpos_ids.reshape(
h // self.spatial_merge_size,
self.spatial_merge_size,
w // self.spatial_merge_size,
self.spatial_merge_size,
)
wpos_ids = wpos_ids.permute(0, 2, 1, 3)
wpos_ids = wpos_ids.flatten()
pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
pos_ids = torch.cat(pos_ids, dim=0)
max_grid_size = grid_thw[:, 1:].max()
rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
return rotary_pos_emb, pos_ids
def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor) -> torch.Tensor:
"""
Args:
hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
The final hidden states of the model.
grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
The temporal, height and width of feature shape of each image in LLM.
Returns:
`torch.Tensor`: hidden_states.
"""
hidden_states = self.patch_embed(hidden_states)
hidden_states = self.post_conv_layernorm(hidden_states)
rotary_pos_emb, image_type_ids = self.rot_pos_emb(grid_thw)
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
position_embeddings = (emb.cos(), emb.sin())
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
dim=0,
# Select dtype based on the following factors:
# - FA2 requires that cu_seqlens_q must have dtype int32
# - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
# See https://github.com/huggingface/transformers/pull/34852 for more information
dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
hidden_states = self.embeddings(hidden_states, seqlens, grid_thw, image_type_ids[:, 0], image_type_ids[:, 1])
for blk in self.blocks:
hidden_states = blk(
hidden_states,
cu_seqlens=cu_seqlens,
position_embeddings=position_embeddings,
)
hidden_states = self.post_layernorm(hidden_states)
hidden_states = hidden_states.view(
-1, self.spatial_merge_size, self.spatial_merge_size, hidden_states.shape[-1]
)
hidden_states = hidden_states.permute(0, 3, 1, 2)
hidden_states = self.downsample(hidden_states).view(-1, self.config.out_hidden_size)
hidden_states = self.merger(hidden_states)
return hidden_states
@auto_docstring
|
Glm4vVisionModel
|
python
|
astropy__astropy
|
astropy/visualization/wcsaxes/tests/test_images.py
|
{
"start": 1735,
"end": 48119
}
|
class ____(BaseImageTests):
@figure_test
def test_tight_layout(self):
# Check that tight_layout works on a WCSAxes.
fig = Figure(figsize=(8, 6))
canvas = FigureCanvasAgg(fig)
for i in (1, 2):
fig.add_subplot(2, 1, i, projection=WCS(self.msx_header))
fig.tight_layout()
canvas.draw()
return fig
@figure_test
def test_image_plot(self):
# Test for plotting image and also setting values of ticks
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0.0, 0.20] * u.degree, size=5, width=1)
canvas.draw()
return fig
@figure_test
def test_axes_off(self):
# Test for turning the axes off
fig = Figure(figsize=(3, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header))
ax.imshow(np.arange(12).reshape((3, 4)))
ax.set_axis_off()
canvas.draw()
return fig
@figure_test
@pytest.mark.parametrize("axisbelow", [True, False, "line"])
def test_axisbelow(self, axisbelow):
# Test that tick marks, labels, and gridlines are drawn with the
# correct zorder controlled by the axisbelow property.
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_axisbelow(axisbelow)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0.0, 0.20] * u.degree, size=5, width=1)
ax.grid()
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# Add an image (default zorder=0).
ax.imshow(np.zeros((64, 64)))
# Add a patch (default zorder=1).
r = Rectangle((30.0, 50.0), 60.0, 50.0, facecolor="green", edgecolor="red")
ax.add_patch(r)
# Add a line (default zorder=2).
ax.plot([32, 128], [32, 128], linewidth=10)
canvas.draw()
return fig
@figure_test
def test_contour_overlay(self):
# Test for overlaying contours on images
path = get_pkg_data_filename("galactic_center/gc_msx_e.fits")
with fits.open(path) as pf:
data = pf[0].data
wcs_msx = WCS(self.msx_header)
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contour(
data,
transform=ax.get_transform(wcs_msx),
colors="orange",
levels=[2.5e-5, 5e-5, 1.0e-4],
)
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0.0, 720.0)
ax.set_ylim(0.0, 720.0)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
canvas.draw()
return fig
@figure_test
def test_contourf_overlay(self):
# Test for overlaying contours on images
path = get_pkg_data_filename("galactic_center/gc_msx_e.fits")
with fits.open(path) as pf:
data = pf[0].data
wcs_msx = WCS(self.msx_header)
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contourf(
data, transform=ax.get_transform(wcs_msx), levels=[2.5e-5, 5e-5, 1.0e-4]
)
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0.0, 720.0)
ax.set_ylim(0.0, 720.0)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
canvas.draw()
return fig
@figure_test
def test_overlay_features_image(self):
# Test for overlaying grid, changing format of ticks, setting spacing
# and number of ticks
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.25, 0.25, 0.65, 0.65], projection=WCS(self.msx_header), aspect="equal"
)
# Change the format of the ticks
ax.coords[0].set_major_formatter("dd:mm:ss")
ax.coords[1].set_major_formatter("dd:mm:ss.ssss")
# Overlay grid on image
ax.grid(color="red", alpha=1.0, lw=1, linestyle="dashed")
# Set the spacing of ticks on the 'glon' axis to 4 arcsec
ax.coords["glon"].set_ticks(spacing=4 * u.arcsec, size=5, width=1)
# Set the number of ticks on the 'glat' axis to 9
ax.coords["glat"].set_ticks(number=9, size=5, width=1)
# Set labels on axes
ax.coords["glon"].set_axislabel("Galactic Longitude", minpad=1.6)
ax.coords["glat"].set_axislabel("Galactic Latitude", minpad=-0.75)
# Change the frame linewidth and color
ax.coords.frame.set_color("red")
ax.coords.frame.set_linewidth(2)
assert ax.coords.frame.get_color() == "red"
assert ax.coords.frame.get_linewidth() == 2
canvas.draw()
return fig
@figure_test
def test_curvilinear_grid_patches_image(self):
# Overlay curvilinear grid and patches on image
fig = Figure(figsize=(8, 8))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.rosat_header), aspect="equal"
)
ax.set_xlim(-0.5, 479.5)
ax.set_ylim(-0.5, 239.5)
ax.grid(color="black", alpha=1.0, lw=1, linestyle="dashed")
p = Circle((300, 100), radius=40, ec="yellow", fc="none")
ax.add_patch(p)
p = Circle(
(30.0, 20.0),
radius=20.0,
ec="orange",
fc="none",
transform=ax.get_transform("world"),
)
ax.add_patch(p)
p = Circle(
(60.0, 50.0),
radius=20.0,
ec="red",
fc="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(p)
p = Circle(
(40.0, 60.0),
radius=20.0,
ec="green",
fc="none",
transform=ax.get_transform("galactic"),
)
ax.add_patch(p)
canvas.draw()
return fig
@figure_test
def test_cube_slice_image(self):
# Test for cube slicing
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_axislabel("Velocity m/s")
ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1)
ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[0].grid(grid_type="contours", color="purple", linestyle="solid")
ax.coords[1].grid(grid_type="contours", color="orange", linestyle="solid")
ax.coords[2].grid(grid_type="contours", color="red", linestyle="solid")
canvas.draw()
return fig
@figure_test
def test_cube_slice_image_lonlat(self):
# Test for cube slicing. Here we test with longitude and latitude since
# there is some longitude-specific code in _update_grid_contour.
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=("x", "y", 50),
aspect="equal",
)
ax.set_xlim(-0.5, 106.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].grid(grid_type="contours", color="blue", linestyle="solid")
ax.coords[1].grid(grid_type="contours", color="red", linestyle="solid")
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
canvas.draw()
return fig
@figure_test
def test_plot_coord(self):
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
lines = ax.plot_coord(c, "o")
# Test that plot_coord returns the results from ax.plot
assert isinstance(lines, list)
assert isinstance(lines[0], matplotlib.lines.Line2D)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
canvas.draw()
return fig
@figure_test
def test_scatter_coord(self):
from matplotlib.collections import PathCollection
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
sc = ax.scatter_coord(c, marker="o")
# Test that plot_coord returns the results from ax.plot
assert isinstance(sc, PathCollection)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
canvas.draw()
return fig
@figure_test
def test_text_coord(self):
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
text = ax.text_coord(c, "Sample Label", color="blue", ha="right", va="top")
# Test that plot_coord returns the results from ax.text
assert isinstance(text, matplotlib.text.Text)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
canvas.draw()
return fig
@figure_test
def test_plot_line(self):
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg)
ax.plot_coord(c)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
canvas.draw()
return fig
@figure_test
def test_changed_axis_units(self):
# Test to see if changing the units of axis works
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].set_ticks_position("")
ax.coords[0].set_ticklabel_position("")
ax.coords[0].set_axislabel_position("")
ax.coords[1].set_ticks_position("lr")
ax.coords[1].set_ticklabel_position("l")
ax.coords[1].set_axislabel_position("l")
ax.coords[2].set_ticks_position("bt")
ax.coords[2].set_ticklabel_position("b")
ax.coords[2].set_axislabel_position("b")
ax.coords[2].set_major_formatter("x.xx")
ax.coords[2].set_format_unit(u.km / u.s)
ax.coords[2].set_axislabel("Velocity km/s")
ax.coords[1].set_ticks(width=1)
ax.coords[2].set_ticks(width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
canvas.draw()
return fig
@figure_test
def test_minor_ticks(self):
# Test for drawing minor ticks
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].set_ticks_position("")
ax.coords[0].set_ticklabel_position("")
ax.coords[0].set_axislabel_position("")
ax.coords[1].set_ticks_position("lr")
ax.coords[1].set_ticklabel_position("l")
ax.coords[1].set_axislabel_position("l")
ax.coords[2].set_ticks_position("bt")
ax.coords[2].set_ticklabel_position("b")
ax.coords[2].set_axislabel_position("b")
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].display_minor_ticks(True)
ax.coords[1].display_minor_ticks(True)
ax.coords[2].set_minor_frequency(3)
ax.coords[1].set_minor_frequency(10)
canvas.draw()
return fig
@figure_test
def test_ticks_labels(self):
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.coords[0].set_ticks(size=10, color="blue", alpha=0.2, width=1)
ax.coords[1].set_ticks(size=20, color="red", alpha=0.9, width=1)
ax.coords[0].set_ticks_position("all")
ax.coords[1].set_ticks_position("all")
ax.coords[0].set_axislabel("X-axis", size=20)
ax.coords[1].set_axislabel(
"Y-axis",
color="green",
size=25,
weight="regular",
style="normal",
family="cmtt10",
)
ax.coords[0].set_axislabel_position("t")
ax.coords[1].set_axislabel_position("r")
ax.coords[0].set_ticklabel(
color="purple",
size=15,
alpha=1,
weight="light",
style="normal",
family="cmss10",
)
ax.coords[1].set_ticklabel(
color="black", size=18, alpha=0.9, weight="bold", family="cmr10"
)
ax.coords[0].set_ticklabel_position("all")
ax.coords[1].set_ticklabel_position("r")
canvas.draw()
return fig
@figure_test
def test_no_ticks(self):
# Check that setting no ticks works
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks(number=0)
ax.coords[0].grid(True)
canvas.draw()
return fig
@figure_test
def test_rcparams(self):
# Test custom rcParams
with rc_context(
{
"axes.labelcolor": "purple",
"axes.labelsize": 14,
"axes.labelweight": "bold",
"axes.linewidth": 3,
"axes.facecolor": "0.5",
"axes.edgecolor": "green",
"xtick.color": "red",
"xtick.labelsize": 8,
"xtick.direction": "in",
"xtick.minor.visible": True,
"xtick.minor.size": 5,
"xtick.major.size": 20,
"xtick.major.width": 3,
"xtick.major.pad": 10,
"grid.color": "blue",
"grid.linestyle": ":",
"grid.linewidth": 1,
"grid.alpha": 0.5,
}
):
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
ax = WCSAxes(fig, [0.15, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.grid()
ax.set_xlabel("X label")
ax.set_ylabel("Y label")
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
canvas.draw()
return fig
@figure_test
def test_tick_angles(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels. Addresses #45, #46.
w = WCS()
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = "ICRS"
w.wcs.equinox = 2000.0
fig = Figure(figsize=(3, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color="gray", alpha=0.5, linestyle="solid")
ax.coords["ra"].set_ticks(color="red", size=20)
ax.coords["dec"].set_ticks(color="red", size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
canvas.draw()
return fig
@figure_test
def test_tick_angles_non_square_axes(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels, and the axes are
# non-square.
w = WCS()
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = "ICRS"
w.wcs.equinox = 2000.0
fig = Figure(figsize=(6, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color="gray", alpha=0.5, linestyle="solid")
ax.coords["ra"].set_ticks(color="red", size=20)
ax.coords["dec"].set_ticks(color="red", size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
canvas.draw()
return fig
@figure_test
def test_set_coord_type(self):
# Test for setting coord_type
fig = Figure(figsize=(3, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.2, 0.2, 0.6, 0.6], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_coord_type("scalar")
ax.coords[1].set_coord_type("scalar")
ax.coords[0].set_major_formatter("x.xxx")
ax.coords[1].set_major_formatter("x.xxx")
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
canvas.draw()
return fig
@figure_test
def test_ticks_regression(self):
# Regression test for a bug that caused ticks aligned exactly with a
# sampled frame point to not appear. This also checks that tick labels
# don't get added more than once, and that no error occurs when e.g.
# the top part of the frame is all at the same coordinate as one of the
# potential ticks (which causes the tick angle calculation to return
# NaN).
wcs = WCS(self.slice_header)
fig = Figure(figsize=(3, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="auto")
limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1]
ax.set_ylim(*limits)
ax.coords[0].set_ticks(spacing=0.002 * u.deg)
ax.coords[1].set_ticks(spacing=5 * u.km / u.s)
ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels
ax.coords[1].set_ticklabel(alpha=0.5)
ax.coords[0].set_ticklabel_position("all")
ax.coords[1].set_ticklabel_position("all")
ax.coords[0].set_axislabel_position("b")
ax.coords[1].set_axislabel_position("l")
canvas.draw()
return fig
@figure_test
def test_axislabels_regression(self):
# Regression test for a bug that meant that if tick labels were made
# invisible with ``set_visible(False)``, they were still added to the
# list of bounding boxes for tick labels, but with default values of 0
# to 1, which caused issues.
wcs = WCS(self.msx_header)
fig = Figure(figsize=(3, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="auto")
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
ax.coords[1].set_axislabel_visibility_rule("always")
ax.coords[1].set_ticklabel_visible(False)
canvas.draw()
return fig
@figure_test(savefig_kwargs={"bbox_inches": "tight"})
def test_noncelestial_angular(self, tmp_path):
# Regression test for a bug that meant that when passing a WCS that had
# angular axes and using set_coord_type to set the coordinates to
# longitude/latitude, but where the WCS wasn't recognized as celestial,
# the WCS units are not converted to deg, so we can't assume that
# transform will always return degrees.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["solar-x", "solar-y"]
wcs.wcs.cunit = ["arcsec", "arcsec"]
fig = Figure(figsize=(3, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.zeros([1024, 1024]), origin="lower")
ax.coords[0].set_coord_type("longitude", coord_wrap=180 * u.deg)
ax.coords[1].set_coord_type("latitude")
ax.coords[0].set_major_formatter("s.s")
ax.coords[1].set_major_formatter("s.s")
ax.coords[0].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.coords[1].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.grid(color="white", ls="solid")
# Force drawing (needed for format_coord)
fig.savefig(tmp_path / "nothing")
assert ax.format_coord(512, 512) == "513.0 513.0 (world)"
canvas.draw()
return fig
@figure_test
def test_patches_distortion(self, tmp_path):
# Check how patches get distorted (and make sure that scatter markers
# and SphericalCircle don't)
wcs = WCS(self.msx_header)
fig = Figure(figsize=(3, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="equal")
# Pixel coordinates
r = Rectangle((30.0, 50.0), 60.0, 50.0, edgecolor="green", facecolor="none")
ax.add_patch(r)
# FK5 coordinates
r = Rectangle(
(266.4, -28.9),
0.3,
0.3,
edgecolor="cyan",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(r)
# FK5 coordinates
c = Circle(
(266.4, -29.1),
0.15,
edgecolor="magenta",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(c)
# Pixel coordinates
ax.scatter(
[40, 100, 130],
[30, 130, 60],
s=100,
edgecolor="red",
facecolor=(1, 0, 0, 0.5),
)
# World coordinates (should not be distorted)
ax.scatter(
266.78238,
-28.769255,
transform=ax.get_transform("fk5"),
s=300,
edgecolor="red",
facecolor="none",
)
# World coordinates (should not be distorted)
r1 = SphericalCircle(
(266.4 * u.deg, -29.1 * u.deg),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(r1)
r2 = SphericalCircle(
SkyCoord(266.4 * u.deg, -29.1 * u.deg),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
with pytest.warns(
AstropyUserWarning,
match="Received `center` of representation type "
"<class 'astropy.coordinates.*CartesianRepresentation'> "
"will be converted to SphericalRepresentation",
):
r3 = SphericalCircle(
SkyCoord(
x=-0.05486461,
y=-0.87204803,
z=-0.48633538,
representation_type="cartesian",
),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
# Test to verify that SphericalCircle works irrespective of whether
# the input(center) is a tuple or a SkyCoord object.
assert (r1.get_xy() == r2.get_xy()).all()
assert np.allclose(r1.get_xy(), r3.get_xy())
assert np.allclose(r2.get_xy()[0], [266.4, -29.25])
canvas.draw()
return fig
@figure_test
def test_quadrangle(self, tmp_path):
# Test that Quadrangle can have curved edges while Rectangle does not
wcs = WCS(self.msx_header)
fig = Figure(figsize=(3, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="equal")
ax.set_xlim(0, 10000)
ax.set_ylim(-10000, 0)
# Add a quadrangle patch (100 degrees by 20 degrees)
q = Quadrangle(
(255, -70) * u.deg,
100 * u.deg,
20 * u.deg,
label="Quadrangle",
edgecolor="blue",
facecolor="none",
transform=ax.get_transform("icrs"),
)
ax.add_patch(q)
# Add a rectangle patch (100 degrees by 20 degrees)
r = Rectangle(
(255, -70),
100,
20,
label="Rectangle",
edgecolor="red",
facecolor="none",
linestyle="--",
transform=ax.get_transform("icrs"),
)
ax.add_patch(r)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
canvas.draw()
return fig
@figure_test
def test_beam_shape_from_args(self, tmp_path):
# Test for adding the beam shape with the beam parameters as arguments
wcs = WCS(self.msx_header)
fig = Figure(figsize=(4, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_beam(
ax,
major=2 * u.arcmin,
minor=1 * u.arcmin,
angle=-30 * u.degree,
corner="bottom right",
frame=True,
borderpad=0.0,
pad=1.0,
color="black",
)
canvas.draw()
return fig
@figure_test
def test_beam_shape_from_header(self, tmp_path):
# Test for adding the beam shape with the beam parameters from a header
hdr = self.msx_header
hdr["BMAJ"] = (2 * u.arcmin).to(u.degree).value
hdr["BMIN"] = (1 * u.arcmin).to(u.degree).value
hdr["BPA"] = 30.0
wcs = WCS(hdr)
fig = Figure(figsize=(4, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_beam(ax, header=hdr)
canvas.draw()
return fig
@figure_test
def test_scalebar(self, tmp_path):
# Test for adding a scale bar
wcs = WCS(self.msx_header)
fig = Figure(figsize=(4, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_scalebar(
ax,
2 * u.arcmin,
label="2'",
corner="top right",
borderpad=1.0,
label_top=True,
)
canvas.draw()
return fig
@figure_test
def test_elliptical_frame(self):
# Regression test for a bug (astropy/astropy#6063) that caused labels to
# be incorrectly simplified.
wcs = WCS(self.msx_header)
fig = Figure(figsize=(5, 3))
canvas = FigureCanvasAgg(fig)
fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame)
canvas.draw()
return fig
@figure_test
def test_hms_labels(self):
# This tests the appearance of the hms superscripts in tick labels
fig = Figure(figsize=(3, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.3, 0.2, 0.65, 0.6], projection=WCS(self.twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
canvas.draw()
return fig
@figure_test(style={"text.usetex": True})
def test_latex_labels(self):
fig = Figure(figsize=(3, 3))
canvas = FigureCanvasAgg(fig)
ax = fig.add_axes(
[0.3, 0.2, 0.65, 0.6], projection=WCS(self.twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
canvas.draw()
return fig
@figure_test
def test_tick_params(self):
# This is a test to make sure that tick_params works correctly. We try
# and test as much as possible with a single reference image.
wcs = WCS()
wcs.wcs.ctype = ["lon", "lat"]
fig = Figure(figsize=(6, 6))
canvas = FigureCanvasAgg(fig)
# The first subplot tests:
# - that plt.tick_params works
# - that by default both axes are changed
# - changing the tick direction and appearance, the label appearance and padding
ax = fig.add_subplot(2, 2, 1, projection=wcs)
ax.tick_params(
direction="in",
length=20,
width=5,
pad=6,
labelsize=6,
color="red",
labelcolor="blue",
)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The second subplot tests:
# - that specifying grid parameters doesn't actually cause the grid to
# be shown (as expected)
# - that axis= can be given integer coordinates or their string name
# - that the tick positioning works (bottom/left/top/right)
# Make sure that we can pass things that can index coords
ax = fig.add_subplot(2, 2, 2, projection=wcs)
ax.tick_params(
axis=0,
direction="in",
length=20,
width=5,
pad=4,
labelsize=6,
color="red",
labelcolor="blue",
bottom=True,
grid_color="purple",
)
ax.tick_params(
axis="lat",
direction="out",
labelsize=8,
color="blue",
labelcolor="purple",
left=True,
right=True,
grid_color="red",
)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The third subplot tests:
# - that ax.tick_params works
# - that the grid has the correct settings once shown explicitly
# - that we can use axis='x' and axis='y'
ax = fig.add_subplot(2, 2, 3, projection=wcs)
ax.tick_params(
axis="x",
direction="in",
length=20,
width=5,
pad=20,
labelsize=6,
color="red",
labelcolor="blue",
bottom=True,
grid_color="purple",
)
ax.tick_params(
axis="y",
direction="out",
labelsize=8,
color="blue",
labelcolor="purple",
left=True,
right=True,
grid_color="red",
)
ax.grid()
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The final subplot tests:
# - that we can use tick_params on a specific coordinate
# - that the label positioning can be customized
# - that the colors argument works
# - that which='minor' works
ax = fig.add_subplot(2, 2, 4, projection=wcs)
ax.coords[0].tick_params(
length=4,
pad=2,
colors="orange",
labelbottom=True,
labeltop=True,
labelsize=10,
)
ax.coords[1].display_minor_ticks(True)
ax.coords[1].tick_params(which="minor", length=6)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
canvas.draw()
return fig
@pytest.fixture
def wave_wcs_1d():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["WAVE"]
wcs.wcs.cunit = ["m"]
wcs.wcs.crpix = [1]
wcs.wcs.cdelt = [5]
wcs.wcs.crval = [45]
wcs.wcs.set()
return wcs
@figure_test
def test_1d_plot_1d_wcs(wave_wcs_1d):
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
(lines,) = ax.plot([10, 12, 14, 12, 10])
ax.set_xlabel("this is the x-axis")
ax.set_ylabel("this is the y-axis")
canvas.draw()
return fig
@figure_test
def test_1d_plot_1d_wcs_format_unit(wave_wcs_1d):
"""
This test ensures that the format unit is updated and displayed for both
the axis ticks and default axis labels.
"""
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
(lines,) = ax.plot([10, 12, 14, 12, 10])
ax.coords[0].set_format_unit("nm")
canvas.draw()
return fig
@figure_test
def test_1d_plot_1d_wcs_get_transform(wave_wcs_1d):
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
ax.plot([100, 200, 300], [2, 3, 2], transform=ax.get_transform("world"))
return fig
@pytest.fixture
def spatial_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [15] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
return wcs
@figure_test
def test_1d_plot_2d_wcs_correlated(spatial_wcs_2d):
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d, slices=("x", 0))
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
ax.coords["glon"].set_ticks(color="red")
ax.coords["glon"].set_ticklabel(color="red")
ax.coords["glon"].grid(color="red")
ax.coords["glat"].set_ticks(color="blue")
ax.coords["glat"].set_ticklabel(color="blue")
ax.coords["glat"].grid(color="blue")
canvas.draw()
return fig
@pytest.fixture
def spatial_wcs_2d_small_angle():
"""
This WCS has an almost linear correlation between the pixel and world axes
close to the reference pixel.
"""
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [10 / 3600, 5 / 3600]
wcs.wcs.crval = [0] * 2
wcs.wcs.set()
return wcs
@pytest.mark.parametrize(
"slices, bottom_axis",
[
# Remember SLLWCS takes slices in array order
(np.s_[0, :], "custom:pos.helioprojective.lon"),
(np.s_[:, 0], "custom:pos.helioprojective.lat"),
],
)
@figure_test
def test_1d_plot_1d_sliced_low_level_wcs(
spatial_wcs_2d_small_angle, slices, bottom_axis
):
"""
Test that a SLLWCS through a coupled 2D WCS plots as line OK.
"""
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle[slices])
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
# Draw to trigger rendering the ticks.
canvas.draw()
assert ax.coords[bottom_axis].get_ticks_position() == ["b", "#"]
return fig
@pytest.mark.parametrize(
"slices, bottom_axis", [(("x", 0), "hpln"), ((0, "x"), "hplt")]
)
@figure_test
def test_1d_plot_put_varying_axis_on_bottom_lon(
spatial_wcs_2d_small_angle, slices, bottom_axis
):
"""
When we plot a 1D slice through spatial axes, we want to put the axis which
actually changes on the bottom.
For example an aligned wcs, pixel grid where you plot a lon slice through a
lat axis, you would end up with no ticks on the bottom as the lon doesn't
change, and a set of lat ticks on the top because it does but it's the
correlated axis not the actual one you are plotting against.
"""
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle, slices=slices)
ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
# Draw to trigger rendering the ticks.
canvas.draw()
assert ax.coords[bottom_axis].get_ticks_position() == ["b", "#"]
return fig
@figure_test
def test_allsky_labels_wrap():
# Regression test for a bug that caused some tick labels to not be shown
# when looking at all-sky maps in the case where coord_wrap < 360
fig = Figure(figsize=(4, 4))
canvas = FigureCanvasAgg(fig)
icen = 0
for ctype in [("GLON-CAR", "GLAT-CAR"), ("HGLN-CAR", "HGLT-CAR")]:
for cen in [0, 90, 180, 270]:
icen += 1
wcs = WCS(naxis=2)
wcs.wcs.ctype = ctype
wcs.wcs.crval = cen, 0
wcs.wcs.crpix = 360.5, 180.5
wcs.wcs.cdelt = -0.5, 0.5
ax = fig.add_subplot(8, 1, icen, projection=wcs)
ax.set_xlim(-0.5, 719.5)
ax.coords[0].set_ticks(spacing=50 * u.deg)
ax.coords[0].set_ticks_position("b")
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
ax.coords[1].set_ticklabel_visible(False)
ax.coords[1].set_ticks_visible(False)
fig.subplots_adjust(hspace=2, left=0.05, right=0.95, bottom=0.1, top=0.95)
canvas.draw()
return fig
@figure_test
def test_tickable_gridlines():
wcs = WCS(
{
"naxis": 2,
"naxis1": 360,
"naxis2": 180,
"crpix1": 180.5,
"crpix2": 90.5,
"cdelt1": -1,
"cdelt2": 1,
"ctype1": "RA---CAR",
"ctype2": "DEC--CAR",
}
)
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(projection=wcs)
ax.set_xlim(-0.5, 360 - 0.5)
ax.set_ylim(-0.5, 150 - 0.5)
lon, lat = ax.coords
lon.grid()
lat.grid()
overlay = ax.get_coords_overlay("galactic")
overlay[0].set_ticks(spacing=30 * u.deg)
overlay[1].set_ticks(spacing=30 * u.deg)
# Test both single-character and multi-character names
overlay[1].add_tickable_gridline("g", -30 * u.deg)
overlay[0].add_tickable_gridline("const-glon", 30 * u.deg)
overlay[0].grid(color="magenta")
overlay[0].set_ticklabel_position("gt")
overlay[0].set_ticklabel(color="magenta")
overlay[0].set_axislabel("Galactic longitude", color="magenta")
overlay[1].grid(color="blue")
overlay[1].set_ticklabel_position(("const-glon", "r"))
overlay[1].set_ticklabel(color="blue")
overlay[1].set_axislabel("Galactic latitude", color="blue")
canvas.draw()
return fig
@pytest.fixture
def nondegree_frame():
# Provide a frame where the default units are not degrees for either longitude or latitude
class FakeICRS(ICRS):
frame_specific_representation_info = {
SphericalRepresentation: [
RepresentationMapping("lon", "ra", u.hourangle),
RepresentationMapping("lat", "dec", u.arcmin),
]
}
# We need valid transformations to/from a real frame, and they are just identity transformations
trans1 = StaticMatrixTransform(
np.identity(3), ICRS, FakeICRS, register_graph=frame_transform_graph
)
trans2 = StaticMatrixTransform(
np.identity(3), FakeICRS, ICRS, register_graph=frame_transform_graph
)
yield FakeICRS
# Clean up the transformation graph so that other tests are not affected
trans1.unregister(frame_transform_graph)
trans2.unregister(frame_transform_graph)
@figure_test
def test_overlay_nondegree_unit(nondegree_frame):
wcs = WCS(
{
"CTYPE1": "RA---TAN",
"CTYPE2": "DEC--TAN",
"CDELT1": -1,
"CDELT2": 1,
"CRPIX1": 20 + 0.5,
"CRPIX2": 0 + 0.5,
"CRVAL1": 0,
"CRVAL2": 0,
}
)
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(projection=wcs)
ax.set_xlim(-0.5, 20 - 0.5)
ax.set_ylim(-0.5, 20 - 0.5)
ax.set_aspect("equal")
ax.coords[0].set_ticks_position("b")
ax.coords[0].grid()
ax.coords[1].set_ticks_position("l")
ax.coords[1].set_ticks(color="b")
ax.coords[1].set_ticklabel(color="b")
ax.coords[1].grid(color="b")
overlay = ax.get_coords_overlay(nondegree_frame())
overlay[0].set_ticks_position("t")
overlay[1].set_ticks_position("r")
overlay[1].set_ticks(color="r")
overlay[1].set_ticklabel(color="r")
overlay[1].grid(color="r", linestyle="dashed")
canvas.draw()
return fig
@figure_test
def test_nosimplify():
wcs = WCS(
{
"ctype1": "RA---CAR",
"ctype2": "DEC--CAR",
"crval1": 0,
"crval2": 0,
"cdelt1": -1,
"cdelt2": 1,
"crpix1": 1,
"crpix2": 1,
}
)
fig = Figure(figsize=(7, 8))
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(projection=wcs)
ax.coords[0].set_ticks(spacing=0.25 * u.hourangle)
ax.coords[0].set_ticklabel(rotation=90, pad=25, simplify=False)
ax.coords[0].set_ticklabel_position("bt")
ax.coords[1].set_ticks(spacing=0.25 * u.deg)
ax.coords[1].set_ticklabel(simplify=False)
ax.coords[1].set_ticklabel_position("lr")
ax.set_xlim(-30, 30)
ax.set_ylim(-2, 2)
ax.set_aspect(15)
ax.grid()
canvas.draw()
return fig
@figure_test
def test_custom_formatter(spatial_wcs_2d_small_angle):
def double_format(value, **kwargs):
if np.iterable(value):
return [f"{(v * 2):.4f}" for v in value]
else:
return f"{(value * 2):.2f}"
def fruit_format(value, **kwargs):
fruits = ["apple", "pear", "banana", "orange", "kiwi", "grape"]
if np.iterable(value):
return (fruits * 10)[: len(value)]
else:
return "apple"
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle)
ax.coords[0].set_major_formatter(double_format)
ax.coords[1].set_major_formatter(fruit_format)
canvas.draw()
return fig
|
TestBasic
|
python
|
getsentry__sentry
|
src/sentry/utils/snuba_rpc.py
|
{
"start": 2452,
"end": 2496
}
|
class ____(SnubaError):
pass
|
SnubaRPCError
|
python
|
getsentry__sentry
|
src/sentry/mail/forms/notify_email.py
|
{
"start": 157,
"end": 392
}
|
class ____(MemberTeamForm[ActionTargetType]):
targetType = forms.ChoiceField(choices=ACTION_CHOICES)
teamValue = ActionTargetType.TEAM
memberValue = ActionTargetType.MEMBER
targetTypeEnum = ActionTargetType
|
NotifyEmailForm
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/spanner.py
|
{
"start": 1422,
"end": 1627
}
|
class ____(BaseGoogleLink):
"""Helper class for constructing Spanner Database Link."""
name = "Spanner Database"
key = "spanner_database"
format_str = SPANNER_DATABASE_LINK
|
SpannerDatabaseLink
|
python
|
numba__numba
|
numba/cuda/cudadrv/driver.py
|
{
"start": 69375,
"end": 70847
}
|
class ____(mviewbuf.MemAlloc):
"""A pointer to a pinned buffer on the host.
:param context: The context in which the pointer was mapped.
:type context: Context
:param owner: The object owning the memory. For EMM plugin implementation,
this ca
:param pointer: The address of the buffer.
:type pointer: ctypes.c_void_p
:param size: The size of the buffer in bytes.
:type size: int
:param owner: An object owning the buffer that has been pinned. For EMM
plugin implementation, the default of ``None`` suffices for
memory allocated in ``memhostalloc`` - for ``mempin``, it
should be the owner passed in to the ``mempin`` method.
:param finalizer: A function that is called when the buffer is to be freed.
:type finalizer: function
"""
def __init__(self, context, pointer, size, owner=None, finalizer=None):
self.context = context
self.owned = owner
self.size = size
self.host_pointer = pointer
self.is_managed = finalizer is not None
self.handle = self.host_pointer
# For buffer interface
self._buflen_ = self.size
if USE_NV_BINDING:
self._bufptr_ = self.host_pointer
else:
self._bufptr_ = self.host_pointer.value
if finalizer is not None:
weakref.finalize(self, finalizer)
def own(self):
return self
|
PinnedMemory
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 53393,
"end": 54442
}
|
class ____(BaseModel):
"""
Schema for Human-in-the-loop detail history.
"""
options: Annotated[list[str], Field(min_length=1, title="Options")]
subject: Annotated[str, Field(title="Subject")]
body: Annotated[str | None, Field(title="Body")] = None
defaults: Annotated[list[str] | None, Field(title="Defaults")] = None
multiple: Annotated[bool | None, Field(title="Multiple")] = False
params: Annotated[dict[str, Any] | None, Field(title="Params")] = None
assigned_users: Annotated[list[HITLUser] | None, Field(title="Assigned Users")] = None
created_at: Annotated[datetime, Field(title="Created At")]
responded_by_user: HITLUser | None = None
responded_at: Annotated[datetime | None, Field(title="Responded At")] = None
chosen_options: Annotated[list[str] | None, Field(title="Chosen Options")] = None
params_input: Annotated[dict[str, Any] | None, Field(title="Params Input")] = None
response_received: Annotated[bool | None, Field(title="Response Received")] = False
|
HITLDetailHistory
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
|
{
"start": 18932,
"end": 19222
}
|
class ____(graphene.ObjectType):
class Meta:
interfaces = (
GrapheneMessageEvent,
GrapheneDisplayableEvent,
GrapheneStepEvent,
GrapheneMarkerEvent,
)
name = "ResourceInitStartedEvent"
|
GrapheneResourceInitStartedEvent
|
python
|
doocs__leetcode
|
solution/2800-2899/2809.Minimum Time to Make Array Sum At Most x/Solution2.py
|
{
"start": 0,
"end": 465
}
|
class ____:
def minimumTime(self, nums1: List[int], nums2: List[int], x: int) -> int:
n = len(nums1)
f = [0] * (n + 1)
for a, b in sorted(zip(nums1, nums2), key=lambda z: z[1]):
for j in range(n, 0, -1):
f[j] = max(f[j], f[j - 1] + a + b * j)
s1 = sum(nums1)
s2 = sum(nums2)
for j in range(n + 1):
if s1 + s2 * j - f[j] <= x:
return j
return -1
|
Solution
|
python
|
doocs__leetcode
|
solution/0800-0899/0813.Largest Sum of Averages/Solution2.py
|
{
"start": 0,
"end": 459
}
|
class ____:
def largestSumOfAverages(self, nums: List[int], k: int) -> float:
n = len(nums)
f = [[0] * (k + 1) for _ in range(n + 1)]
s = list(accumulate(nums, initial=0))
for i in range(1, n + 1):
f[i][1] = s[i] / i
for j in range(2, min(i + 1, k + 1)):
for h in range(i):
f[i][j] = max(f[i][j], f[h][j - 1] + (s[i] - s[h]) / (i - h))
return f[n][k]
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 557651,
"end": 558130
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of DeleteProject"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "owner")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
owner = sgqlc.types.Field(ProjectOwner, graphql_name="owner")
"""The repository or organization the project was removed from."""
|
DeleteProjectPayload
|
python
|
Netflix__metaflow
|
metaflow/_vendor/packaging/specifiers.py
|
{
"start": 25567,
"end": 39080
}
|
class ____(BaseSpecifier):
"""This class abstracts handling of a set of version specifiers.
It can be passed a single specifier (``>=3.0``), a comma-separated list of
specifiers (``>=3.0,!=3.1``), or no specifier at all.
"""
def __init__(
self, specifiers: str = "", prereleases: Optional[bool] = None
) -> None:
"""Initialize a SpecifierSet instance.
:param specifiers:
The string representation of a specifier or a comma-separated list of
specifiers which will be parsed and normalized before use.
:param prereleases:
This tells the SpecifierSet if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
If the given ``specifiers`` are not parseable than this exception will be
raised.
"""
# Split on `,` to break each individual specifier into it's own item, and
# strip each item to remove leading/trailing whitespace.
split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parsed each individual specifier, attempting first to make it a
# Specifier.
parsed: Set[Specifier] = set()
for specifier in split_specifiers:
parsed.add(Specifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
@property
def prereleases(self) -> Optional[bool]:
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value: bool) -> None:
self._prereleases = value
def __repr__(self) -> str:
"""A representation of the specifier set that shows all internal state.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> SpecifierSet('>=1.0.0,!=2.0.0')
<SpecifierSet('!=2.0.0,>=1.0.0')>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<SpecifierSet({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the specifier set that can be round-tripped.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
'!=1.0.1,>=1.0.0'
>>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
'!=1.0.1,>=1.0.0'
"""
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self) -> int:
return hash(self._specs)
def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
"""Return a SpecifierSet which is a combination of the two sets.
:param other: The other object to combine with.
>>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
>>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
"""
if isinstance(other, str):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other: object) -> bool:
"""Whether or not the two SpecifierSet-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
False
"""
if isinstance(other, (str, Specifier)):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __len__(self) -> int:
"""Returns the number of specifiers in this specifier set."""
return len(self._specs)
def __iter__(self) -> Iterator[Specifier]:
"""
Returns an iterator over all the underlying :class:`Specifier` instances
in this specifier set.
>>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
[<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
"""
return iter(self._specs)
def __contains__(self, item: UnparsedVersion) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
True
"""
return self.contains(item)
def contains(
self,
item: UnparsedVersion,
prereleases: Optional[bool] = None,
installed: Optional[bool] = None,
) -> bool:
"""Return whether or not the item is contained in this SpecifierSet.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this SpecifierSet. If set to
``None`` (the default), it uses :attr:`prereleases` to determine
whether or not prereleases are allowed.
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
True
"""
# Ensure that our item is a Version instance.
if not isinstance(item, Version):
item = Version(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
if installed and item.is_prerelease:
item = Version(item.base_version)
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(s.contains(item, prereleases=prereleases) for s in self._specs)
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: Optional[bool] = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifiers in this set.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
``None`` (the default), it will be intelligently decide whether to allow
prereleases or not (based on the :attr:`prereleases` attribute, and
whether the only versions matching are prereleases).
This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
because it implements the rule from :pep:`440` that a prerelease item
SHOULD be accepted if no other versions match the given specifier.
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
['1.3', <Version('1.4')>]
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
[]
>>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
An "empty" SpecifierSet will filter items based on the presence of prerelease
versions in the set.
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet("").filter(["1.5a1"]))
['1.5a1']
>>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
"""
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iter(iterable)
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases.
else:
filtered: List[UnparsedVersionVar] = []
found_prereleases: List[UnparsedVersionVar] = []
for item in iterable:
parsed_version = _coerce_version(item)
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return iter(found_prereleases)
return iter(filtered)
|
SpecifierSet
|
python
|
PyCQA__pydocstyle
|
src/pydocstyle/config.py
|
{
"start": 33218,
"end": 33488
}
|
class ____(Exception):
"""An exception for illegal configurations."""
pass
# General configurations for pydocstyle run.
RunConfiguration = namedtuple(
'RunConfiguration',
('explain', 'source', 'debug', 'verbose', 'count', 'config'),
)
|
IllegalConfiguration
|
python
|
huggingface__transformers
|
src/transformers/models/vivit/modeling_vivit.py
|
{
"start": 12502,
"end": 13755
}
|
class ____(GradientCheckpointingLayer):
"""This corresponds to the EncoderBlock class in the scenic/vivit implementation."""
def __init__(self, config: VivitConfig):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = VivitAttention(config)
self.intermediate = VivitIntermediate(config)
self.output = VivitOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states_norm = self.layernorm_before(hidden_states)
attention_output = self.attention(hidden_states_norm)
# first residual connection
hidden_states = attention_output + hidden_states
# in Vivit, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
# second residual connection is done here
layer_output = self.output(layer_output, hidden_states)
return layer_output
|
VivitLayer
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/weak_tensor_math_ops_test.py
|
{
"start": 28979,
"end": 32912
}
|
class ____(test_util.TensorFlowTestCase):
def numpySafeFloorDivInt(self, x, y):
z = x // y
# Numpy produces 0 for INT_MIN/-1, but we expect an overflow to INT_MIN
# so that (INT_MIN/-1) + (INT_MIN % -1) = INT_MIN + 0 = INT_MIN.
z[(x == np.iinfo(x.dtype).min) & (y == -1)] = np.iinfo(x.dtype).min
return z
def numpySafeFloorModInt(self, x, y):
# Numpy crashes with a FPE for INT_MIN % -1.
z = self.numpySafeFloorDivInt(x, y)
return x - z * y
def numpySafeTruncateDivInt(self, x, y):
z = self.numpySafeFloorDivInt(x, y)
# Round up if non-zero remainder and inputs have opposite signs.
z[(x != z * y) & ((x < 0) != (y < 0))] += 1
return z
def numpySafeTruncateModInt(self, x, y):
# Numpy crashes with a FPE for INT_MIN % -1.
z = self.numpySafeTruncateDivInt(x, y)
return x - z * y
def intEdgeTestData(self, dtype):
"""Edge-case test data for integer types."""
# INT_MIN/-1 will produce signed-integer overflow, so we instead test
# (INT_MIN + 1) / -1.
nums = np.array(
[
[np.iinfo(dtype).min, -1, 1, np.iinfo(dtype).max],
[np.iinfo(dtype).min + 1, -1, 1, np.iinfo(dtype).max],
[np.iinfo(dtype).min, -1, 1, np.iinfo(dtype).max],
[np.iinfo(dtype).min, -1, 1, np.iinfo(dtype).max],
],
dtype=dtype,
)
divs = np.array(
[
[
np.iinfo(dtype).min,
np.iinfo(dtype).min,
np.iinfo(dtype).min,
np.iinfo(dtype).min,
],
[-1, -1, -1, -1],
[1, 1, 1, 1],
[
np.iinfo(dtype).max,
np.iinfo(dtype).max,
np.iinfo(dtype).max,
np.iinfo(dtype).max,
],
],
dtype=dtype,
)
return nums, divs
@test_util.disable_asan("Expected signed integer overflow.")
@test_util.disable_ubsan("Expected signed integer overflow.")
def testFloorDivModIntEdges(self):
for dtype in [np.int32, np.int64]:
x, y = self.intEdgeTestData(dtype)
x_weak, y_weak = _get_weak_tensor(x), _get_weak_tensor(y)
tf_floor_div = math_ops.floor_div(x_weak, y_weak)
np_floor_div = self.numpySafeFloorDivInt(x, y)
self.assertIsInstance(tf_floor_div, WeakTensor)
self.assertAllEqual(tf_floor_div, np_floor_div)
tf_floor_mod = math_ops.floormod(x_weak, y_weak)
np_floor_mod = self.numpySafeFloorModInt(x, y)
self.assertIsInstance(tf_floor_div, WeakTensor)
self.assertAllEqual(tf_floor_mod, np_floor_mod)
z = math_ops.add(math_ops.multiply(tf_floor_div, y_weak), tf_floor_mod)
# x = floor_div(x, y) * y + floor_mod(x, y)
self.assertIsInstance(z, WeakTensor)
self.assertAllEqual(z, np.broadcast_to(x, z.shape))
@test_util.disable_asan("Expected signed integer overflow.")
@test_util.disable_ubsan("Expected signed integer overflow.")
def testTruncateDivModIntEdges(self):
for dtype in [np.int32, np.int64]:
x, y = self.intEdgeTestData(dtype)
x_weak, y_weak = _get_weak_tensor(x), _get_weak_tensor(y)
tf_truncate_div = math_ops.truncatediv(x_weak, y_weak)
np_truncate_div = self.numpySafeTruncateDivInt(x, y)
self.assertIsInstance(tf_truncate_div, WeakTensor)
self.assertAllEqual(tf_truncate_div, np_truncate_div)
tf_truncate_mod = math_ops.truncatemod(x_weak, y_weak)
np_truncate_mod = self.numpySafeTruncateModInt(x, y)
self.assertIsInstance(tf_truncate_mod, WeakTensor)
self.assertAllEqual(tf_truncate_mod, np_truncate_mod)
z = math_ops.add(
math_ops.multiply(tf_truncate_div, y_weak), tf_truncate_mod
)
self.assertIsInstance(z, WeakTensor)
# x = truncatediv(x, y) * y + truncatemod(x, y)
self.assertAllEqual(z, np.broadcast_to(x, z.shape))
@test_util.run_all_in_graph_and_eager_modes
|
DivAndModTest
|
python
|
pypa__pip
|
src/pip/_internal/req/req_uninstall.py
|
{
"start": 21661,
"end": 24099
}
|
class ____:
def __init__(self, pth_file: str) -> None:
self.file = pth_file
self.entries: set[str] = set()
self._saved_lines: list[bytes] | None = None
def add(self, entry: str) -> None:
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
# os.path.splitdrive is used instead of os.path.isabs because isabs
# treats non-absolute paths with drive letter markings like c:foo\bar
# as absolute paths. It also does not recognize UNC paths if they don't
# have more than "\\sever\share". Valid examples: "\\server\share\" or
# "\\server\share\folder".
if WINDOWS and not os.path.splitdrive(entry)[0]:
entry = entry.replace("\\", "/")
self.entries.add(entry)
def remove(self) -> None:
logger.verbose("Removing pth entries from %s:", self.file)
# If the file doesn't exist, log a warning and return
if not os.path.isfile(self.file):
logger.warning("Cannot remove entries from nonexistent file %s", self.file)
return
with open(self.file, "rb") as fh:
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
if any(b"\r\n" in line for line in lines):
endline = "\r\n"
else:
endline = "\n"
# handle missing trailing newline
if lines and not lines[-1].endswith(endline.encode("utf-8")):
lines[-1] = lines[-1] + endline.encode("utf-8")
for entry in self.entries:
try:
logger.verbose("Removing entry: %s", entry)
lines.remove((entry + endline).encode("utf-8"))
except ValueError:
pass
with open(self.file, "wb") as fh:
fh.writelines(lines)
def rollback(self) -> bool:
if self._saved_lines is None:
logger.error("Cannot roll back changes to %s, none were made", self.file)
return False
logger.debug("Rolling %s back to previous state", self.file)
with open(self.file, "wb") as fh:
fh.writelines(self._saved_lines)
return True
|
UninstallPthEntries
|
python
|
kennethreitz__tablib
|
src/tablib/exceptions.py
|
{
"start": 143,
"end": 213
}
|
class ____(Exception):
"Outside of Dataset size"
|
InvalidDatasetIndex
|
python
|
pytorch__pytorch
|
test/inductor/test_cpu_cpp_wrapper.py
|
{
"start": 1619,
"end": 14394
}
|
class ____(InductorTestCase):
device = "cpu"
test_failures_cpp_wrapper = {
# conv2d will fallback for dynamic shapes; the fallback path is not yet supported
"test_conv2d_unary_cpu_dynamic_shapes": test_torchinductor.TestFailure(
("cpp_wrapper",), is_skip=True
),
"test_conv2d_binary_inplace_fusion_failed_cpu_dynamic_shapes": test_torchinductor.TestFailure(
("cpp_wrapper",), is_skip=True
),
"test_conv2d_binary_inplace_fusion_pass_cpu_dynamic_shapes": test_torchinductor.TestFailure(
("cpp_wrapper",), is_skip=True
),
# aten._native_multi_head_attention.default is not yet supported for dynamic shapes
"test_multihead_attention_cpu_dynamic_shapes": test_torchinductor.TestFailure(
("cpp_wrapper",), is_skip=True
),
}
if TEST_WITH_ROCM:
test_failures_cpp_wrapper.update(
{
"test_linear_packed": test_torchinductor.TestFailure(
("cpp_wrapper"), is_skip=True
),
"test_linear_packed_dynamic_shapes": test_torchinductor.TestFailure(
("cpp_wrapper"), is_skip=True
),
}
)
def make_test_case(
name,
device,
tests,
condition=True,
slow=False,
func_inputs=None,
code_string_count=None,
test_build_separate=False,
):
test_name = f"{name}_{device}" if device else name
if code_string_count is None:
code_string_count = {}
func = getattr(tests, test_name)
assert callable(func), "not a callable"
func = slowTest(func) if slow else func
new_test_name = f"{test_name}_separate" if test_build_separate else test_name
@config.patch(
cpp_wrapper=True,
cpp_wrapper_build_separate=test_build_separate,
)
def fn(self):
tests.setUpClass()
tests.setUp()
try:
with torch._C._PreserveDispatchKeyGuard():
torch._C._dispatch_tls_set_dispatch_key_included(
torch._C.DispatchKey.Dense, True
)
_, code = test_torchinductor.run_and_get_cpp_code(
func, *func_inputs if func_inputs else []
)
# If a test generates no code, skip the remaining checks. This can
# happen for tests validating build-dependent features (e.g. datatypes
# that are available on some platforms and not others).
if code:
if test_build_separate:
self.assertIn("kernel_src", code)
self.assertIn("CppWrapperCodeCache", code)
self.assertTrue(
all(
code.count(string) == code_string_count[string]
for string in code_string_count
)
)
finally:
tests.tearDown()
tests.tearDownClass()
fn.__name__ = new_test_name
import copy
fn.__dict__ = copy.deepcopy(func.__dict__)
if condition:
setattr(
CppWrapperTemplate,
new_test_name,
fn,
)
if RUN_CPU:
class BaseTest(NamedTuple):
name: str
device: str = "cpu"
tests: InductorTestCase = test_torchinductor.CpuTests()
condition: bool = True
slow: bool = False
func_inputs: list = None
code_string_count: dict = {}
test_build_separate: bool = False
for item in [
BaseTest("test_add_complex"),
BaseTest("test_add_complex", test_build_separate=True),
BaseTest("test_add_complex4"),
BaseTest("test_add_complex4", test_build_separate=True),
BaseTest("test_as_strided"), # buffer reuse
BaseTest("test_bernoulli1"),
BaseTest("test_bitwise"), # int32
BaseTest("test_bmm1"),
BaseTest("test_bmm1", test_build_separate=True),
BaseTest("test_bmm2"),
BaseTest("test_cat"), # alias
BaseTest(
"test_conv2d_binary_inplace_fusion_failed",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available(),
func_inputs=[
["aoti_torch_cpu_mkldnn__convolution_pointwise_binary("],
["aoti_torch_cpu_mkldnn__convolution_pointwise_binary_("],
],
),
BaseTest(
"test_conv2d_binary_inplace_fusion_pass",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available(),
func_inputs=[
["aoti_torch_cpu_mkldnn__convolution_pointwise_binary_("],
["aoti_torch_cpu_mkldnn__convolution_pointwise_binary("],
],
),
BaseTest(
"test_conv2d_unary",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcherGenericCPU(),
condition=torch.backends.mkldnn.is_available(),
slow=True,
),
BaseTest("test_conv_transpose2d_packed", "cpu", test_cpu_repro.CPUReproTests()),
BaseTest("test_cumsum"),
BaseTest("test_custom_op_1"),
BaseTest("test_custom_op_2"),
BaseTest("test_custom_op_3"),
BaseTest("test_dtype_sympy_expr"),
BaseTest("test_embedding_bag"), # test default FallbackKernel
BaseTest("test_index_put1"),
BaseTest("test_index_put_deterministic_fallback"),
BaseTest("test_adding_tensor_offsets"),
BaseTest("test_inductor_layout_optimization_input_mutations"),
BaseTest("test_int_div", "", test_cpu_repro.CPUReproTests()),
BaseTest("test_int8_weight_only_quant"),
BaseTest("test_linear1"),
BaseTest("test_linear2"),
*[
BaseTest(func, "", test_cpu_select_algorithm.TestSelectAlgorithmCPU())
for func in dir(test_cpu_select_algorithm.TestSelectAlgorithmCPU())
if func.startswith(
(
"test_linear_with_pointwise",
"test_grouped_linear",
)
)
],
BaseTest("test_polar"),
BaseTest(
"test_linear_binary",
"",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
torch.backends.mkldnn.is_available()
and torch.ops.mkldnn._is_mkldnn_bf16_supported(),
),
BaseTest(
"test_linear_packed",
"",
test_cpu_repro.CPUReproTests(),
torch.backends.mkldnn.is_available()
and (
torch.ops.mkldnn._is_mkldnn_bf16_supported()
or torch.ops.mkldnn._is_mkldnn_fp16_supported()
),
),
*[
BaseTest(
func,
"",
test_cpu_repro.CPUReproTests(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
)
for func in dir(test_cpu_repro.CPUReproTests())
if func.startswith("test_lstm_packed_change_input_sizes")
],
BaseTest("test_max_pool2d6_dilation_1"),
BaseTest("test_max_pool2d6_dilation_2"),
BaseTest(
"test_mkl_linear", "", test_cpu_repro.CPUReproTests(), condition=TEST_MKL
),
BaseTest("test_mm_views"),
BaseTest("test_multihead_attention", "cpu", test_cpu_repro.CPUReproTests()),
BaseTest(
"test_multi_threading",
condition=not IS_WINDOWS,
# Two threads compile, so we expect the output code to be printed twice.
code_string_count={"py::gil_scoped_release_simple release;": 2},
),
BaseTest("test_profiler_mark_wrapper_call"),
BaseTest(
"test_qconv2d",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
),
BaseTest(
"test_qconv2d_relu",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
),
BaseTest(
"test_qconv2d_add",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
),
BaseTest(
"test_qconv2d_add_relu",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
),
BaseTest(
"test_qconv2d_dequant_promotion",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
),
BaseTest(
"test_qconv2d_maxpool2d_linear_dynamic",
"cpu",
test_mkldnn_pattern_matcher.TestDynamicPatternMatcher(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
func_inputs=[
[
"aoti_torch_cpu__qconv_pointwise_tensor",
"torch.ops.quantized.max_pool2d",
"aoti_torch_cpu__qlinear_pointwise_tensor",
]
],
),
*[
BaseTest(
func,
"",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
)
for func in dir(test_mkldnn_pattern_matcher.TestPatternMatcher())
if func.startswith("test_qlinear")
],
BaseTest(
"test_qconv2d_with_concat",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
),
BaseTest(
"test_dynamic_qlinear",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
),
BaseTest(
"test_dynamic_qlinear_qat",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
condition=torch.backends.mkldnn.is_available() and not IS_WINDOWS,
),
BaseTest("test_randint"),
BaseTest("test_randn_with_dtype_and_device"),
BaseTest("test_reduction1"), # Reduction
BaseTest("test_relu"), # multiple inputs
BaseTest("test_repeat_interleave", "", test_cpu_repro.CPUReproTests()),
BaseTest("test_scalar_input"),
BaseTest("test_scalar_output"),
BaseTest("test_scaled_dot_product_attention"),
BaseTest("test_scatter1"),
BaseTest("test_scatter2"),
BaseTest("test_scatter3"),
BaseTest("test_scatter4"),
BaseTest("test_scatter5"),
BaseTest("test_scatter6"),
BaseTest("test_scatter_reduce1"),
BaseTest("test_scatter_reduce2"),
BaseTest("test_scatter_reduce3"),
BaseTest("test_silu"), # single input, single output
BaseTest("test_sort"),
BaseTest("test_sum_dtype"), # float64
BaseTest("test_sum_int"), # bool, int64, int8, uint8
BaseTest("test_tensor2"), # constant input
BaseTest(
"test_transpose", code_string_count={".reset();": 2}
), # multiple outputs, buffer clear
BaseTest("test_view_as_complex"),
BaseTest("test_view_as_real"),
BaseTest(
"test_woq_int4",
"cpu",
test_mkldnn_pattern_matcher.TestPatternMatcher(),
),
]:
make_test_case(
item.name,
item.device,
item.tests,
item.condition,
item.slow,
item.func_inputs,
item.code_string_count,
item.test_build_separate,
)
test_torchinductor.copy_tests(
CppWrapperTemplate,
TestCppWrapper,
"cpp_wrapper",
test_failures_cpp_wrapper,
)
DynamicShapesCppWrapperTemplate = (
test_torchinductor_dynamic_shapes.make_dynamic_cls(CppWrapperTemplate)
)
test_torchinductor.copy_tests(
DynamicShapesCppWrapperTemplate,
DynamicShapesCppWrapperCpuTests,
"cpp_wrapper",
test_failures_cpp_wrapper,
xfail_prop="_expected_failure_dynamic_wrapper",
)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if RUN_CPU:
run_tests(needs="filelock")
|
DynamicShapesCppWrapperCpuTests
|
python
|
gevent__gevent
|
src/greentest/3.14/test_socket.py
|
{
"start": 100284,
"end": 110207
}
|
class ____(unittest.TestCase):
def testCreateRfcommSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support L2CAP sockets")
def testCreateL2capSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support HCI sockets")
def testCreateHciSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support SCO sockets")
def testCreateScoSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
pass
@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH_L2CAP, 'Bluetooth L2CAP sockets required for this test')
def testBindLeAttL2capSocket(self):
BDADDR_LE_PUBLIC = support.get_attribute(socket, 'BDADDR_LE_PUBLIC')
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as f:
# ATT is the only CID allowed in userspace by the Linux kernel
CID_ATT = 4
f.bind((socket.BDADDR_ANY, 0, CID_ATT, BDADDR_LE_PUBLIC))
addr = f.getsockname()
self.assertEqual(addr, (socket.BDADDR_ANY, 0, CID_ATT, BDADDR_LE_PUBLIC))
@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH_L2CAP, 'Bluetooth L2CAP sockets required for this test')
def testBindLePsmL2capSocket(self):
BDADDR_LE_RANDOM = support.get_attribute(socket, 'BDADDR_LE_RANDOM')
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as f:
# First user PSM in LE L2CAP
psm = 0x80
f.bind((socket.BDADDR_ANY, psm, 0, BDADDR_LE_RANDOM))
addr = f.getsockname()
self.assertEqual(addr, (socket.BDADDR_ANY, psm, 0, BDADDR_LE_RANDOM))
@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH_L2CAP, 'Bluetooth L2CAP sockets required for this test')
def testBindBrEdrL2capSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as f:
# First user PSM in BR/EDR L2CAP
psm = 0x1001
f.bind((socket.BDADDR_ANY, psm))
addr = f.getsockname()
self.assertEqual(addr, (socket.BDADDR_ANY, psm))
@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH_L2CAP, 'Bluetooth L2CAP sockets required for this test')
def testBadL2capAddr(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as f:
with self.assertRaises(OSError):
f.bind((socket.BDADDR_ANY, 0, 0, 0, 0))
with self.assertRaises(OSError):
f.bind((socket.BDADDR_ANY,))
with self.assertRaises(OSError):
f.bind(socket.BDADDR_ANY)
with self.assertRaises(OSError):
f.bind((socket.BDADDR_ANY.encode(), 0x1001))
with self.assertRaises(OSError):
f.bind(('\ud812', 0x1001))
def testBindRfcommSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
channel = 0
try:
s.bind((socket.BDADDR_ANY, channel))
except OSError as err:
if sys.platform == 'win32' and err.winerror == 10050:
self.skipTest(str(err))
raise
addr = s.getsockname()
self.assertEqual(addr, (mock.ANY, channel))
self.assertRegex(addr[0], r'(?i)[0-9a-f]{2}(?::[0-9a-f]{2}){4}')
if sys.platform != 'win32':
self.assertEqual(addr, (socket.BDADDR_ANY, channel))
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
s.bind(addr)
addr2 = s.getsockname()
self.assertEqual(addr2, addr)
def testBadRfcommAddr(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
channel = 0
with self.assertRaises(OSError):
s.bind((socket.BDADDR_ANY.encode(), channel))
with self.assertRaises(OSError):
s.bind((socket.BDADDR_ANY,))
with self.assertRaises(OSError):
s.bind((socket.BDADDR_ANY, channel, 0))
with self.assertRaises(OSError):
s.bind((socket.BDADDR_ANY + '\0', channel))
with self.assertRaises(OSError):
s.bind('\ud812')
with self.assertRaises(OSError):
s.bind(('invalid', channel))
@unittest.skipUnless(hasattr(socket, 'BTPROTO_HCI'), 'Bluetooth HCI sockets required for this test')
def testBindHciSocket(self):
if sys.platform.startswith(('netbsd', 'dragonfly', 'freebsd')):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
s.bind(socket.BDADDR_ANY)
addr = s.getsockname()
self.assertEqual(addr, socket.BDADDR_ANY)
else:
dev = 0
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
try:
s.bind((dev,))
except OSError as err:
if err.errno in (errno.EINVAL, errno.ENODEV):
self.skipTest(str(err))
raise
addr = s.getsockname()
self.assertEqual(addr, dev)
with (self.subTest('integer'),
socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s):
s.bind(dev)
addr = s.getsockname()
self.assertEqual(addr, dev)
with (self.subTest('channel=HCI_CHANNEL_RAW'),
socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s):
channel = socket.HCI_CHANNEL_RAW
s.bind((dev, channel))
addr = s.getsockname()
self.assertEqual(addr, dev)
with (self.subTest('channel=HCI_CHANNEL_USER'),
socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s):
channel = socket.HCI_CHANNEL_USER
try:
s.bind((dev, channel))
except OSError as err:
# Needs special permissions.
if err.errno in (errno.EPERM, errno.EBUSY, errno.ERFKILL):
self.skipTest(str(err))
raise
addr = s.getsockname()
self.assertEqual(addr, (dev, channel))
@unittest.skipUnless(hasattr(socket, 'BTPROTO_HCI'), 'Bluetooth HCI sockets required for this test')
def testBadHciAddr(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
if sys.platform.startswith(('netbsd', 'dragonfly', 'freebsd')):
with self.assertRaises(OSError):
s.bind(socket.BDADDR_ANY.encode())
with self.assertRaises(OSError):
s.bind((socket.BDADDR_ANY,))
with self.assertRaises(OSError):
s.bind(socket.BDADDR_ANY + '\0')
with self.assertRaises((ValueError, OSError)):
s.bind(socket.BDADDR_ANY + ' '*100)
with self.assertRaises(OSError):
s.bind('\ud812')
with self.assertRaises(OSError):
s.bind('invalid')
with self.assertRaises(OSError):
s.bind(b'invalid')
else:
dev = 0
with self.assertRaises(OSError):
s.bind(())
with self.assertRaises(OSError):
s.bind((dev, socket.HCI_CHANNEL_RAW, 0, 0))
with self.assertRaises(OSError):
s.bind(socket.BDADDR_ANY)
with self.assertRaises(OSError):
s.bind(socket.BDADDR_ANY.encode())
@unittest.skipUnless(hasattr(socket, 'BTPROTO_SCO'), 'Bluetooth SCO sockets required for this test')
def testBindScoSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
s.bind(socket.BDADDR_ANY)
addr = s.getsockname()
self.assertEqual(addr, socket.BDADDR_ANY)
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
s.bind(socket.BDADDR_ANY.encode())
addr = s.getsockname()
self.assertEqual(addr, socket.BDADDR_ANY)
@unittest.skipUnless(hasattr(socket, 'BTPROTO_SCO'), 'Bluetooth SCO sockets required for this test')
def testBadScoAddr(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
with self.assertRaises(OSError):
s.bind((socket.BDADDR_ANY,))
with self.assertRaises(OSError):
s.bind((socket.BDADDR_ANY.encode(),))
with self.assertRaises(ValueError):
s.bind(socket.BDADDR_ANY + '\0')
with self.assertRaises(ValueError):
s.bind(socket.BDADDR_ANY.encode() + b'\0')
with self.assertRaises(UnicodeEncodeError):
s.bind('\ud812')
with self.assertRaises(OSError):
s.bind('invalid')
with self.assertRaises(OSError):
s.bind(b'invalid')
@unittest.skipUnless(HAVE_SOCKET_HYPERV,
'Hyper-V sockets required for this test.')
|
BluetoothTest
|
python
|
huggingface__transformers
|
src/transformers/models/sew/modeling_sew.py
|
{
"start": 15746,
"end": 20522
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = SEWPositionalConvEmbedding(config)
self.pool = nn.AvgPool1d(config.squeeze_factor, config.squeeze_factor)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList([SEWEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.upsample = SEWUpsampling(config)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
if self.config._attn_implementation == "flash_attention_2":
# make sure padded tokens output 0
hidden_states[~expand_attention_mask] = 0.0
# 2d mask is passed through the layers
attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
else:
# make sure padded tokens output 0
hidden_states[~expand_attention_mask] = 0.0
input_lengths = (attention_mask.long()).sum(-1)
# apply pooling formula to get real output_lengths
output_lengths = input_lengths // self.config.squeeze_factor
max_encoder_length = hidden_states.shape[1] // self.config.squeeze_factor
attention_ids = (
torch.arange(0, max_encoder_length, device=output_lengths.device)
.view(1, -1)
.expand(output_lengths.shape[0], -1)
)
attention_mask = (attention_ids < output_lengths.view(-1, 1)).long()
# extend attention_mask
attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
attention_mask = attention_mask.expand(
attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
)
n_input_timesteps = hidden_states.shape[1]
hidden_states = hidden_states.transpose(1, 2)
position_embeddings = self.pos_conv_embed(hidden_states)
pooled_hidden_states = self.pool(hidden_states)
min_length = min(position_embeddings.size(-1), pooled_hidden_states.size(-1))
hidden_states = pooled_hidden_states[..., :min_length] + position_embeddings[..., :min_length]
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.layerdrop
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
layer_outputs = layer(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = self.upsample(hidden_states)
if hidden_states.shape[1] < n_input_timesteps:
hidden_states = nn.functional.pad(hidden_states, (0, 0, 0, n_input_timesteps - hidden_states.shape[1]))
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
|
SEWEncoder
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/tasks.py
|
{
"start": 93080,
"end": 96265
}
|
class ____(Request):
"""
Signal a task has completed
:param force: If not true, call fails if the task status is not
in_progress/stopped
:type force: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "completed"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "If not true, call fails if the task status is not in_progress/stopped",
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
force: Optional[bool] = False,
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
**kwargs: Any
) -> None:
super(CompletedRequest, self).__init__(**kwargs)
self.force = force
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
|
CompletedRequest
|
python
|
encode__django-rest-framework
|
tests/test_throttling.py
|
{
"start": 12972,
"end": 13569
}
|
class ____(XffTestingBase):
def test_xff_spoofing_doesnt_change_machine_id_with_one_app_proxy(self):
self.config_proxy(1)
self.view(self.request)
self.request.META['HTTP_X_FORWARDED_FOR'] = '4.4.4.4, 5.5.5.5, 2.2.2.2'
assert self.view(self.request).status_code == 429
def test_xff_spoofing_doesnt_change_machine_id_with_two_app_proxies(self):
self.config_proxy(2)
self.view(self.request)
self.request.META['HTTP_X_FORWARDED_FOR'] = '4.4.4.4, 1.1.1.1, 2.2.2.2'
assert self.view(self.request).status_code == 429
|
XffSpoofingTests
|
python
|
getsentry__sentry
|
src/sentry/rules/filters/issue_category.py
|
{
"start": 473,
"end": 582
}
|
class ____(forms.Form):
value = forms.ChoiceField(choices=list(CATEGORY_CHOICES.items()))
|
IssueCategoryForm
|
python
|
OmkarPathak__pygorithm
|
pygorithm/data_structures/linked_list.py
|
{
"start": 5717,
"end": 7527
}
|
class ____(object):
'''
Class for circular linked list
'''
def __init__(self):
self.head = None
self.tail = None
self.size = 0
def clear(self):
''' clears the head and tails of the linked list '''
self.tail = None
self.head = None
def get_data(self):
"""
prints the elements in the linked list
"""
l_list = []
current = self.tail
while True:
l_list.append(current.data)
current = current.next
if current == self.tail:
break
return l_list
def insert(self, data):
''' inserts the data in to the linked list '''
node = Node(data)
if self.head:
self.head.next = node
self.head = node
else:
self.head = node
self.tail = node
self.head.next = self.tail
self.size += 1
def delete(self, data):
''' deletes the specified element from linked list '''
current = self.tail
prev = self.tail
while prev == current or prev != self.head:
if current.data == data:
if current == self.tail:
self.tail = current.next
self.head.next = self.tail
else:
prev.next = current.next
self.size -= 1
return
prev = current
current = current.next
@staticmethod
def get_code():
"""
returns the code of the current class
"""
return inspect.getsource(CircularLinkedList)
if __name__ == '__main__':
cll = CircularLinkedList()
cll.insert(1)
cll.insert(2)
cll.insert(3)
print(cll.get_data())
|
CircularLinkedList
|
python
|
walkccc__LeetCode
|
solutions/2707. Extra Characters in a String/2707.py
|
{
"start": 0,
"end": 463
}
|
class ____:
# Similar to 139. Word Break
def minExtraChar(self, s: str, dictionary: list[str]) -> int:
n = len(s)
dictionarySet = set(dictionary)
# dp[i] := the minimum extra letters if breaking up s[0..i) optimally
dp = [0] + [n] * n
for i in range(1, n + 1):
for j in range(i):
if s[j:i] in dictionarySet:
dp[i] = min(dp[i], dp[j])
else:
dp[i] = min(dp[i], dp[j] + i - j)
return dp[n]
|
Solution
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/robotframework.py
|
{
"start": 5295,
"end": 6152
}
|
class ____(object):
_space_splitter = re.compile('( {2,})')
_pipe_splitter = re.compile('((?:^| +)\|(?: +|$))')
def split(self, row):
splitter = (row.startswith('| ') and self._split_from_pipes
or self._split_from_spaces)
for value in splitter(row):
yield value
yield '\n'
def _split_from_spaces(self, row):
yield '' # Start with (pseudo)separator similarly as with pipes
for value in self._space_splitter.split(row):
yield value
def _split_from_pipes(self, row):
_, separator, rest = self._pipe_splitter.split(row, 1)
yield separator
while self._pipe_splitter.search(rest):
cell, separator, rest = self._pipe_splitter.split(rest, 1)
yield cell
yield separator
yield rest
|
RowSplitter
|
python
|
wandb__wandb
|
wandb/errors/links.py
|
{
"start": 391,
"end": 440
}
|
class ____:
url: str
description: str
|
WBURL
|
python
|
ijl__orjson
|
test/test_datetime.py
|
{
"start": 22186,
"end": 23345
}
|
class ____:
def test_time(self):
"""
datetime.time
"""
assert orjson.dumps([datetime.time(12, 15, 59, 111)]) == b'["12:15:59.000111"]'
assert orjson.dumps([datetime.time(12, 15, 59)]) == b'["12:15:59"]'
@pytest.mark.skipif(zoneinfo is None, reason="zoneinfo not available")
def test_time_tz(self):
"""
datetime.time with tzinfo error
"""
with pytest.raises(orjson.JSONEncodeError):
orjson.dumps(
[
datetime.time(
12,
15,
59,
111,
tzinfo=zoneinfo.ZoneInfo("Asia/Shanghai"),
),
],
)
def test_time_microsecond_max(self):
"""
datetime.time microsecond max
"""
assert orjson.dumps(datetime.time(0, 0, 0, 999999)) == b'"00:00:00.999999"'
def test_time_microsecond_min(self):
"""
datetime.time microsecond min
"""
assert orjson.dumps(datetime.time(0, 0, 0, 1)) == b'"00:00:00.000001"'
|
TestTime
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/sagemaker.py
|
{
"start": 4648,
"end": 5808
}
|
class ____(SageMakerBaseSensor):
"""
Poll the transform job until it reaches a terminal state; raise AirflowException with the failure reason.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:SageMakerTransformSensor`
:param job_name: Name of the transform job to watch.
"""
template_fields: Sequence[str] = aws_template_fields(
"job_name",
)
template_ext: Sequence[str] = ()
def __init__(self, *, job_name: str, **kwargs):
super().__init__(**kwargs)
self.job_name = job_name
def non_terminal_states(self):
return SageMakerHook.non_terminal_states
def failed_states(self):
return SageMakerHook.failed_states
def get_sagemaker_response(self):
self.log.info("Poking Sagemaker Transform Job %s", self.job_name)
return self.hook.describe_transform_job(self.job_name)
def get_failed_reason_from_response(self, response):
return response["FailureReason"]
def state_from_response(self, response):
return response["TransformJobStatus"]
|
SageMakerTransformSensor
|
python
|
huggingface__transformers
|
src/transformers/models/aimv2/modeling_aimv2.py
|
{
"start": 17086,
"end": 19551
}
|
class ____(Aimv2PreTrainedModel):
config: Aimv2VisionConfig
main_input_name = "pixel_values"
_can_record_outputs = {
"hidden_states": Aimv2EncoderLayer,
"attentions": Aimv2Attention,
}
def __init__(self, config: Aimv2VisionConfig):
super().__init__(config)
self.config = config
self.embeddings = Aimv2VisionEmbeddings(config)
self.encoder = Aimv2Encoder(config)
# The only change from SiglipVisionTransformer is, layernorm -> rms_norm.
self.rms_norm = Aimv2RMSNorm(config.hidden_size, config.rms_norm_eps)
self.use_head = config.use_head
if self.use_head:
self.head = Aimv2AttentionPoolingHead(config)
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.embeddings.patch_embed
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPooling:
r"""
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Siglip2VisionModel
>>> model = Aimv2VisionModel.from_pretrained("apple/aimv2-large-patch14-native")
>>> processor = AutoProcessor.from_pretrained("apple/aimv2-large-patch14-native")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled features
```"""
hidden_states = self.embeddings(pixel_values)
encoder_outputs: BaseModelOutput = self.encoder(
inputs_embeds=hidden_states,
**kwargs,
)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.rms_norm(last_hidden_state)
pooler_output = self.head(last_hidden_state) if self.use_head else None
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooler_output,
)
@auto_docstring(
custom_intro="""
The text model from AIMv2 without any head or projection on top.
"""
)
|
Aimv2VisionModel
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/auth.py
|
{
"start": 10613,
"end": 11806
}
|
class ____(Response):
"""
Response of auth.edit_credentials endpoint.
:param updated: Number of credentials updated
:type updated: int
"""
_service = "auth"
_action = "edit_credentials"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of credentials updated",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(EditCredentialsResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
|
EditCredentialsResponse
|
python
|
pyca__cryptography
|
tests/x509/test_x509_ext.py
|
{
"start": 78959,
"end": 80422
}
|
class ____:
def test_not_ipaddress(self):
with pytest.raises(TypeError):
x509.IPAddress(b"notanipaddress") # type:ignore[arg-type]
with pytest.raises(TypeError):
x509.IPAddress(1.3) # type:ignore[arg-type]
def test_repr(self):
gn = x509.IPAddress(ipaddress.IPv4Address("127.0.0.1"))
assert repr(gn) == "<IPAddress(value=127.0.0.1)>"
gn2 = x509.IPAddress(ipaddress.IPv6Address("ff::"))
assert repr(gn2) == "<IPAddress(value=ff::)>"
gn3 = x509.IPAddress(ipaddress.IPv4Network("192.168.0.0/24"))
assert repr(gn3) == "<IPAddress(value=192.168.0.0/24)>"
gn4 = x509.IPAddress(ipaddress.IPv6Network("ff::/96"))
assert repr(gn4) == "<IPAddress(value=ff::/96)>"
def test_eq(self):
gn = x509.IPAddress(ipaddress.IPv4Address("127.0.0.1"))
gn2 = x509.IPAddress(ipaddress.IPv4Address("127.0.0.1"))
assert gn == gn2
def test_ne(self):
gn = x509.IPAddress(ipaddress.IPv4Address("127.0.0.1"))
gn2 = x509.IPAddress(ipaddress.IPv4Address("127.0.0.2"))
assert gn != gn2
assert gn != object()
def test_hash(self):
gn = x509.IPAddress(ipaddress.IPv4Address("127.0.0.1"))
gn2 = x509.IPAddress(ipaddress.IPv4Address("127.0.0.1"))
gn3 = x509.IPAddress(ipaddress.IPv4Address("127.0.0.2"))
assert hash(gn) == hash(gn2)
assert hash(gn) != hash(gn3)
|
TestIPAddress
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.