language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sympy__sympy | sympy/geometry/line.py | {
"start": 61448,
"end": 67068
} | class ____(LinearEntity2D, Ray):
"""
A Ray is a semi-line in the space with a source point and a direction.
Parameters
==========
p1 : Point
The source of the Ray
p2 : Point or radian value
This point determines the direction in which the Ray propagates.
If given as an angle it is interpreted in radians with the positive
direction being ccw.
Attributes
==========
source
xdirection
ydirection
See Also
========
sympy.geometry.point.Point, Line
Examples
========
>>> from sympy import Point, pi, Ray
>>> r = Ray(Point(2, 3), Point(3, 5))
>>> r
Ray2D(Point2D(2, 3), Point2D(3, 5))
>>> r.points
(Point2D(2, 3), Point2D(3, 5))
>>> r.source
Point2D(2, 3)
>>> r.xdirection
oo
>>> r.ydirection
oo
>>> r.slope
2
>>> Ray(Point(0, 0), angle=pi/4).slope
1
"""
def __new__(cls, p1, pt=None, angle=None, **kwargs):
p1 = Point(p1, dim=2)
if pt is not None and angle is None:
try:
p2 = Point(pt, dim=2)
except (NotImplementedError, TypeError, ValueError):
raise ValueError(filldedent('''
The 2nd argument was not a valid Point; if
it was meant to be an angle it should be
given with keyword "angle".'''))
if p1 == p2:
raise ValueError('A Ray requires two distinct points.')
elif angle is not None and pt is None:
# we need to know if the angle is an odd multiple of pi/2
angle = sympify(angle)
c = _pi_coeff(angle)
p2 = None
if c is not None:
if c.is_Rational:
if c.q == 2:
if c.p == 1:
p2 = p1 + Point(0, 1)
elif c.p == 3:
p2 = p1 + Point(0, -1)
elif c.q == 1:
if c.p == 0:
p2 = p1 + Point(1, 0)
elif c.p == 1:
p2 = p1 + Point(-1, 0)
if p2 is None:
c *= S.Pi
else:
c = angle % (2*S.Pi)
if not p2:
m = 2*c/S.Pi
left = And(1 < m, m < 3) # is it in quadrant 2 or 3?
x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True))
y = Piecewise((-tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)), (tan(c), True)), True))
p2 = p1 + Point(x, y)
else:
raise ValueError('A 2nd point or keyword "angle" must be used.')
return LinearEntity2D.__new__(cls, p1, p2, **kwargs)
@property
def xdirection(self):
"""The x direction of the ray.
Positive infinity if the ray points in the positive x direction,
negative infinity if the ray points in the negative x direction,
or 0 if the ray is vertical.
See Also
========
ydirection
Examples
========
>>> from sympy import Point, Ray
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(0, -1)
>>> r1, r2 = Ray(p1, p2), Ray(p1, p3)
>>> r1.xdirection
oo
>>> r2.xdirection
0
"""
if self.p1.x < self.p2.x:
return S.Infinity
elif self.p1.x == self.p2.x:
return S.Zero
else:
return S.NegativeInfinity
@property
def ydirection(self):
"""The y direction of the ray.
Positive infinity if the ray points in the positive y direction,
negative infinity if the ray points in the negative y direction,
or 0 if the ray is horizontal.
See Also
========
xdirection
Examples
========
>>> from sympy import Point, Ray
>>> p1, p2, p3 = Point(0, 0), Point(-1, -1), Point(-1, 0)
>>> r1, r2 = Ray(p1, p2), Ray(p1, p3)
>>> r1.ydirection
-oo
>>> r2.ydirection
0
"""
if self.p1.y < self.p2.y:
return S.Infinity
elif self.p1.y == self.p2.y:
return S.Zero
else:
return S.NegativeInfinity
def closing_angle(r1, r2):
"""Return the angle by which r2 must be rotated so it faces the same
direction as r1.
Parameters
==========
r1 : Ray2D
r2 : Ray2D
Returns
=======
angle : angle in radians (ccw angle is positive)
See Also
========
LinearEntity.angle_between
Examples
========
>>> from sympy import Ray, pi
>>> r1 = Ray((0, 0), (1, 0))
>>> r2 = r1.rotate(-pi/2)
>>> angle = r1.closing_angle(r2); angle
pi/2
>>> r2.rotate(angle).direction.unit == r1.direction.unit
True
>>> r2.closing_angle(r1)
-pi/2
"""
if not all(isinstance(r, Ray2D) for r in (r1, r2)):
# although the direction property is defined for
# all linear entities, only the Ray is truly a
# directed object
raise TypeError('Both arguments must be Ray2D objects.')
a1 = atan2(*list(reversed(r1.direction.args)))
a2 = atan2(*list(reversed(r2.direction.args)))
if a1*a2 < 0:
a1 = 2*S.Pi + a1 if a1 < 0 else a1
a2 = 2*S.Pi + a2 if a2 < 0 else a2
return a1 - a2
| Ray2D |
python | astropy__astropy | astropy/utils/masked/tests/test_masked.py | {
"start": 2403,
"end": 2759
} | class ____(ArraySetup):
_data_cls = Quantity
@classmethod
def setup_class(cls):
super().setup_class()
cls.a = Quantity(cls.a, u.m)
cls.b = Quantity(cls.b, u.cm)
cls.c = Quantity(cls.c, u.km)
cls.sa = Quantity(cls.sa, u.m, dtype=cls.sdt)
cls.sb = Quantity(cls.sb, u.cm, dtype=cls.sdt)
| QuantitySetup |
python | django__django | tests/m2m_intermediary/models.py | {
"start": 776,
"end": 968
} | class ____(models.Model):
reporter = models.ForeignKey(Reporter, models.CASCADE)
article = models.ForeignKey(Article, models.CASCADE)
position = models.CharField(max_length=100)
| Writer |
python | django__django | tests/logging_tests/tests.py | {
"start": 926,
"end": 1732
} | class ____(SimpleTestCase):
def test_require_debug_false_filter(self):
"""
Test the RequireDebugFalse filter class.
"""
filter_ = RequireDebugFalse()
with self.settings(DEBUG=True):
self.assertIs(filter_.filter("record is not used"), False)
with self.settings(DEBUG=False):
self.assertIs(filter_.filter("record is not used"), True)
def test_require_debug_true_filter(self):
"""
Test the RequireDebugTrue filter class.
"""
filter_ = RequireDebugTrue()
with self.settings(DEBUG=True):
self.assertIs(filter_.filter("record is not used"), True)
with self.settings(DEBUG=False):
self.assertIs(filter_.filter("record is not used"), False)
| LoggingFiltersTest |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 619742,
"end": 625351
} | class ____(ExprNode):
# Deals with the two possible uses of an annotation.
# 1. The post PEP-563 use where an annotation is stored
# as a string
# 2. The Cython use where the annotation can indicate an
# object type
#
# Doesn't handle the pre PEP-563 version where the
# annotation is evaluated into a Python Object.
subexprs = []
is_annotation = True
# 'untyped' is set for fused specializations:
# Once a fused function has been created we don't want
# annotations to override an already set type.
untyped = False
def __init__(self, pos, expr, string=None):
"""string is expected to already be a UnicodeNode or None"""
ExprNode.__init__(self, pos)
if string is None:
# import doesn't work at top of file?
from .AutoDocTransforms import AnnotationWriter
string_value = StringEncoding.EncodedString(
AnnotationWriter(description="annotation").write(expr))
string = UnicodeNode(pos, value=string_value)
self.string = string
self.expr = expr
def analyse_types(self, env):
return self # nothing needs doing
def analyse_as_type(self, env):
# for compatibility when used as a return_type_node, have this interface too
return self.analyse_type_annotation(env)[1]
def _warn_on_unknown_annotation(self, env, annotation):
"""Method checks for cases when user should be warned that annotation contains unknown types."""
if isinstance(annotation, SliceIndexNode):
annotation = annotation.base
if annotation.is_name:
# Validate annotation in form `var: type`
if not env.lookup(annotation.name):
warning(annotation.pos,
"Unknown type declaration '%s' in annotation, ignoring" % self.string.value, level=1)
elif annotation.is_attribute and annotation.obj.is_name:
# Validate annotation in form `var: module.type`
if not env.lookup(annotation.obj.name):
# `module` is undeclared
warning(annotation.pos,
"Unknown type declaration '%s' in annotation, ignoring" % self.string.value, level=1)
elif annotation.obj.is_cython_module:
# `module` is cython
module_scope = annotation.obj.analyse_as_module(env)
if module_scope and not module_scope.lookup_type(annotation.attribute):
error(annotation.pos,
"Unknown type declaration '%s' in annotation" % self.string.value)
else:
module_scope = annotation.obj.analyse_as_module(env)
if module_scope and module_scope.pxd_file_loaded:
warning(annotation.pos,
"Unknown type declaration '%s' in annotation, ignoring" % self.string.value, level=1)
else:
warning(annotation.pos, "Unknown type declaration in annotation, ignoring")
def analyse_type_annotation(self, env, assigned_value=None):
if self.untyped:
# Already applied as a fused type, not re-evaluating it here.
return [], None
annotation = self.expr
explicit_pytype = explicit_ctype = False
if annotation.is_dict_literal:
warning(annotation.pos,
"Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.", level=1)
for name, value in annotation.key_value_pairs:
if not name.is_string_literal:
continue
if name.value in ('type', b'type'):
explicit_pytype = True
if not explicit_ctype:
annotation = value
elif name.value in ('ctype', b'ctype'):
explicit_ctype = True
annotation = value
if explicit_pytype and explicit_ctype:
warning(annotation.pos, "Duplicate type declarations found in signature annotation", level=1)
elif isinstance(annotation, TupleNode):
warning(annotation.pos,
"Tuples cannot be declared as simple tuples of types. Use 'tuple[type1, type2, ...]'.", level=1)
return [], None
with env.new_c_type_context(in_c_type_context=explicit_ctype):
arg_type = annotation.analyse_as_type(env)
if arg_type is None:
self._warn_on_unknown_annotation(env, annotation)
return [], arg_type
if annotation.is_string_literal:
warning(annotation.pos,
"Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.",
level=1)
if explicit_pytype and not explicit_ctype and not (arg_type.is_pyobject or arg_type.equivalent_type):
warning(annotation.pos,
"Python type declaration in signature annotation does not refer to a Python type")
if arg_type.is_complex:
# creating utility code needs to be special-cased for complex types
arg_type.create_declaration_utility_code(env)
# Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]"
modifiers = annotation.analyse_pytyping_modifiers(env) if annotation.is_subscript or isinstance(annotation, BitwiseOrNode) else []
return modifiers, arg_type
| AnnotationNode |
python | pytorch__pytorch | torch/utils/checkpoint.py | {
"start": 48704,
"end": 52081
} | class ____(torch.autograd.graph.saved_tensors_hooks):
def __init__(self, frame) -> None:
def pack_hook(x):
# See Rule 4 above
holder = _Holder()
frame.weak_holders.append(weakref.ref(holder))
# Save metadata to detect non-determinism
if frame.metadata_fn is not None:
with torch.no_grad():
frame.x_metadatas.append(frame.metadata_fn(x))
return holder
def unpack_hook(holder):
# First check if we're inside a GraphExecGroup context
gid: Union[GraphExecGroup, None, int] = GraphExecGroup._get_current_group()
if gid is None:
# Fallback to using the current graph task id
gid = torch._C._current_graph_task_id()
if gid == -1:
# generate a temporary id if we trigger unpack outside of a backward call
gid = int(uuid.uuid4())
if not frame.is_recomputed[gid]:
ctx = frame.input_saver.grad_fn
args = ctx.get_args(ctx.saved_tensors)
try:
with _recomputation_hook(
weakref.ref(frame), gid
), torch.autograd.enable_grad():
# See Note: [compiled autograd and checkpoint unpack hook]
_run_fn_with_dynamo_disabled(frame.recompute_fn, *args)
except _StopRecomputationError:
pass
frame.is_recomputed[gid] = True
frame.check_recomputed_tensors_match(gid)
_internal_assert(gid in holder.handles)
if holder.handles[gid] is None:
extra = ""
if torch._C._get_graph_exec_group() is not None:
extra = (
"Performing two backward calls that overlap (i.e. require the same "
"saved activation in order to compute gradients) is not allowed while "
"under the torch.utils.checkpoint.GraphExecGroup context. "
)
raise CheckpointError(
"torch.utils.checkpoint: Unpack is being triggered for a tensor that was already "
f"unpacked once. {extra}If you are calling ctx.saved_tensors in backward, make sure "
"to do so only once. Otherwise please open an issue with details on your use case."
)
_internal_assert(holder.handles[gid] in frame.recomputed[gid])
ret = frame.recomputed[gid][holder.handles[gid]]
holder.handles[gid] = None
return ret
if frame.unpack_error_cb is not None:
def unpack_hook_with_error_cb(holder):
try:
return unpack_hook(holder)
except CheckpointError as e:
frame.unpack_error_cb(e)
super().__init__(pack_hook, unpack_hook_with_error_cb)
else:
super().__init__(pack_hook, unpack_hook)
def _is_compiling(func, args, kwargs):
# Check if we are under AOTAutograd tracing
# Checking that a functional mode is active should always do what we want
return torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY) is not None
| _checkpoint_hook |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_managed_kafka.py | {
"start": 2331,
"end": 14895
} | class ____:
def setup_method(self):
with mock.patch(
BASE_STRING.format("GoogleBaseHook.__init__"), new=mock_base_gcp_hook_default_project_id
):
self.hook = ManagedKafkaHook(gcp_conn_id=TEST_GCP_CONN_ID)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_create_cluster(self, mock_client) -> None:
self.hook.create_cluster(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster=TEST_CLUSTER,
cluster_id=TEST_CLUSTER_ID,
)
mock_client.assert_called_once()
mock_client.return_value.create_cluster.assert_called_once_with(
request=dict(
parent=mock_client.return_value.common_location_path.return_value,
cluster=TEST_CLUSTER,
cluster_id=TEST_CLUSTER_ID,
request_id=None,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.common_location_path.assert_called_once_with(TEST_PROJECT_ID, TEST_LOCATION)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_delete_cluster(self, mock_client) -> None:
self.hook.delete_cluster(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
)
mock_client.assert_called_once()
mock_client.return_value.delete_cluster.assert_called_once_with(
request=dict(name=mock_client.return_value.cluster_path.return_value, request_id=None),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.cluster_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_LOCATION, TEST_CLUSTER_ID
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_get_cluster(self, mock_client) -> None:
self.hook.get_cluster(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
)
mock_client.assert_called_once()
mock_client.return_value.get_cluster.assert_called_once_with(
request=dict(
name=mock_client.return_value.cluster_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.cluster_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_LOCATION, TEST_CLUSTER_ID
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_update_cluster(self, mock_client) -> None:
self.hook.update_cluster(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster=TEST_UPDATED_CLUSTER,
cluster_id=TEST_CLUSTER_ID,
update_mask=TEST_CLUSTER_UPDATE_MASK,
)
mock_client.assert_called_once()
mock_client.return_value.update_cluster.assert_called_once_with(
request=dict(
update_mask=TEST_CLUSTER_UPDATE_MASK,
cluster={
"name": mock_client.return_value.cluster_path.return_value,
**TEST_UPDATED_CLUSTER,
},
request_id=None,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.cluster_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_LOCATION, TEST_CLUSTER_ID
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_list_clusters(self, mock_client) -> None:
self.hook.list_clusters(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
)
mock_client.assert_called_once()
mock_client.return_value.list_clusters.assert_called_once_with(
request=dict(
parent=mock_client.return_value.common_location_path.return_value,
page_size=None,
page_token=None,
filter=None,
order_by=None,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.common_location_path.assert_called_once_with(TEST_PROJECT_ID, TEST_LOCATION)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_create_topic(self, mock_client) -> None:
self.hook.create_topic(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
topic_id=TEST_TOPIC_ID,
topic=TEST_TOPIC,
)
mock_client.assert_called_once()
mock_client.return_value.create_topic.assert_called_once_with(
request=dict(
parent=mock_client.return_value.cluster_path.return_value,
topic_id=TEST_TOPIC_ID,
topic=TEST_TOPIC,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.cluster_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_LOCATION, TEST_CLUSTER_ID
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_delete_topic(self, mock_client) -> None:
self.hook.delete_topic(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
topic_id=TEST_TOPIC_ID,
)
mock_client.assert_called_once()
mock_client.return_value.delete_topic.assert_called_once_with(
request=dict(name=mock_client.return_value.topic_path.return_value),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.topic_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_LOCATION, TEST_CLUSTER_ID, TEST_TOPIC_ID
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_get_topic(self, mock_client) -> None:
self.hook.get_topic(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
topic_id=TEST_TOPIC_ID,
)
mock_client.assert_called_once()
mock_client.return_value.get_topic.assert_called_once_with(
request=dict(
name=mock_client.return_value.topic_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.topic_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_LOCATION,
TEST_CLUSTER_ID,
TEST_TOPIC_ID,
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_update_topic(self, mock_client) -> None:
self.hook.update_topic(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
topic_id=TEST_TOPIC_ID,
topic=TEST_UPDATED_TOPIC,
update_mask=TEST_TOPIC_UPDATE_MASK,
)
mock_client.assert_called_once()
mock_client.return_value.update_topic.assert_called_once_with(
request=dict(
update_mask=TEST_TOPIC_UPDATE_MASK,
topic={
"name": mock_client.return_value.topic_path.return_value,
**TEST_UPDATED_TOPIC,
},
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.topic_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_LOCATION, TEST_CLUSTER_ID, TEST_TOPIC_ID
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_list_topics(self, mock_client) -> None:
self.hook.list_topics(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
)
mock_client.assert_called_once()
mock_client.return_value.list_topics.assert_called_once_with(
request=dict(
parent=mock_client.return_value.cluster_path.return_value,
page_size=None,
page_token=None,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.cluster_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_LOCATION, TEST_CLUSTER_ID
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_delete_consumer_group(self, mock_client) -> None:
self.hook.delete_consumer_group(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
consumer_group_id=TEST_CONSUMER_GROUP_ID,
)
mock_client.assert_called_once()
mock_client.return_value.delete_consumer_group.assert_called_once_with(
request=dict(name=mock_client.return_value.consumer_group_path.return_value),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.consumer_group_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_LOCATION, TEST_CLUSTER_ID, TEST_CONSUMER_GROUP_ID
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_get_consumer_group(self, mock_client) -> None:
self.hook.get_consumer_group(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
consumer_group_id=TEST_CONSUMER_GROUP_ID,
)
mock_client.assert_called_once()
mock_client.return_value.get_consumer_group.assert_called_once_with(
request=dict(
name=mock_client.return_value.consumer_group_path.return_value,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.consumer_group_path.assert_called_once_with(
TEST_PROJECT_ID,
TEST_LOCATION,
TEST_CLUSTER_ID,
TEST_CONSUMER_GROUP_ID,
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_update_consumer_group(self, mock_client) -> None:
self.hook.update_consumer_group(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
consumer_group_id=TEST_CONSUMER_GROUP_ID,
consumer_group={},
update_mask={},
)
mock_client.assert_called_once()
mock_client.return_value.update_consumer_group.assert_called_once_with(
request=dict(
update_mask={},
consumer_group={
"name": mock_client.return_value.consumer_group_path.return_value,
**{},
},
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.consumer_group_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_LOCATION, TEST_CLUSTER_ID, TEST_CONSUMER_GROUP_ID
)
@mock.patch(MANAGED_KAFKA_STRING.format("ManagedKafkaHook.get_managed_kafka_client"))
def test_list_consumer_groups(self, mock_client) -> None:
self.hook.list_consumer_groups(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_id=TEST_CLUSTER_ID,
)
mock_client.assert_called_once()
mock_client.return_value.list_consumer_groups.assert_called_once_with(
request=dict(
parent=mock_client.return_value.cluster_path.return_value,
page_size=None,
page_token=None,
),
metadata=(),
retry=DEFAULT,
timeout=None,
)
mock_client.return_value.cluster_path.assert_called_once_with(
TEST_PROJECT_ID, TEST_LOCATION, TEST_CLUSTER_ID
)
| TestManagedKafkaWithDefaultProjectIdHook |
python | django-mptt__django-mptt | mptt/admin.py | {
"start": 11815,
"end": 16514
} | class ____(RelatedFieldListFilter):
"""
Admin filter class which filters models related to parent model with all
its descendants.
Usage:
from mptt.admin import TreeRelatedFieldListFilter
@admin.register(models.MyModel)
class MyModelAdmin(admin.ModelAdmin):
model = models.MyModel
list_filter =
(
('my_related_model', TreeRelatedFieldListFilter),
)
"""
template = "admin/mptt_filter.html"
mptt_level_indent = 10
def __init__(self, field, request, params, model, model_admin, field_path):
self.other_model = get_model_from_relation(field)
if field.remote_field is not None and hasattr(
field.remote_field, "get_related_field"
):
self.rel_name = field.remote_field.get_related_field().name
else:
self.rel_name = self.other_model._meta.pk.name
self.changed_lookup_kwarg = f"{field_path}__{self.rel_name}__inhierarchy"
super().__init__(field, request, params, model, model_admin, field_path)
self.lookup_val = request.GET.get(self.changed_lookup_kwarg)
def expected_parameters(self):
return [self.changed_lookup_kwarg, self.lookup_kwarg_isnull]
# Ripped from contrib.admin.filters,FieldListFilter Django 1.8 to deal with
# lookup name 'inhierarchy'
def queryset(self, request, queryset):
try:
# #### MPTT ADDITION START
if self.lookup_val:
other_model = self.other_model.objects.get(pk=self.lookup_val)
other_models = other_model.get_descendants(True)
del self.used_parameters[self.changed_lookup_kwarg]
self.used_parameters.update(
{f"{self.field_path}__{self.rel_name}__in": other_models}
)
# #### MPTT ADDITION END
return queryset.filter(**self.used_parameters)
except ValidationError as e:
raise IncorrectLookupParameters(e)
# Adding padding_style to each choice tuple
def field_choices(self, field, request, model_admin):
mptt_level_indent = getattr(
model_admin, "mptt_level_indent", self.mptt_level_indent
)
language_bidi = get_language_bidi()
initial_choices = field.get_choices(include_blank=False)
pks = [pk for pk, val in initial_choices]
models = field.related_model._default_manager.filter(pk__in=pks)
levels_dict = {
model.pk: getattr(model, model._mptt_meta.level_attr) for model in models
}
choices = []
for pk, val in initial_choices:
padding_style = ' style="padding-{}:{}px"'.format(
"right" if language_bidi else "left",
mptt_level_indent * levels_dict[pk],
)
choices.append((pk, val, mark_safe(padding_style)))
return choices
# Ripped from contrib.admin.filters,RelatedFieldListFilter Django 1.8 to
# yield padding_style
def choices(self, cl):
# #### MPTT ADDITION START
EMPTY_CHANGELIST_VALUE = self.empty_value_display
# #### MPTT ADDITION END
yield {
"selected": self.lookup_val is None and not self.lookup_val_isnull,
"query_string": cl.get_query_string(
{}, [self.changed_lookup_kwarg, self.lookup_kwarg_isnull]
),
"display": _("All"),
}
for pk_val, val, padding_style in self.lookup_choices:
yield {
"selected": self.lookup_val == smart_str(pk_val),
"query_string": cl.get_query_string(
{
self.changed_lookup_kwarg: pk_val,
},
[self.lookup_kwarg_isnull],
),
"display": val,
# #### MPTT ADDITION START
"padding_style": padding_style,
# #### MPTT ADDITION END
}
if (
isinstance(self.field, ForeignObjectRel)
and (self.field.field.null or isinstance(self.field.field, ManyToManyField))
) or (
self.field.remote_field is not None
and (self.field.null or isinstance(self.field, ManyToManyField))
):
yield {
"selected": bool(self.lookup_val_isnull),
"query_string": cl.get_query_string(
{
self.lookup_kwarg_isnull: "True",
},
[self.changed_lookup_kwarg],
),
"display": EMPTY_CHANGELIST_VALUE,
}
| TreeRelatedFieldListFilter |
python | encode__django-rest-framework | rest_framework/exceptions.py | {
"start": 6449,
"end": 6879
} | class ____(APIException):
status_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
default_detail = _('Unsupported media type "{media_type}" in request.')
default_code = 'unsupported_media_type'
def __init__(self, media_type, detail=None, code=None):
if detail is None:
detail = force_str(self.default_detail).format(media_type=media_type)
super().__init__(detail, code)
| UnsupportedMediaType |
python | scipy__scipy | scipy/interpolate/tests/test_bsplines.py | {
"start": 104081,
"end": 110077
} | class ____:
def test_2D_separable_simple(self):
x = np.arange(6)
y = np.arange(6) + 0.5
values = x[:, None]**3 * (y**3 + 2*y)[None, :]
xi = [(a, b) for a, b in itertools.product(x, y)]
bspl = make_ndbspl((x, y), values, k=1)
xp_assert_close(bspl(xi), values.ravel(), atol=1e-15)
# test the coefficients vs outer product of 1D coefficients
spl_x = make_interp_spline(x, x**3, k=1)
spl_y = make_interp_spline(y, y**3 + 2*y, k=1)
cc = spl_x.c[:, None] * spl_y.c[None, :]
xp_assert_close(cc, bspl.c, atol=1e-11, rtol=0)
# test against RGI
from scipy.interpolate import RegularGridInterpolator as RGI
rgi = RGI((x, y), values, method='linear')
xp_assert_close(rgi(xi), bspl(xi), atol=1e-14)
def test_2D_separable_trailing_dims(self):
# test `c` with trailing dimensions, i.e. c.ndim > ndim
x = np.arange(6)
y = np.arange(6)
xi = [(a, b) for a, b in itertools.product(x, y)]
# make values4.shape = (6, 6, 4)
values = x[:, None]**3 * (y**3 + 2*y)[None, :]
values4 = np.dstack((values, values, values, values))
bspl = make_ndbspl((x, y), values4, k=3, solver=ssl.spsolve)
result = bspl(xi)
target = np.dstack((values, values, values, values)).astype(float)
assert result.shape == (36, 4)
xp_assert_close(result.reshape(6, 6, 4),
target, atol=1e-14)
# now two trailing dimensions
values22 = values4.reshape((6, 6, 2, 2))
bspl = make_ndbspl((x, y), values22, k=3, solver=ssl.spsolve)
result = bspl(xi)
assert result.shape == (36, 2, 2)
xp_assert_close(result.reshape(6, 6, 2, 2),
target.reshape((6, 6, 2, 2)), atol=1e-14)
@pytest.mark.parametrize('k', [(3, 3), (1, 1), (3, 1), (1, 3), (3, 5)])
def test_2D_mixed(self, k):
# make a 2D separable spline w/ len(tx) != len(ty)
x = np.arange(6)
y = np.arange(7) + 1.5
xi = [(a, b) for a, b in itertools.product(x, y)]
values = (x**3)[:, None] * (y**2 + 2*y)[None, :]
bspl = make_ndbspl((x, y), values, k=k, solver=ssl.spsolve)
xp_assert_close(bspl(xi), values.ravel(), atol=1e-15)
def test_2D_nans(self):
x = np.arange(6)
y = np.arange(6) + 0.5
y[-1] = np.nan
values = x[:, None]**3 * (y**3 + 2*y)[None, :]
with assert_raises(ValueError):
make_ndbspl((x, y), values, k=1)
def _get_sample_2d_data(self):
# from test_rgi.py::TestIntepN
x = np.array([.5, 2., 3., 4., 5.5, 6.])
y = np.array([.5, 2., 3., 4., 5.5, 6.])
z = np.array(
[
[1, 2, 1, 2, 1, 1],
[1, 2, 1, 2, 1, 1],
[1, 2, 3, 2, 1, 1],
[1, 2, 2, 2, 1, 1],
[1, 2, 1, 2, 1, 1],
[1, 2, 2, 2, 1, 1],
]
)
return x, y, z
def test_2D_vs_RGI_linear(self):
x, y, z = self._get_sample_2d_data()
bspl = make_ndbspl((x, y), z, k=1)
rgi = RegularGridInterpolator((x, y), z, method='linear')
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
xp_assert_close(bspl(xi), rgi(xi), atol=1e-14)
def test_2D_vs_RGI_cubic(self):
x, y, z = self._get_sample_2d_data()
bspl = make_ndbspl((x, y), z, k=3, solver=ssl.spsolve)
rgi = RegularGridInterpolator((x, y), z, method='cubic_legacy')
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
xp_assert_close(bspl(xi), rgi(xi), atol=1e-14)
@pytest.mark.parametrize('solver', [ssl.gmres, ssl.gcrotmk])
def test_2D_vs_RGI_cubic_iterative(self, solver):
# same as `test_2D_vs_RGI_cubic`, only with an iterative solver.
# Note the need to add an explicit `rtol` solver_arg to achieve the
# target accuracy of 1e-14. (the relation between solver atol/rtol
# and the accuracy of the final result is not direct and needs experimenting)
x, y, z = self._get_sample_2d_data()
bspl = make_ndbspl((x, y), z, k=3, solver=solver, rtol=1e-6)
rgi = RegularGridInterpolator((x, y), z, method='cubic_legacy')
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
xp_assert_close(bspl(xi), rgi(xi), atol=1e-14, rtol=1e-7)
def test_2D_vs_RGI_quintic(self):
x, y, z = self._get_sample_2d_data()
bspl = make_ndbspl((x, y), z, k=5, solver=ssl.spsolve)
rgi = RegularGridInterpolator((x, y), z, method='quintic_legacy')
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
xp_assert_close(bspl(xi), rgi(xi), atol=1e-14)
@pytest.mark.parametrize(
'k, meth', [(1, 'linear'), (3, 'cubic_legacy'), (5, 'quintic_legacy')]
)
def test_3D_random_vs_RGI(self, k, meth):
rndm = np.random.default_rng(123456)
x = np.cumsum(rndm.uniform(size=6))
y = np.cumsum(rndm.uniform(size=7))
z = np.cumsum(rndm.uniform(size=8))
values = rndm.uniform(size=(6, 7, 8))
bspl = make_ndbspl((x, y, z), values, k=k, solver=ssl.spsolve)
rgi = RegularGridInterpolator((x, y, z), values, method=meth)
xi = np.random.uniform(low=0.7, high=2.1, size=(11, 3))
xp_assert_close(bspl(xi), rgi(xi), atol=1e-14)
def test_solver_err_not_converged(self):
x, y, z = self._get_sample_2d_data()
solver_args = {'maxiter': 1}
with assert_raises(ValueError, match='solver'):
make_ndbspl((x, y), z, k=3, **solver_args)
with assert_raises(ValueError, match='solver'):
make_ndbspl((x, y), np.dstack((z, z)), k=3, **solver_args)
| TestMakeND |
python | ApeWorX__ape | src/ape/utils/abi.py | {
"start": 834,
"end": 2333
} | class ____(UnsignedIntegerDecoder):
"""
This class exists because uint256 values when not-padded
always cause issues, even with strict=False.
It can be deleted if https://github.com/ethereum/eth-abi/pull/240
merges.
"""
def read_data_from_stream(self, stream):
"""
Override to pad the value instead of raising an error.
"""
data_byte_size: int = self.data_byte_size # type: ignore
data = stream.read(data_byte_size)
if len(data) != data_byte_size:
# Pad the value (instead of raising InsufficientBytesError).
data = validate_bytes_size(data, 32)
return data
registry.unregister("uint")
registry.register(
BaseEquals("uint"),
UnsignedIntegerEncoder,
_ApeUnsignedIntegerDecoder,
label="uint",
)
def is_array(abi_type: Union[str, ABIType]) -> bool:
"""
Returns ``True`` if the given type is a probably an array.
Args:
abi_type (Union[str, ABIType]): The type to check.
Returns:
bool
"""
return ARRAY_PATTERN.match(str(abi_type)) is not None
def returns_array(abi: MethodABI) -> bool:
"""
Returns ``True`` if the given method ABI likely returns an array.
Args:
abi (MethodABI): An ABI method.
Returns:
bool
"""
return _is_array_return(abi.outputs)
def _is_array_return(outputs: Sequence[ABIType]):
return len(outputs) == 1 and is_array(outputs[0].type)
| _ApeUnsignedIntegerDecoder |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 56923,
"end": 57101
} | class ____(_ConfigBase):
enabled: bool
auto_tenant_creation: bool
auto_tenant_activation: bool
MultiTenancyConfig = _MultiTenancyConfig
@dataclass
| _MultiTenancyConfig |
python | django__django | tests/async/test_async_auth.py | {
"start": 275,
"end": 4677
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.test_user = User.objects.create_user(
"testuser", "test@example.com", "testpw"
)
async def test_aauthenticate(self):
user = await aauthenticate(username="testuser", password="testpw")
self.assertIsInstance(user, User)
self.assertEqual(user.username, self.test_user.username)
user.is_active = False
await user.asave()
self.assertIsNone(await aauthenticate(username="testuser", password="testpw"))
async def test_alogin(self):
request = HttpRequest()
request.session = await self.client.asession()
await alogin(request, self.test_user)
user = await aget_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, self.test_user.username)
async def test_changed_password_invalidates_aget_user(self):
request = HttpRequest()
request.session = await self.client.asession()
await alogin(request, self.test_user)
self.test_user.set_password("new_password")
await self.test_user.asave()
user = await aget_user(request)
self.assertIsNotNone(user)
self.assertTrue(user.is_anonymous)
# Session should be flushed.
self.assertIsNone(request.session.session_key)
async def test_alogin_new_user(self):
request = HttpRequest()
request.session = await self.client.asession()
await alogin(request, self.test_user)
second_user = await User.objects.acreate_user(
"testuser2", "test2@example.com", "testpw2"
)
await alogin(request, second_user)
user = await aget_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, second_user.username)
async def test_alogin_without_user(self):
request = HttpRequest()
request.session = await self.client.asession()
with self.assertRaisesMessage(
AttributeError,
"'NoneType' object has no attribute 'get_session_auth_hash'",
):
await alogin(request, None)
async def test_alogout(self):
await self.client.alogin(username="testuser", password="testpw")
request = HttpRequest()
request.session = await self.client.asession()
await alogout(request)
user = await aget_user(request)
self.assertIsInstance(user, AnonymousUser)
async def test_client_alogout(self):
await self.client.alogin(username="testuser", password="testpw")
request = HttpRequest()
request.session = await self.client.asession()
await self.client.alogout()
user = await aget_user(request)
self.assertIsInstance(user, AnonymousUser)
async def test_change_password(self):
await self.client.alogin(username="testuser", password="testpw")
request = HttpRequest()
request.session = await self.client.asession()
async def auser():
return self.test_user
request.auser = auser
await aupdate_session_auth_hash(request, self.test_user)
user = await aget_user(request)
self.assertIsInstance(user, User)
async def test_invalid_login(self):
self.assertEqual(
await self.client.alogin(username="testuser", password=""), False
)
async def test_client_aforce_login(self):
await self.client.aforce_login(self.test_user)
request = HttpRequest()
request.session = await self.client.asession()
user = await aget_user(request)
self.assertEqual(user.username, self.test_user.username)
@override_settings(
AUTHENTICATION_BACKENDS=[
"django.contrib.auth.backends.ModelBackend",
"django.contrib.auth.backends.AllowAllUsersModelBackend",
]
)
async def test_client_aforce_login_backend(self):
self.test_user.is_active = False
await self.test_user.asave()
await self.client.aforce_login(
self.test_user,
backend="django.contrib.auth.backends.AllowAllUsersModelBackend",
)
request = HttpRequest()
request.session = await self.client.asession()
user = await aget_user(request)
self.assertEqual(user.username, self.test_user.username)
| AsyncAuthTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-gcd-sum-of-a-subarray.py | {
"start": 2146,
"end": 4412
} | class ____(object):
def maxGcdSum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
def binary_search_right(left, right, check):
while left <= right:
mid = left + (right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right
# RMQ - Sparse Table
# Template: https://github.com/kamyu104/GoogleCodeJam-Farewell-Rounds/blob/main/Round%20D/genetic_sequences2.py3
# Time: ctor: O(NlogN) * O(fn)
# query: O(fn)
# Space: O(NlogN)
class SparseTable(object):
def __init__(self, arr, fn):
self.fn = fn
self.bit_length = [0]
n = len(arr)
k = n.bit_length()-1 # log2_floor(n)
for i in xrange(k+1):
self.bit_length.extend(i+1 for _ in xrange(min(1<<i, (n+1)-len(self.bit_length))))
self.st = [[0]*n for _ in xrange(k+1)]
self.st[0] = arr[:]
for i in xrange(1, k+1): # Time: O(NlogN) * O(fn)
for j in xrange((n-(1<<i))+1):
self.st[i][j] = fn(self.st[i-1][j], self.st[i-1][j+(1<<(i-1))])
def query(self, L, R): # Time: O(fn)
i = self.bit_length[R-L+1]-1 # log2_floor(R-L+1)
return self.fn(self.st[i][L], self.st[i][R-(1<<i)+1])
prefix = [0]*(len(nums)+1)
for i, x in enumerate(nums):
prefix[i+1] = prefix[i]+x
result = 0
rmq = SparseTable(nums, gcd)
for left, x in enumerate(nums):
right = left
while right < len(nums): # O(logr) times
g = rmq.query(left, right)
right = binary_search_right(right, len(nums)-1, lambda x: rmq.query(left, x) >= g) # Time: O(logn) * O(logr)
if right-left+1 >= k:
result = max(result, (prefix[right+1]-prefix[left])*g)
right += 1
return result
| Solution3_TLE |
python | sqlalchemy__sqlalchemy | test/base/test_except.py | {
"start": 531,
"end": 619
} | class ____(DatabaseError):
pass
# exception with a totally different name...
| OutOfSpec |
python | pypa__warehouse | tests/unit/utils/test_http.py | {
"start": 1523,
"end": 2355
} | class ____:
@pytest.mark.parametrize(
"uri",
[
"https://example.com/",
"http://example.com/",
"https://sub.example.com/path?query#thing",
],
)
def test_valid(self, uri):
assert is_valid_uri(uri)
@pytest.mark.parametrize(
"uri", ["javascript:alert(0)", "UNKNOWN", "ftp://example.com/", ""]
)
def test_invalid(self, uri):
assert not is_valid_uri(uri)
def test_plain_schemes(self):
assert is_valid_uri(
"ftp://example.com/", require_scheme=True, allowed_schemes=[]
)
def test_scheme_not_required(self):
assert is_valid_uri("//example.com", require_scheme=False)
def test_authority_not_required(self):
assert is_valid_uri("http://", require_authority=False)
| TestIsValidURI |
python | pytorch__pytorch | test/functorch/test_eager_transforms.py | {
"start": 176894,
"end": 181717
} | class ____(TestCase):
"""Tests for tolist() method with GradTrackingTensor (functorch tensors)."""
def test_tolist_with_grad(self):
"""Test to see if tolist works inside grad transformation."""
def f(x):
# inside grad, x is a GradTrackingTensor
result = x.tolist()
# tolist should return a python list and not fail
self.assertIsInstance(result, list)
self.assertEqual(result, [1.0, 2.0, 3.0])
return (x**2).sum()
x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
grad_f = torch.func.grad(f)
result = grad_f(x)
self.assertIsInstance(result, torch.Tensor)
# gradients should still be computed correctly
self.assertEqual(result, [2.0, 4.0, 6.0])
def test_tolist_nested_grad(self):
"""Test `tolist` with nested grad transformations."""
def f(x):
def g(y):
# y is gradTrackingTensor(lvl=1)
inner_list = y.tolist()
self.assertIsInstance(inner_list, list)
return (y**2).sum()
# x is a gradTrackingTensor(lvl=0)
outer_list = x.tolist()
self.assertIsInstance(outer_list, list)
grad_g = torch.func.grad(g)
return grad_g(x).sum()
x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
grad_f = torch.func.grad(f)
result = grad_f(x)
# should compute second derivate
self.assertIsInstance(result, torch.Tensor)
# grad_f should return the derivate of g(y) which is (2*x).sum
self.assertEqual(
result,
[
2.0,
2.0,
2.0,
],
)
def test_tolist_multidimensional_grad(self):
"""Test tolist with multi-dimensional tensors in grad."""
def f(x):
result = x.tolist()
self.assertIsInstance(result, list)
self.assertEqual(len(result), 2)
self.assertEqual(result, [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
return x.sum()
x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], requires_grad=True)
grad_f = torch.func.grad(f)
result = grad_f(x)
self.assertIsInstance(result, torch.Tensor)
self.assertEqual(
result,
[
[
1.0,
1.0,
1.0,
],
[1.0, 1.0, 1.0],
],
)
def test_tolist_conj_neg_grad(self):
"""Test tolist method with conjugate/negative tensors in grad context."""
def f(x):
# test with the conjugate view
x_conj = x.conj()
result_conj = x_conj.tolist()
self.assertIsInstance(result_conj, list)
return (x * x.conj()).real.sum()
x = torch.tensor([1.0 + 2.0j, 3.0 + 4.0j], requires_grad=True)
grad_f = torch.func.grad(f)
result = grad_f(x)
self.assertIsInstance(result, torch.Tensor)
self.assertEqual(result, [2.0 + 4.0j, 6.0 + 8.0j])
only_for = ("cpu", "cuda")
instantiate_device_type_tests(
TestGradTransform,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestVmapOfGrad,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestJac,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestJvp,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestLinearize,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestVmapJvpInplaceView,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestHessian,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestComposability,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestExamplesCorrectness,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestHigherOrderOperatorInteraction,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestFunctionalize,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestAutogradFunction,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestAutogradFunctionVmapAPI,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestHelpers,
globals(),
only_for=only_for,
)
instantiate_parametrized_tests(
TestMakeFunctional,
)
instantiate_device_type_tests(
TestCompileTransforms,
globals(),
only_for=only_for,
)
instantiate_device_type_tests(
TestGradTrackingTensorToList, globals(), only_for=only_for
)
if __name__ == "__main__":
run_tests()
| TestGradTrackingTensorToList |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 186263,
"end": 187029
} | class ____(NetCDF3Only, CFEncodedBase):
engine: T_NetcdfEngine = "netcdf4"
file_format: T_NetcdfTypes = "NETCDF3_CLASSIC"
@contextlib.contextmanager
def create_store(self):
with create_tmp_file() as tmp_file:
with backends.NetCDF4DataStore.open(
tmp_file, mode="w", format="NETCDF3_CLASSIC"
) as store:
yield store
def test_encoding_kwarg_vlen_string(self) -> None:
original = Dataset({"x": ["foo", "bar", "baz"]})
kwargs = dict(encoding={"x": {"dtype": str}})
with pytest.raises(ValueError, match=r"encoding dtype=str for vlen"):
with self.roundtrip(original, save_kwargs=kwargs):
pass
@requires_netCDF4
| TestNetCDF3ViaNetCDF4Data |
python | has2k1__plotnine | doc/_renderer.py | {
"start": 2207,
"end": 3663
} | class ____(RenderDocClass):
@cached_property
def _usage(self) -> tuple[str, Literal["signature", "code"]] | None:
"""
Parse the docstring **Usage** block
"""
docstring = self.obj.docstring.value if self.obj.docstring else ""
if m := usage_pattern.search(docstring):
content = dedent(m.group("indented_block")).strip()
return (
content,
"signature" if signature_pattern.match(content) else "code",
)
return None
def __str__(self):
content = super().__str__()
# R
if res := self._usage:
# Render the content of the usage as code and if it looks
# a signature mark it as one.
usage, kind = res
before, classes = Para("Usage"), ["doc-class", "doc-usage"]
if kind == "signature":
before = None
classes.insert(0, "doc-signature")
new = Div(
[before, CodeBlock(usage, Attr(classes=["python"]))],
Attr(classes=classes),
)
content = usage_pattern.sub(f"{new}\n", content)
return content
def render_signature(self):
# A "Usage" that is a function signature voids the original
# signature
if (res := self._usage) and res[1] == "signature":
return None
return super().render_signature()
| _RenderDocClass |
python | pytest-dev__pytest | src/_pytest/mark/structures.py | {
"start": 10641,
"end": 19333
} | class ____:
"""A decorator for applying a mark on test functions and classes.
``MarkDecorators`` are created with ``pytest.mark``::
mark1 = pytest.mark.NAME # Simple MarkDecorator
mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator
and can then be applied as decorators to test functions::
@mark2
def test_function():
pass
When a ``MarkDecorator`` is called, it does the following:
1. If called with a single class as its only positional argument and no
additional keyword arguments, it attaches the mark to the class so it
gets applied automatically to all test cases found in that class.
2. If called with a single function as its only positional argument and
no additional keyword arguments, it attaches the mark to the function,
containing all the arguments already stored internally in the
``MarkDecorator``.
3. When called in any other case, it returns a new ``MarkDecorator``
instance with the original ``MarkDecorator``'s content updated with
the arguments passed to this call.
Note: The rules above prevent a ``MarkDecorator`` from storing only a
single function or class reference as its positional argument with no
additional keyword or positional arguments. You can work around this by
using `with_args()`.
"""
mark: Mark
def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None:
""":meta private:"""
check_ispytest(_ispytest)
self.mark = mark
@property
def name(self) -> str:
"""Alias for mark.name."""
return self.mark.name
@property
def args(self) -> tuple[Any, ...]:
"""Alias for mark.args."""
return self.mark.args
@property
def kwargs(self) -> Mapping[str, Any]:
"""Alias for mark.kwargs."""
return self.mark.kwargs
@property
def markname(self) -> str:
""":meta private:"""
return self.name # for backward-compat (2.4.1 had this attr)
def with_args(self, *args: object, **kwargs: object) -> MarkDecorator:
"""Return a MarkDecorator with extra arguments added.
Unlike calling the MarkDecorator, with_args() can be used even
if the sole argument is a callable/class.
"""
mark = Mark(self.name, args, kwargs, _ispytest=True)
return MarkDecorator(self.mark.combined_with(mark), _ispytest=True)
# Type ignored because the overloads overlap with an incompatible
# return type. Not much we can do about that. Thankfully mypy picks
# the first match so it works out even if we break the rules.
@overload
def __call__(self, arg: Markable) -> Markable: # type: ignore[overload-overlap]
pass
@overload
def __call__(self, *args: object, **kwargs: object) -> MarkDecorator:
pass
def __call__(self, *args: object, **kwargs: object):
"""Call the MarkDecorator."""
if args and not kwargs:
func = args[0]
is_class = inspect.isclass(func)
# For staticmethods/classmethods, the marks are eventually fetched from the
# function object, not the descriptor, so unwrap.
unwrapped_func = func
if isinstance(func, staticmethod | classmethod):
unwrapped_func = func.__func__
if len(args) == 1 and (istestfunc(unwrapped_func) or is_class):
store_mark(unwrapped_func, self.mark)
return func
return self.with_args(*args, **kwargs)
def get_unpacked_marks(
obj: object | type,
*,
consider_mro: bool = True,
) -> list[Mark]:
"""Obtain the unpacked marks that are stored on an object.
If obj is a class and consider_mro is true, return marks applied to
this class and all of its super-classes in MRO order. If consider_mro
is false, only return marks applied directly to this class.
"""
if isinstance(obj, type):
if not consider_mro:
mark_lists = [obj.__dict__.get("pytestmark", [])]
else:
mark_lists = [
x.__dict__.get("pytestmark", []) for x in reversed(obj.__mro__)
]
mark_list = []
for item in mark_lists:
if isinstance(item, list):
mark_list.extend(item)
else:
mark_list.append(item)
else:
mark_attribute = getattr(obj, "pytestmark", [])
if isinstance(mark_attribute, list):
mark_list = mark_attribute
else:
mark_list = [mark_attribute]
return list(normalize_mark_list(mark_list))
def normalize_mark_list(
mark_list: Iterable[Mark | MarkDecorator],
) -> Iterable[Mark]:
"""
Normalize an iterable of Mark or MarkDecorator objects into a list of marks
by retrieving the `mark` attribute on MarkDecorator instances.
:param mark_list: marks to normalize
:returns: A new list of the extracted Mark objects
"""
for mark in mark_list:
mark_obj = getattr(mark, "mark", mark)
if not isinstance(mark_obj, Mark):
raise TypeError(f"got {mark_obj!r} instead of Mark")
yield mark_obj
def store_mark(obj, mark: Mark) -> None:
"""Store a Mark on an object.
This is used to implement the Mark declarations/decorators correctly.
"""
assert isinstance(mark, Mark), mark
from ..fixtures import getfixturemarker
if getfixturemarker(obj) is not None:
fail(
"Marks cannot be applied to fixtures.\n"
"See docs: https://docs.pytest.org/en/stable/deprecations.html#applying-a-mark-to-a-fixture-function"
)
# Always reassign name to avoid updating pytestmark in a reference that
# was only borrowed.
obj.pytestmark = [*get_unpacked_marks(obj, consider_mro=False), mark]
# Typing for builtin pytest marks. This is cheating; it gives builtin marks
# special privilege, and breaks modularity. But practicality beats purity...
if TYPE_CHECKING:
class _SkipMarkDecorator(MarkDecorator):
@overload # type: ignore[override,no-overload-impl]
def __call__(self, arg: Markable) -> Markable: ...
@overload
def __call__(self, reason: str = ...) -> MarkDecorator: ...
class _SkipifMarkDecorator(MarkDecorator):
def __call__( # type: ignore[override]
self,
condition: str | bool = ...,
*conditions: str | bool,
reason: str = ...,
) -> MarkDecorator: ...
class _XfailMarkDecorator(MarkDecorator):
@overload # type: ignore[override,no-overload-impl]
def __call__(self, arg: Markable) -> Markable: ...
@overload
def __call__(
self,
condition: str | bool = False,
*conditions: str | bool,
reason: str = ...,
run: bool = ...,
raises: None
| type[BaseException]
| tuple[type[BaseException], ...]
| AbstractRaises[BaseException] = ...,
strict: bool = ...,
) -> MarkDecorator: ...
class _ParametrizeMarkDecorator(MarkDecorator):
@overload # type: ignore[override,no-overload-impl]
def __call__(
self,
argnames: str | Sequence[str],
argvalues: Collection[ParameterSet | Sequence[object] | object],
*,
indirect: bool | Sequence[str] = ...,
ids: Iterable[None | str | float | int | bool]
| Callable[[Any], object | None]
| None = ...,
scope: _ScopeName | None = ...,
) -> MarkDecorator: ...
@overload
@deprecated(
"Passing a non-Collection iterable to the 'argvalues' parameter of @pytest.mark.parametrize is deprecated. "
"Convert argvalues to a list or tuple.",
)
def __call__(
self,
argnames: str | Sequence[str],
argvalues: Iterable[ParameterSet | Sequence[object] | object],
*,
indirect: bool | Sequence[str] = ...,
ids: Iterable[None | str | float | int | bool]
| Callable[[Any], object | None]
| None = ...,
scope: _ScopeName | None = ...,
) -> MarkDecorator: ...
class _UsefixturesMarkDecorator(MarkDecorator):
def __call__(self, *fixtures: str) -> MarkDecorator: # type: ignore[override]
...
class _FilterwarningsMarkDecorator(MarkDecorator):
def __call__(self, *filters: str) -> MarkDecorator: # type: ignore[override]
...
@final
| MarkDecorator |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-feedly-rss/llama_index/readers/feedly_rss/base.py | {
"start": 164,
"end": 2141
} | class ____(BaseReader):
"""
Feedly Rss Reader.
Get entries from Feedly Rss Reader
Uses Feedly Official python-api-client: https://github.com/feedly/python-api-client
"""
def __init__(self, bearer_token: str) -> None:
"""Initialize with parameters."""
super().__init__()
self.bearer_token = bearer_token
def setup_auth(
self, directory: Path = Path.home() / ".config/feedly", overwrite: bool = False
):
"""
Modified from python-api-client/feedly/api_client/utils.py
Instead promopting for user input, we take the token as an argument.
"""
directory.mkdir(exist_ok=True, parents=True)
auth_file = directory / "access.token"
if not auth_file.exists() or overwrite:
auth = self.bearer_token
auth_file.write_text(auth.strip())
def load_data(self, category_name, max_count=100):
"""Get the entries from a feedly category."""
from feedly.api_client.session import FeedlySession
from feedly.api_client.stream import StreamOptions
self.setup_auth(overwrite=True)
sess = FeedlySession()
category = sess.user.user_categories.get(category_name)
documents = []
for article in category.stream_contents(
options=StreamOptions(max_count=max_count)
):
# doc for available fields: https://developer.feedly.com/v3/streams/
entry = {
"title": article["title"],
"published": article["published"],
"summary": article["summary"],
"author": article["author"],
"content": article["content"],
"keywords": article["keywords"],
"commonTopics": article["commonTopics"],
}
text = json.dumps(entry, ensure_ascii=False)
documents.append(Document(text=text))
return documents
| FeedlyRssReader |
python | pandas-dev__pandas | pandas/tests/plotting/test_hist_method.py | {
"start": 24013,
"end": 34581
} | class ____:
def test_grouped_hist_legacy(self):
rs = np.random.default_rng(10)
df = DataFrame(rs.standard_normal((10, 1)), columns=["A"])
df["B"] = to_datetime(
rs.integers(
812419200000000000,
819331200000000000,
size=10,
dtype=np.int64,
)
)
df["C"] = rs.integers(0, 4, 10)
df["D"] = ["X"] * 10
axes = _grouped_hist(df.A, by=df.C)
_check_axes_shape(axes, axes_num=4, layout=(2, 2))
def test_grouped_hist_legacy_axes_shape_no_col(self):
rs = np.random.default_rng(10)
df = DataFrame(rs.standard_normal((10, 1)), columns=["A"])
df["B"] = to_datetime(
rs.integers(
812419200000000000,
819331200000000000,
size=10,
dtype=np.int64,
)
)
df["C"] = rs.integers(0, 4, 10)
df["D"] = ["X"] * 10
axes = df.hist(by=df.C)
_check_axes_shape(axes, axes_num=4, layout=(2, 2))
def test_grouped_hist_legacy_single_key(self):
rs = np.random.default_rng(2)
df = DataFrame(rs.standard_normal((10, 1)), columns=["A"])
df["B"] = to_datetime(
rs.integers(
812419200000000000,
819331200000000000,
size=10,
dtype=np.int64,
)
)
df["C"] = rs.integers(0, 4, 10)
df["D"] = ["X"] * 10
# group by a key with single value
axes = df.hist(by="D", rot=30)
_check_axes_shape(axes, axes_num=1, layout=(1, 1))
_check_ticks_props(axes, xrot=30)
def test_grouped_hist_legacy_grouped_hist_kwargs(self):
rs = np.random.default_rng(2)
df = DataFrame(rs.standard_normal((10, 1)), columns=["A"])
df["B"] = to_datetime(
rs.integers(
812419200000000000,
819331200000000000,
size=10,
dtype=np.int64,
)
)
df["C"] = rs.integers(0, 4, 10)
# make sure kwargs to hist are handled
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = _grouped_hist(
df.A,
by=df.C,
cumulative=True,
bins=4,
xlabelsize=xf,
xrot=xrot,
ylabelsize=yf,
yrot=yrot,
density=True,
)
# height of last bin (index 5) must be 1.0
for ax in axes.ravel():
rects = [
x for x in ax.get_children() if isinstance(x, mpl.patches.Rectangle)
]
height = rects[-1].get_height()
tm.assert_almost_equal(height, 1.0)
_check_ticks_props(axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
def test_grouped_hist_legacy_grouped_hist(self):
rs = np.random.default_rng(2)
df = DataFrame(rs.standard_normal((10, 1)), columns=["A"])
df["B"] = to_datetime(
rs.integers(
812419200000000000,
819331200000000000,
size=10,
dtype=np.int64,
)
)
df["C"] = rs.integers(0, 4, 10)
df["D"] = ["X"] * 10
axes = _grouped_hist(df.A, by=df.C, log=True)
# scale of y must be 'log'
_check_ax_scales(axes, yaxis="log")
def test_grouped_hist_legacy_external_err(self):
rs = np.random.default_rng(2)
df = DataFrame(rs.standard_normal((10, 1)), columns=["A"])
df["B"] = to_datetime(
rs.integers(
812419200000000000,
819331200000000000,
size=10,
dtype=np.int64,
)
)
df["C"] = rs.integers(0, 4, 10)
df["D"] = ["X"] * 10
# propagate attr exception from matplotlib.Axes.hist
with tm.external_error_raised(AttributeError):
_grouped_hist(df.A, by=df.C, foo="bar")
def test_grouped_hist_legacy_figsize_err(self):
rs = np.random.default_rng(2)
df = DataFrame(rs.standard_normal((10, 1)), columns=["A"])
df["B"] = to_datetime(
rs.integers(
812419200000000000,
819331200000000000,
size=10,
dtype=np.int64,
)
)
df["C"] = rs.integers(0, 4, 10)
df["D"] = ["X"] * 10
msg = "Specify figure size by tuple instead"
with pytest.raises(ValueError, match=msg):
df.hist(by="C", figsize="default")
def test_grouped_hist_legacy2(self):
n = 10
weight = Series(np.random.default_rng(2).normal(166, 20, size=n))
height = Series(np.random.default_rng(2).normal(60, 10, size=n))
gender_int = np.random.default_rng(2).choice([0, 1], size=n)
df_int = DataFrame({"height": height, "weight": weight, "gender": gender_int})
gb = df_int.groupby("gender")
axes = gb.hist()
assert len(axes) == 2
assert len(mpl.pyplot.get_fignums()) == 2
@pytest.mark.slow
@pytest.mark.parametrize(
"msg, plot_col, by_col, layout",
[
[
"Layout of 1x1 must be larger than required size 2",
"weight",
"gender",
(1, 1),
],
[
"Layout of 1x3 must be larger than required size 4",
"height",
"category",
(1, 3),
],
[
"At least one dimension of layout must be positive",
"height",
"category",
(-1, -1),
],
],
)
def test_grouped_hist_layout_error(self, hist_df, msg, plot_col, by_col, layout):
df = hist_df
with pytest.raises(ValueError, match=msg):
df.hist(column=plot_col, by=getattr(df, by_col), layout=layout)
@pytest.mark.slow
def test_grouped_hist_layout_warning(self, hist_df):
df = hist_df
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
axes = _check_plot_works(
df.hist, column="height", by=df.gender, layout=(2, 1)
)
_check_axes_shape(axes, axes_num=2, layout=(2, 1))
@pytest.mark.slow
@pytest.mark.parametrize(
"layout, check_layout, figsize",
[[(4, 1), (4, 1), None], [(-1, 1), (4, 1), None], [(4, 2), (4, 2), (12, 8)]],
)
def test_grouped_hist_layout_figsize(self, hist_df, layout, check_layout, figsize):
df = hist_df
axes = df.hist(column="height", by=df.category, layout=layout, figsize=figsize)
_check_axes_shape(axes, axes_num=4, layout=check_layout, figsize=figsize)
@pytest.mark.slow
@pytest.mark.parametrize("kwargs", [{}, {"column": "height", "layout": (2, 2)}])
def test_grouped_hist_layout_by_warning(self, hist_df, kwargs):
df = hist_df
# GH 6769
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
axes = _check_plot_works(df.hist, by="classroom", **kwargs)
_check_axes_shape(axes, axes_num=3, layout=(2, 2))
@pytest.mark.slow
@pytest.mark.parametrize(
"kwargs, axes_num, layout",
[
[{"by": "gender", "layout": (3, 5)}, 2, (3, 5)],
[{"column": ["height", "weight", "category"]}, 3, (2, 2)],
],
)
def test_grouped_hist_layout_axes(self, hist_df, kwargs, axes_num, layout):
df = hist_df
axes = df.hist(**kwargs)
_check_axes_shape(axes, axes_num=axes_num, layout=layout)
def test_grouped_hist_multiple_axes(self, hist_df):
# GH 6970, GH 7069
df = hist_df
fig, axes = mpl.pyplot.subplots(2, 3)
returned = df.hist(column=["height", "weight", "category"], ax=axes[0])
_check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[0])
assert returned[0].figure is fig
def test_grouped_hist_multiple_axes_no_cols(self, hist_df):
# GH 6970, GH 7069
df = hist_df
fig, axes = mpl.pyplot.subplots(2, 3)
returned = df.hist(by="classroom", ax=axes[1])
_check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[1])
assert returned[0].figure is fig
def test_grouped_hist_multiple_axes_error(self, hist_df):
# GH 6970, GH 7069
df = hist_df
fig, axes = mpl.pyplot.subplots(2, 3)
# pass different number of axes from required
msg = "The number of passed axes must be 1, the same as the output plot"
with pytest.raises(ValueError, match=msg):
axes = df.hist(column="height", ax=axes)
def test_axis_share_x(self, hist_df):
df = hist_df
# GH4089
ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True)
# share x
assert get_x_axis(ax1).joined(ax1, ax2)
assert get_x_axis(ax2).joined(ax1, ax2)
# don't share y
assert not get_y_axis(ax1).joined(ax1, ax2)
assert not get_y_axis(ax2).joined(ax1, ax2)
def test_axis_share_y(self, hist_df):
df = hist_df
ax1, ax2 = df.hist(column="height", by=df.gender, sharey=True)
# share y
assert get_y_axis(ax1).joined(ax1, ax2)
assert get_y_axis(ax2).joined(ax1, ax2)
# don't share x
assert not get_x_axis(ax1).joined(ax1, ax2)
assert not get_x_axis(ax2).joined(ax1, ax2)
def test_axis_share_xy(self, hist_df):
df = hist_df
ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True, sharey=True)
# share both x and y
assert get_x_axis(ax1).joined(ax1, ax2)
assert get_x_axis(ax2).joined(ax1, ax2)
assert get_y_axis(ax1).joined(ax1, ax2)
assert get_y_axis(ax2).joined(ax1, ax2)
@pytest.mark.parametrize(
"histtype, expected",
[
("bar", True),
("barstacked", True),
("step", False),
("stepfilled", True),
],
)
def test_histtype_argument(self, histtype, expected):
# GH23992 Verify functioning of histtype argument
df = DataFrame(
np.random.default_rng(2).integers(1, 10, size=(10, 2)), columns=["a", "b"]
)
ax = df.hist(by="a", histtype=histtype)
_check_patches_all_filled(ax, filled=expected)
| TestDataFrameGroupByPlots |
python | readthedocs__readthedocs.org | readthedocs/organizations/tests/test_filters.py | {
"start": 2783,
"end": 4962
} | class ____(OrganizationFilterTestCase):
def get_filterset_for_user(self, user, organization, data=None, **kwargs):
self.client.force_login(user)
url = reverse("organization_list")
resp = self.client.get(url, data=data)
return resp.context_data.get("filter")
def test_unfiltered_queryset(self, user, organization):
"""No active filters returns full queryset."""
filter = self.get_filterset_for_user(user, organization)
assertQuerySetEqual(
filter.qs,
[organization],
transform=lambda o: o,
ordered=False,
)
def test_filtered_queryset_choice(self, user, organization):
"""Valid project choice returns expected results."""
filter = self.get_filterset_for_user(
user,
organization,
data={"slug": organization.slug},
)
assert filter.is_valid()
assertQuerySetEqual(
filter.qs,
[organization],
transform=lambda o: o,
ordered=False,
)
def test_filtered_queryset_invalid_choice(self, user, organization):
"""Invalid project choice returns the original queryset."""
wrong_organization = fixture.get(
Organization,
owners=[],
projects=[],
teams=[],
)
filter = self.get_filterset_for_user(
user,
organization,
data={"slug": wrong_organization.slug},
)
assert not filter.is_valid()
# Validation will fail, but the full queryset is still returned. This is
# handled differently at the view level however.
assertQuerySetEqual(
filter.qs,
[organization],
transform=lambda o: o,
ordered=False,
)
def test_organization_filter_choices(self, user, organization):
filter = self.get_filterset_for_user(
user,
organization,
)
assert list(dict(filter.filters["slug"].field.choices).keys()) == [
"",
organization.slug,
]
| TestOrganizationFilterSet |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column.py | {
"start": 114034,
"end": 116545
} | class ____(
_CategoricalColumn,
collections.namedtuple('_CrossedColumn',
['keys', 'hash_bucket_size', 'hash_key'])):
"""See `crossed_column`."""
@property
def name(self):
feature_names = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, _FeatureColumn):
feature_names.append(key.name)
else: # key must be a string
feature_names.append(key)
return '_X_'.join(sorted(feature_names))
@property
def _parse_example_spec(self):
config = {}
for key in self.keys:
if isinstance(key, _FeatureColumn):
config.update(key._parse_example_spec) # pylint: disable=protected-access
else: # key must be a string
config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
return config
def _transform_feature(self, inputs):
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(inputs.get(key))
elif isinstance(key, _CategoricalColumn):
ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops.sparse_cross_hashed(
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
def _collect_leaf_level_keys(cross):
"""Collects base keys by expanding all nested crosses.
Args:
cross: A `_CrossedColumn`.
Returns:
A list of strings or `_CategoricalColumn` instances.
"""
leaf_level_keys = []
for k in cross.keys:
if isinstance(k, _CrossedColumn):
leaf_level_keys.extend(_collect_leaf_level_keys(k))
else:
leaf_level_keys.append(k)
return leaf_level_keys
| _CrossedColumn |
python | tensorflow__tensorflow | tensorflow/compiler/tests/adagrad_test.py | {
"start": 1027,
"end": 5523
} | class ____(xla_test.XLATestCase):
def testBasic(self):
for dtype in self.float_types | self.complex_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0),
float_rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1),
float_rtol=1e-5)
def testTensorLearningRate(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0),
float_rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1),
float_rtol=1e-5)
def testSharing(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEqual(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.get_shape(), var1.get_shape())
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0),
float_rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1),
float_rtol=1e-5)
if __name__ == "__main__":
test.main()
| AdagradOptimizerTest |
python | python-pillow__Pillow | src/PIL/PpmImagePlugin.py | {
"start": 9790,
"end": 12391
} | class ____(ImageFile.PyDecoder):
_pulls_fd = True
def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
assert self.fd is not None
data = bytearray()
maxval = self.args[-1]
in_byte_count = 1 if maxval < 256 else 2
out_byte_count = 4 if self.mode == "I" else 1
out_max = 65535 if self.mode == "I" else 255
bands = Image.getmodebands(self.mode)
dest_length = self.state.xsize * self.state.ysize * bands * out_byte_count
while len(data) < dest_length:
pixels = self.fd.read(in_byte_count * bands)
if len(pixels) < in_byte_count * bands:
# eof
break
for b in range(bands):
value = (
pixels[b] if in_byte_count == 1 else i16(pixels, b * in_byte_count)
)
value = min(out_max, round(value / maxval * out_max))
data += o32(value) if self.mode == "I" else o8(value)
rawmode = "I;32" if self.mode == "I" else self.mode
self.set_as_raw(bytes(data), rawmode)
return -1, 0
#
# --------------------------------------------------------------------
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
if im.mode == "1":
rawmode, head = "1;I", b"P4"
elif im.mode == "L":
rawmode, head = "L", b"P5"
elif im.mode in ("I", "I;16"):
rawmode, head = "I;16B", b"P5"
elif im.mode in ("RGB", "RGBA"):
rawmode, head = "RGB", b"P6"
elif im.mode == "F":
rawmode, head = "F;32F", b"Pf"
else:
msg = f"cannot write mode {im.mode} as PPM"
raise OSError(msg)
fp.write(head + b"\n%d %d\n" % im.size)
if head == b"P6":
fp.write(b"255\n")
elif head == b"P5":
if rawmode == "L":
fp.write(b"255\n")
else:
fp.write(b"65535\n")
elif head == b"Pf":
fp.write(b"-1.0\n")
row_order = -1 if im.mode == "F" else 1
ImageFile._save(
im, fp, [ImageFile._Tile("raw", (0, 0) + im.size, 0, (rawmode, 0, row_order))]
)
#
# --------------------------------------------------------------------
Image.register_open(PpmImageFile.format, PpmImageFile, _accept)
Image.register_save(PpmImageFile.format, _save)
Image.register_decoder("ppm", PpmDecoder)
Image.register_decoder("ppm_plain", PpmPlainDecoder)
Image.register_extensions(PpmImageFile.format, [".pbm", ".pgm", ".ppm", ".pnm", ".pfm"])
Image.register_mime(PpmImageFile.format, "image/x-portable-anymap")
| PpmDecoder |
python | patrick-kidger__equinox | equinox/internal/_omega.py | {
"start": 133,
"end": 211
} | class ____ω(type):
def __rpow__(cls, value):
return cls(value)
| _Meta |
python | walkccc__LeetCode | solutions/3255. Find the Power of K-Size Subarrays II/3255.py | {
"start": 0,
"end": 350
} | class ____:
# Same as 3254. Find the Power of K-Size Subarrays I
def resultsArray(self, nums: list[int], k: int) -> list[int]:
ans = []
start = 0
for i, num in enumerate(nums):
if i > 0 and num != nums[i - 1] + 1:
start = i
if i >= k - 1:
ans.append(num if i - start + 1 >= k else -1)
return ans
| Solution |
python | mlflow__mlflow | mlflow/legacy_databricks_cli/configure/provider.py | {
"start": 7224,
"end": 7948
} | class ____(DatabricksConfigProvider):
"""Look for credentials in a chain of default locations."""
def __init__(self):
# The order of providers here will be used to determine
# the precedence order for the config provider used in `get_config`
self._providers = (
SparkTaskContextConfigProvider(),
EnvironmentVariableConfigProvider(),
ProfileConfigProvider(),
DatabricksModelServingConfigProvider(),
)
def get_config(self):
for provider in self._providers:
config = provider.get_config()
if config is not None and config.is_valid:
return config
return None
| DefaultConfigProvider |
python | doocs__leetcode | solution/1700-1799/1739.Building Boxes/Solution.py | {
"start": 0,
"end": 332
} | class ____:
def minimumBoxes(self, n: int) -> int:
s, k = 0, 1
while s + k * (k + 1) // 2 <= n:
s += k * (k + 1) // 2
k += 1
k -= 1
ans = k * (k + 1) // 2
k = 1
while s < n:
ans += 1
s += k
k += 1
return ans
| Solution |
python | python-poetry__poetry | src/poetry/packages/direct_origin.py | {
"start": 1454,
"end": 3572
} | class ____:
def __init__(self, artifact_cache: ArtifactCache) -> None:
self._artifact_cache = artifact_cache
config = Config.create()
self._max_retries = config.get("requests.max-retries", 0)
self._authenticator = get_default_authenticator()
@classmethod
def get_package_from_file(cls, file_path: Path) -> Package:
try:
package = PackageInfo.from_path(path=file_path).to_package(
root_dir=file_path
)
except PackageInfoError:
raise RuntimeError(
f"Unable to determine package info from path: {file_path}"
)
return package
@classmethod
def get_package_from_directory(cls, directory: Path) -> Package:
return PackageInfo.from_directory(path=directory).to_package(root_dir=directory)
def _download_file(self, url: str, dest: Path) -> None:
download_file(
url, dest, session=self._authenticator, max_retries=self._max_retries
)
def get_package_from_url(self, url: str) -> Package:
link = Link(url)
artifact = self._artifact_cache.get_cached_archive_for_link(
link, strict=True, download_func=self._download_file
)
package = self.get_package_from_file(artifact)
package.files = [
{"file": link.filename, "hash": "sha256:" + get_file_hash(artifact)}
]
package._source_type = "url"
package._source_url = url
return package
@staticmethod
def get_package_from_vcs(
vcs: str,
url: str,
branch: str | None = None,
tag: str | None = None,
rev: str | None = None,
subdirectory: str | None = None,
source_root: Path | None = None,
) -> Package:
if vcs != "git":
raise ValueError(f"Unsupported VCS dependency {vcs}")
return _get_package_from_git(
url=url,
branch=branch,
tag=tag,
rev=rev,
subdirectory=subdirectory,
source_root=source_root,
)
| DirectOrigin |
python | ray-project__ray | rllib/core/distribution/torch/torch_distribution.py | {
"start": 19196,
"end": 25070
} | class ____(Distribution):
"""Action distribution that operates on multiple, possibly nested actions."""
def __init__(
self,
child_distribution_struct: Union[Tuple, List, Dict],
):
"""Initializes a TorchMultiDistribution object.
Args:
child_distribution_struct: A complex struct that contains the child
distribution instances that make up this multi-distribution.
"""
super().__init__()
self._original_struct = child_distribution_struct
self._flat_child_distributions = tree.flatten(child_distribution_struct)
@override(Distribution)
def rsample(
self,
*,
sample_shape: Tuple[int, ...] = None,
**kwargs,
) -> Union[TensorType, Tuple[TensorType, TensorType]]:
rsamples = []
for dist in self._flat_child_distributions:
rsample = dist.rsample(sample_shape=sample_shape, **kwargs)
rsamples.append(rsample)
rsamples = tree.unflatten_as(self._original_struct, rsamples)
return rsamples
@override(Distribution)
def logp(self, value: TensorType) -> TensorType:
# Different places in RLlib use this method with different inputs.
# We therefore need to handle a flattened and concatenated input, as well as
# a nested one.
# TODO(Artur): Deprecate tensor inputs, only allow nested structures.
if isinstance(value, torch.Tensor):
split_indices = []
for dist in self._flat_child_distributions:
if isinstance(dist, TorchCategorical):
split_indices.append(1)
elif isinstance(dist, TorchMultiCategorical):
split_indices.append(len(dist._cats))
else:
sample = dist.sample()
# Cover Box(shape=()) case.
if len(sample.shape) == 1:
split_indices.append(1)
else:
split_indices.append(sample.size()[1])
split_value = list(torch.split(value, split_indices, dim=1))
else:
split_value = tree.flatten(value)
def map_(val, dist):
# Remove extra dimension if present.
if (
isinstance(dist, TorchCategorical)
and val.shape[-1] == 1
and len(val.shape) > 1
):
val = torch.squeeze(val, dim=-1)
return dist.logp(val)
flat_logps = tree.map_structure(
map_, split_value, self._flat_child_distributions
)
return sum(flat_logps)
@override(Distribution)
def kl(self, other: Distribution) -> TensorType:
kl_list = [
d.kl(o)
for d, o in zip(
self._flat_child_distributions, other._flat_child_distributions
)
]
return sum(kl_list)
@override(Distribution)
def entropy(self):
entropy_list = [d.entropy() for d in self._flat_child_distributions]
return sum(entropy_list)
@override(Distribution)
def sample(self):
child_distributions_struct = tree.unflatten_as(
self._original_struct, self._flat_child_distributions
)
return tree.map_structure(lambda s: s.sample(), child_distributions_struct)
@staticmethod
@override(Distribution)
def required_input_dim(
space: gym.Space, input_lens: List[int], as_list: bool = False, **kwargs
) -> int:
if as_list:
return input_lens
else:
return sum(input_lens)
@classmethod
@override(Distribution)
def from_logits(
cls,
logits: "torch.Tensor",
child_distribution_cls_struct: Union[Dict, Iterable],
input_lens: Union[Dict, List[int]],
**kwargs,
) -> "TorchMultiDistribution":
"""Creates this Distribution from logits (and additional arguments).
If you wish to create this distribution from logits only, please refer to
`Distribution.get_partial_dist_cls()`.
Args:
logits: The tensor containing logits to be separated by `input_lens`.
child_distribution_cls_struct: A struct of Distribution classes that can
be instantiated from the given logits.
child_distribution_cls_struct: A struct of Distribution classes that can
be instantiated from the given logits.
input_lens: A list or dict of integers that indicate the length of each
logit. If this is given as a dict, the structure should match the
structure of child_distribution_cls_struct.
**kwargs: Forward compatibility kwargs.
Returns:
A TorchMultiDistribution object.
"""
logit_lens = tree.flatten(input_lens)
child_distribution_cls_list = tree.flatten(child_distribution_cls_struct)
split_logits = torch.split(logits, logit_lens, dim=-1)
child_distribution_list = tree.map_structure(
lambda dist, input_: dist.from_logits(input_),
child_distribution_cls_list,
list(split_logits),
)
child_distribution_struct = tree.unflatten_as(
child_distribution_cls_struct, child_distribution_list
)
return cls(
child_distribution_struct=child_distribution_struct,
)
def to_deterministic(self) -> "TorchMultiDistribution":
flat_deterministic_dists = [
dist.to_deterministic() for dist in self._flat_child_distributions
]
deterministic_dists = tree.unflatten_as(
self._original_struct, flat_deterministic_dists
)
return TorchMultiDistribution(deterministic_dists)
| TorchMultiDistribution |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0041_index-repo-field.py | {
"start": 190,
"end": 782
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0040_increase_path_max_length"),
]
operations = [
migrations.AlterField(
model_name="project",
name="repo",
field=models.CharField(
db_index=True,
help_text="Hosted documentation repository URL",
max_length=255,
validators=[readthedocs.projects.validators.RepositoryURLValidator()],
verbose_name="Repository URL",
),
),
]
| Migration |
python | numpy__numpy | numpy/lib/_function_base_impl.py | {
"start": 72395,
"end": 193189
} | class ____:
"""
vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None,
cache=False, signature=None)
Returns an object that acts like pyfunc, but takes arrays as input.
Define a vectorized function which takes a nested sequence of objects or
numpy arrays as inputs and returns a single numpy array or a tuple of numpy
arrays. The vectorized function evaluates `pyfunc` over successive tuples
of the input arrays like the python map function, except it uses the
broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable, optional
A python function or method.
Can be omitted to produce a decorator with keyword arguments.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If None, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
cache : bool, optional
If neither `otypes` nor `signature` are provided, and `cache` is ``True``, then
cache the number of outputs.
signature : string, optional
Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for
vectorized matrix-vector multiplication. If provided, ``pyfunc`` will
be called with (and expected to return) arrays with shapes given by the
size of corresponding core dimensions. By default, ``pyfunc`` is
assumed to take scalars as input and output.
Returns
-------
out : callable
A vectorized function if ``pyfunc`` was provided,
a decorator otherwise.
See Also
--------
frompyfunc : Takes an arbitrary Python function and returns a ufunc
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If neither `otypes` nor `signature` are specified, then a call to the function with
the first argument will be used to determine the number of outputs. The results of
this call will be cached if `cache` is `True` to prevent calling the function
twice. However, to implement the cache, the original function must be wrapped
which will slow down subsequent calls, so only do this if your function is
expensive.
The new keyword argument interface and `excluded` argument support
further degrades performance.
References
----------
.. [1] :doc:`/reference/c-api/generalized-ufuncs`
Examples
--------
>>> import numpy as np
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified:
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified:
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<class 'numpy.int64'>
>>> vfunc = np.vectorize(myfunc, otypes=[float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<class 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
Here, we exclude the zeroth argument from vectorization whether it is
passed by position or keyword.
>>> vpolyval = np.vectorize(mypolyval, excluded={0, 'p'})
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
The `signature` argument allows for vectorizing functions that act on
non-scalar arrays of fixed length. For example, you can use it for a
vectorized calculation of Pearson correlation coefficient and its p-value:
>>> import scipy.stats
>>> pearsonr = np.vectorize(scipy.stats.pearsonr,
... signature='(n),(n)->(),()')
>>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]])
(array([ 1., -1.]), array([ 0., 0.]))
Or for a vectorized convolution:
>>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)')
>>> convolve(np.eye(4), [1, 2, 1])
array([[1., 2., 1., 0., 0., 0.],
[0., 1., 2., 1., 0., 0.],
[0., 0., 1., 2., 1., 0.],
[0., 0., 0., 1., 2., 1.]])
Decorator syntax is supported. The decorator can be called as
a function to provide keyword arguments:
>>> @np.vectorize
... def identity(x):
... return x
...
>>> identity([0, 1, 2])
array([0, 1, 2])
>>> @np.vectorize(otypes=[float])
... def as_float(x):
... return x
...
>>> as_float([0, 1, 2])
array([0., 1., 2.])
"""
def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None,
excluded=None, cache=False, signature=None):
if (pyfunc != np._NoValue) and (not callable(pyfunc)):
# Splitting the error message to keep
# the length below 79 characters.
part1 = "When used as a decorator, "
part2 = "only accepts keyword arguments."
raise TypeError(part1 + part2)
self.pyfunc = pyfunc
self.cache = cache
self.signature = signature
if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'):
self.__name__ = pyfunc.__name__
self._ufunc = {} # Caching to improve default performance
self._doc = None
self.__doc__ = doc
if doc is None and hasattr(pyfunc, '__doc__'):
self.__doc__ = pyfunc.__doc__
else:
self._doc = doc
if isinstance(otypes, str):
for char in otypes:
if char not in typecodes['All']:
raise ValueError(f"Invalid otype specified: {char}")
elif iterable(otypes):
otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes]
elif otypes is not None:
raise ValueError("Invalid otype specification")
self.otypes = otypes
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if signature is not None:
self._in_and_out_core_dims = _parse_gufunc_signature(signature)
else:
self._in_and_out_core_dims = None
def _init_stage_2(self, pyfunc, *args, **kwargs):
self.__name__ = pyfunc.__name__
self.pyfunc = pyfunc
if self._doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = self._doc
def _call_as_normal(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(zip(names, vargs[len(inds):]))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def __call__(self, *args, **kwargs):
if self.pyfunc is np._NoValue:
self._init_stage_2(*args, **kwargs)
return self
return self._call_as_normal(*args, **kwargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError('args can not be empty')
if self.otypes is not None:
otypes = self.otypes
# self._ufunc is a dictionary whose keys are the number of
# arguments (i.e. len(args)) and whose values are ufuncs created
# by frompyfunc. len(args) can be different for different calls if
# self.pyfunc has parameters with default values. We only use the
# cache when func is self.pyfunc, which occurs when the call uses
# only positional arguments and no arguments are excluded.
nin = len(args)
nout = len(self.otypes)
if func is not self.pyfunc or nin not in self._ufunc:
ufunc = frompyfunc(func, nin, nout)
else:
ufunc = None # We'll get it from self._ufunc
if func is self.pyfunc:
ufunc = self._ufunc.setdefault(nin, ufunc)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
args = [asarray(a) for a in args]
if builtins.any(arg.size == 0 for arg in args):
raise ValueError('cannot call `vectorize` on size 0 inputs '
'unless `otypes` is set')
inputs = [arg.flat[0] for arg in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if self.signature is not None:
res = self._vectorize_call_with_signature(func, args)
elif not args:
res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# gh-29196: `dtype=object` should eventually be removed
args = [asanyarray(a, dtype=object) for a in args]
outputs = ufunc(*args, out=...)
if ufunc.nout == 1:
res = asanyarray(outputs, dtype=otypes[0])
else:
res = tuple(asanyarray(x, dtype=t)
for x, t in zip(outputs, otypes))
return res
def _vectorize_call_with_signature(self, func, args):
"""Vectorized call over positional arguments with a signature."""
input_core_dims, output_core_dims = self._in_and_out_core_dims
if len(args) != len(input_core_dims):
raise TypeError('wrong number of positional arguments: '
'expected %r, got %r'
% (len(input_core_dims), len(args)))
args = tuple(asanyarray(arg) for arg in args)
broadcast_shape, dim_sizes = _parse_input_dimensions(
args, input_core_dims)
input_shapes = _calculate_shapes(broadcast_shape, dim_sizes,
input_core_dims)
args = [np.broadcast_to(arg, shape, subok=True)
for arg, shape in zip(args, input_shapes)]
outputs = None
otypes = self.otypes
nout = len(output_core_dims)
for index in np.ndindex(*broadcast_shape):
results = func(*(arg[index] for arg in args))
n_results = len(results) if isinstance(results, tuple) else 1
if nout != n_results:
raise ValueError(
'wrong number of outputs from pyfunc: expected %r, got %r'
% (nout, n_results))
if nout == 1:
results = (results,)
if outputs is None:
for result, core_dims in zip(results, output_core_dims):
_update_dim_sizes(dim_sizes, result, core_dims)
outputs = _create_arrays(broadcast_shape, dim_sizes,
output_core_dims, otypes, results)
for output, result in zip(outputs, results):
output[index] = result
if outputs is None:
# did not call the function even once
if otypes is None:
raise ValueError('cannot call `vectorize` on size 0 inputs '
'unless `otypes` is set')
if builtins.any(dim not in dim_sizes
for dims in output_core_dims
for dim in dims):
raise ValueError('cannot call `vectorize` with a signature '
'including new output dimensions on size 0 '
'inputs')
outputs = _create_arrays(broadcast_shape, dim_sizes,
output_core_dims, otypes)
return outputs[0] if nout == 1 else outputs
def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None,
fweights=None, aweights=None, *, dtype=None):
return (m, y, fweights, aweights)
@array_function_dispatch(_cov_dispatcher)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None, *, dtype=None):
"""
Estimate a covariance matrix, given data and weights.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ..., x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
See the notes for an outline of the algorithm.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same form
as that of `m`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N - 1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True,
then normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
If not ``None`` the default value implied by `bias` is overridden.
Note that ``ddof=1`` will return the unbiased estimate, even if both
`fweights` and `aweights` are specified, and ``ddof=0`` will return
the simple average. See the notes for the details. The default value
is ``None``.
fweights : array_like, int, optional
1-D array of integer frequency weights; the number of times each
observation vector should be repeated.
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
dtype : data-type, optional
Data-type of the result. By default, the return data-type will have
at least `numpy.float64` precision.
.. versionadded:: 1.20
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Notes
-----
Assume that the observations are in the columns of the observation
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
>>> m = np.arange(10, dtype=np.float64)
>>> f = np.arange(10) * 2
>>> a = np.arange(10) ** 2.
>>> ddof = 1
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
>>> m -= np.sum(m * w, axis=None, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
as it should.
Examples
--------
>>> import numpy as np
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.stack((x, y), axis=0)
>>> np.cov(X)
array([[11.71 , -4.286 ], # may vary
[-4.286 , 2.144133]])
>>> np.cov(x, y)
array([[11.71 , -4.286 ], # may vary
[-4.286 , 2.144133]])
>>> np.cov(x)
array(11.71)
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError(
"ddof must be integer")
# Handles complex arrays too
m = np.asarray(m)
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions")
if y is not None:
y = np.asarray(y)
if y.ndim > 2:
raise ValueError("y has more than 2 dimensions")
if dtype is None:
if y is None:
dtype = np.result_type(m, np.float64)
else:
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if not rowvar and m.ndim != 1:
X = X.T
if X.shape[0] == 0:
return np.array([]).reshape(0, 0)
if y is not None:
y = array(y, copy=None, ndmin=2, dtype=dtype)
if not rowvar and y.shape[0] != 1:
y = y.T
X = np.concatenate((X, y), axis=0)
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
# Get the product of frequencies and weights
w = None
if fweights is not None:
fweights = np.asarray(fweights, dtype=float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
if fweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional fweights")
if fweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and fweights")
if any(fweights < 0):
raise ValueError(
"fweights cannot be negative")
w = fweights
if aweights is not None:
aweights = np.asarray(aweights, dtype=float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
if aweights.shape[0] != X.shape[1]:
raise RuntimeError(
"incompatible numbers of samples and aweights")
if any(aweights < 0):
raise ValueError(
"aweights cannot be negative")
if w is None:
w = aweights
else:
w *= aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
# Determine the normalization
if w is None:
fact = X.shape[1] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof * sum(w * aweights) / w_sum
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice",
RuntimeWarning, stacklevel=2)
fact = 0.0
X -= avg[:, None]
if w is None:
X_T = X.T
else:
X_T = (X * w).T
c = dot(X, X_T.conj())
c *= np.true_divide(1, fact)
return c.squeeze()
def _corrcoef_dispatcher(x, y=None, rowvar=None, *,
dtype=None):
return (x, y)
@array_function_dispatch(_corrcoef_dispatcher)
def corrcoef(x, y=None, rowvar=True, *,
dtype=None):
"""
Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
.. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} C_{jj} } }
The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
dtype : data-type, optional
Data-type of the result. By default, the return data-type will have
at least `numpy.float64` precision.
.. versionadded:: 1.20
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
Notes
-----
Due to floating point rounding the resulting array may not be Hermitian,
the diagonal elements may not be 1, and the elements may not satisfy the
inequality abs(a) <= 1. The real and imaginary parts are clipped to the
interval [-1, 1] in an attempt to improve on that situation but is not
much help in the complex case.
Examples
--------
>>> import numpy as np
In this example we generate two random arrays, ``xarr`` and ``yarr``, and
compute the row-wise and column-wise Pearson correlation coefficients,
``R``. Since ``rowvar`` is true by default, we first find the row-wise
Pearson correlation coefficients between the variables of ``xarr``.
>>> import numpy as np
>>> rng = np.random.default_rng(seed=42)
>>> xarr = rng.random((3, 3))
>>> xarr
array([[0.77395605, 0.43887844, 0.85859792],
[0.69736803, 0.09417735, 0.97562235],
[0.7611397 , 0.78606431, 0.12811363]])
>>> R1 = np.corrcoef(xarr)
>>> R1
array([[ 1. , 0.99256089, -0.68080986],
[ 0.99256089, 1. , -0.76492172],
[-0.68080986, -0.76492172, 1. ]])
If we add another set of variables and observations ``yarr``, we can
compute the row-wise Pearson correlation coefficients between the
variables in ``xarr`` and ``yarr``.
>>> yarr = rng.random((3, 3))
>>> yarr
array([[0.45038594, 0.37079802, 0.92676499],
[0.64386512, 0.82276161, 0.4434142 ],
[0.22723872, 0.55458479, 0.06381726]])
>>> R2 = np.corrcoef(xarr, yarr)
>>> R2
array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 ,
-0.99004057],
[ 0.99256089, 1. , -0.76492172, 0.82502011, -0.97074098,
-0.99981569],
[-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355,
0.77714685],
[ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855,
-0.83571711],
[-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1. ,
0.97517215],
[-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215,
1. ]])
Finally if we use the option ``rowvar=False``, the columns are now
being treated as the variables and we will find the column-wise Pearson
correlation coefficients between variables in ``xarr`` and ``yarr``.
>>> R3 = np.corrcoef(xarr, yarr, rowvar=False)
>>> R3
array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 ,
0.22423734],
[ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587,
-0.44069024],
[-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648,
0.75137473],
[-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469,
0.47536961],
[-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. ,
-0.46666491],
[ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491,
1. ]])
"""
c = cov(x, y, rowvar, dtype=dtype)
try:
d = diag(c)
except ValueError:
# scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
stddev = sqrt(d.real)
c /= stddev[:, None]
c /= stddev[None, :]
# Clip real and imaginary parts to [-1, 1]. This does not guarantee
# abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
# excessive work.
np.clip(c.real, -1, 1, out=c.real)
if np.iscomplexobj(c):
np.clip(c.imag, -1, 1, out=c.imag)
return c
@set_module('numpy')
def blackman(M):
"""
Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> np.blackman(12)
array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response.
.. plot::
:include-source:
import matplotlib.pyplot as plt
from numpy.fft import fft, fftshift
window = np.blackman(51)
plt.plot(window)
plt.title("Blackman window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show() # doctest: +SKIP
plt.figure()
A = fft(window, 2048) / 25.5
mag = np.abs(fftshift(A))
freq = np.linspace(-0.5, 0.5, len(A))
with np.errstate(divide='ignore', invalid='ignore'):
response = 20 * np.log10(mag)
response = np.clip(response, -100, 100)
plt.plot(freq, response)
plt.title("Frequency response of Blackman window")
plt.ylabel("Magnitude [dB]")
plt.xlabel("Normalized frequency [cycles per sample]")
plt.axis('tight')
plt.show()
"""
# Ensures at least float64 via 0.0. M should be an integer, but conversion
# to double is safe for a range.
values = np.array([0.0, M])
M = values[1]
if M < 1:
return array([], dtype=values.dtype)
if M == 1:
return ones(1, dtype=values.dtype)
n = arange(1 - M, M, 2)
return 0.42 + 0.5 * cos(pi * n / (M - 1)) + 0.08 * cos(2.0 * pi * n / (M - 1))
@set_module('numpy')
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. Note that convolution with this window produces linear
interpolation. It is also known as an apodization (which means "removing
the foot", i.e. smoothing discontinuities at the beginning and end of the
sampled signal) or tapering function. The Fourier transform of the
Bartlett window is the product of two sinc functions. Note the excellent
discussion in Kanasewich [2]_.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib).
.. plot::
:include-source:
import matplotlib.pyplot as plt
from numpy.fft import fft, fftshift
window = np.bartlett(51)
plt.plot(window)
plt.title("Bartlett window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show()
plt.figure()
A = fft(window, 2048) / 25.5
mag = np.abs(fftshift(A))
freq = np.linspace(-0.5, 0.5, len(A))
with np.errstate(divide='ignore', invalid='ignore'):
response = 20 * np.log10(mag)
response = np.clip(response, -100, 100)
plt.plot(freq, response)
plt.title("Frequency response of Bartlett window")
plt.ylabel("Magnitude [dB]")
plt.xlabel("Normalized frequency [cycles per sample]")
plt.axis('tight')
plt.show()
"""
# Ensures at least float64 via 0.0. M should be an integer, but conversion
# to double is safe for a range.
values = np.array([0.0, M])
M = values[1]
if M < 1:
return array([], dtype=values.dtype)
if M == 1:
return ones(1, dtype=values.dtype)
n = arange(1 - M, M, 2)
return where(less_equal(n, 0), 1 + n / (M - 1), 1 - n / (M - 1))
@set_module('numpy')
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> import numpy as np
>>> np.hanning(12)
array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response.
.. plot::
:include-source:
import matplotlib.pyplot as plt
from numpy.fft import fft, fftshift
window = np.hanning(51)
plt.plot(window)
plt.title("Hann window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show()
plt.figure()
A = fft(window, 2048) / 25.5
mag = np.abs(fftshift(A))
freq = np.linspace(-0.5, 0.5, len(A))
with np.errstate(divide='ignore', invalid='ignore'):
response = 20 * np.log10(mag)
response = np.clip(response, -100, 100)
plt.plot(freq, response)
plt.title("Frequency response of the Hann window")
plt.ylabel("Magnitude [dB]")
plt.xlabel("Normalized frequency [cycles per sample]")
plt.axis('tight')
plt.show()
"""
# Ensures at least float64 via 0.0. M should be an integer, but conversion
# to double is safe for a range.
values = np.array([0.0, M])
M = values[1]
if M < 1:
return array([], dtype=values.dtype)
if M == 1:
return ones(1, dtype=values.dtype)
n = arange(1 - M, M, 2)
return 0.5 + 0.5 * cos(pi * n / (M - 1))
@set_module('numpy')
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> import numpy as np
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response.
.. plot::
:include-source:
import matplotlib.pyplot as plt
from numpy.fft import fft, fftshift
window = np.hamming(51)
plt.plot(window)
plt.title("Hamming window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show()
plt.figure()
A = fft(window, 2048) / 25.5
mag = np.abs(fftshift(A))
freq = np.linspace(-0.5, 0.5, len(A))
response = 20 * np.log10(mag)
response = np.clip(response, -100, 100)
plt.plot(freq, response)
plt.title("Frequency response of Hamming window")
plt.ylabel("Magnitude [dB]")
plt.xlabel("Normalized frequency [cycles per sample]")
plt.axis('tight')
plt.show()
"""
# Ensures at least float64 via 0.0. M should be an integer, but conversion
# to double is safe for a range.
values = np.array([0.0, M])
M = values[1]
if M < 1:
return array([], dtype=values.dtype)
if M == 1:
return ones(1, dtype=values.dtype)
n = arange(1 - M, M, 2)
return 0.54 + 0.46 * cos(pi * n / (M - 1))
## Code from cephes for i0

# Chebyshev expansion coefficients taken from the Cephes math library
# (Moshier).  `i0` evaluates them with `_chbevl` (Clenshaw recurrence).

# Coefficients for the interval [0, 8]:
#   I0(x) = exp(x) * chbevl(x/2 - 2, _i0A)
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]

# Coefficients for the interval (8, inf):
#   I0(x) = exp(x) * chbevl(32/x - 2, _i0B) / sqrt(x)
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x * b1 - b2 + vals[i]
return 0.5 * (b0 - b2)
def _i0_1(x):
    # Small-argument branch (x <= 8): Chebyshev series in x/2 - 2,
    # rescaled by exp(x).
    t = x / 2.0 - 2
    return exp(x) * _chbevl(t, _i0A)
def _i0_2(x):
    # Large-argument branch (x > 8): Chebyshev series in 32/x - 2,
    # rescaled by exp(x)/sqrt(x).
    t = 32.0 / x - 2.0
    return exp(x) * _chbevl(t, _i0B) / sqrt(x)
def _i0_dispatcher(x):
    # Single relevant argument for __array_function__ dispatch.
    return (x,)


@array_function_dispatch(_i0_dispatcher)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.

    Usually denoted :math:`I_0`.  Implemented with the Chebyshev
    expansions published by Clenshaw [1]_ and referenced by Abramowitz
    and Stegun [2]_: the domain is split into [0, 8] and (8, inf) and a
    polynomial expansion is used on each interval.  Documented relative
    error on [0, 30] peaks at 5.8e-16 (rms 1.4e-16) [3]_.

    Parameters
    ----------
    x : array_like of float
        Argument of the Bessel function.

    Returns
    -------
    out : ndarray, shape = x.shape, dtype = float
        The modified Bessel function evaluated at each of the elements
        of `x`.

    See Also
    --------
    scipy.special.i0, scipy.special.iv, scipy.special.ive

    Notes
    -----
    The scipy implementation is recommended over this function: it is a
    proper ufunc written in C, and more than an order of magnitude
    faster.

    References
    ----------
    .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions",
           in *National Physical Laboratory Mathematical Tables*, vol. 5,
           London: Her Majesty's Stationery Office, 1962.
    .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
           Functions*, 10th printing, New York: Dover, 1964, pp. 379.
    .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero

    Examples
    --------
    >>> np.i0(0.)
    array(1.0)
    >>> np.i0([0, 1, 2, 3])
    array([1.        , 1.26606588, 2.2795853 , 4.88079259])
    """
    values = np.asanyarray(x)
    if values.dtype.kind == 'c':
        raise TypeError("i0 not supported for complex values")
    if values.dtype.kind != 'f':
        values = values.astype(float)
    # I0 is even, so evaluate on |x| and choose the branch by magnitude.
    values = np.abs(values)
    return piecewise(values, [values <= 8.0], [_i0_1, _i0_2])
## End of cephes code for i0
@set_module('numpy')
def kaiser(M, beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple
approximation to the DPSS window based on Bessel functions. The Kaiser
window is a very good approximation to the Digital Prolate Spheroidal
Sequence, or Slepian window, which is the transform which maximizes the
energy in the main lobe of the window relative to total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> np.kaiser(12, 14)
array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response.
.. plot::
:include-source:
import matplotlib.pyplot as plt
from numpy.fft import fft, fftshift
window = np.kaiser(51, 14)
plt.plot(window)
plt.title("Kaiser window")
plt.ylabel("Amplitude")
plt.xlabel("Sample")
plt.show()
plt.figure()
A = fft(window, 2048) / 25.5
mag = np.abs(fftshift(A))
freq = np.linspace(-0.5, 0.5, len(A))
response = 20 * np.log10(mag)
response = np.clip(response, -100, 100)
plt.plot(freq, response)
plt.title("Frequency response of Kaiser window")
plt.ylabel("Magnitude [dB]")
plt.xlabel("Normalized frequency [cycles per sample]")
plt.axis('tight')
plt.show()
"""
# Ensures at least float64 via 0.0. M should be an integer, but conversion
# to double is safe for a range. (Simplified result_type with 0.0
# strongly typed. result-type is not/less order sensitive, but that mainly
# matters for integers anyway.)
values = np.array([0.0, M, beta])
M = values[1]
beta = values[2]
if M == 1:
return np.ones(1, dtype=values.dtype)
n = arange(0, M)
alpha = (M - 1) / 2.0
return i0(beta * sqrt(1 - ((n - alpha) / alpha)**2.0)) / i0(beta)
def _sinc_dispatcher(x):
return (x,)
@array_function_dispatch(_sinc_dispatcher)
def sinc(x):
r"""
Return the normalized sinc function.
The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument
:math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not
only everywhere continuous but also infinitely differentiable.
.. note::
Note the normalization factor of ``pi`` used in the definition.
This is the most commonly used definition in signal processing.
Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function
:math:`\sin(x)/x` that is more common in mathematics.
Parameters
----------
x : ndarray
Array (possibly multi-dimensional) of values for which to calculate
``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. https://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
https://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
Text(0.5, 1.0, 'Sinc Function')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("X")
Text(0.5, 0, 'X')
>>> plt.show()
"""
x = np.asanyarray(x)
x = pi * x
# Hope that 1e-20 is sufficient for objects...
eps = np.finfo(x.dtype).eps if x.dtype.kind == "f" else 1e-20
y = where(x, x, eps)
return sin(y) / y
def _ureduce(a, func, keepdims=False, **kwargs):
    """
    Internal Function.
    Call `func` with `a` as first argument swapping the axes to use extended
    axis on functions that don't support it natively.

    This emulates a ufunc-style ``keepdims``/``out`` contract for reductions
    (e.g. `_median`, `_quantile_ureduce_func`) that only understand a single
    integer ``axis``: a tuple of axes is merged into one trailing axis before
    calling `func`, and the reduced dimensions are reinserted (or `out` is
    viewed appropriately) afterwards.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    func : callable
        Reduction function capable of receiving a single axis argument.
        It is called with `a` as first argument followed by `kwargs`.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as size-one
        dimensions.
    kwargs : keyword arguments
        additional keyword arguments to pass to `func`; ``axis`` and ``out``
        are inspected (and possibly rewritten) here before forwarding.

    Returns
    -------
    result : ndarray
        ``func(a, **kwargs)`` reshaped for ``keepdims``; when `out` was
        supplied, `out` itself is returned.
    """
    a = np.asanyarray(a)
    axis = kwargs.get('axis')
    out = kwargs.get('out')
    # public wrappers may pass keepdims=np._NoValue to mean "default"
    if keepdims is np._NoValue:
        keepdims = False
    nd = a.ndim
    if axis is not None:
        axis = _nx.normalize_axis_tuple(axis, nd)
        if keepdims and out is not None:
            # Let func write into a view of `out` with the reduced (size-1)
            # axes dropped; the full-shaped `out` is returned at the end.
            index_out = tuple(
                0 if i in axis else slice(None) for i in range(nd))
            kwargs['out'] = out[(Ellipsis, ) + index_out]
        if len(axis) == 1:
            kwargs['axis'] = axis[0]
        else:
            keep = set(range(nd)) - set(axis)
            nkeep = len(keep)
            # swap axis that should not be reduced to front
            for i, s in enumerate(sorted(keep)):
                a = a.swapaxes(i, s)
            # merge reduced axis
            a = a.reshape(a.shape[:nkeep] + (-1,))
            kwargs['axis'] = -1
    elif keepdims and out is not None:
        # axis=None reduces everything: index `out` down to a 0-d view
        index_out = (0, ) * nd
        kwargs['out'] = out[(Ellipsis, ) + index_out]
    r = func(a, **kwargs)
    if out is not None:
        return out
    if keepdims:
        # Reinsert the reduced dimensions as length-1 axes.
        if axis is None:
            index_r = (np.newaxis, ) * nd
        else:
            index_r = tuple(
                np.newaxis if i in axis else slice(None)
                for i in range(nd))
        r = r[(Ellipsis, ) + index_r]
    return r
def _median_dispatcher(a, axis=None, out=None, overwrite_input=None,
                       keepdims=None):
    """Dispatcher for `median`: the array-like arguments are `a` and `out`."""
    return a, out
@array_function_dispatch(_median_dispatcher)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed. The default,
        axis=None, will compute the median along a flattened version of
        the array. If a sequence of axes, the array is first flattened
        along the given axes, then the median is computed along the
        resulting flattened axis.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow use of memory of input array `a` for
        calculations. The input array will be modified by the call to
        `median`. This will save memory when you do not need to preserve
        the contents of the input array. Treat the input as undefined,
        but it will probably be fully or partially sorted. Default is
        False. If `overwrite_input` is ``True`` and `a` is not already an
        `ndarray`, an error will be raised.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.

    Returns
    -------
    median : ndarray
        A new array holding the result. If the input contains integers
        or floats smaller than ``float64``, then the output data-type is
        ``np.float64``. Otherwise, the data-type of the output is the
        same as that of the input. If `out` is specified, that array is
        returned instead.

    See Also
    --------
    mean, percentile

    Notes
    -----
    Given a vector ``V`` of length ``N``, the median of ``V`` is the
    middle value of a sorted copy of ``V``, ``V_sorted`` - i
    e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
    two middle values of ``V_sorted`` when ``N`` is even.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> a
    array([[10,  7,  4],
           [ 3,  2,  1]])
    >>> np.median(a)
    np.float64(3.5)
    >>> np.median(a, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.median(a, axis=1)
    array([7.,  2.])
    >>> np.median(a, axis=(0, 1))
    np.float64(3.5)
    >>> m = np.median(a, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.median(a, axis=0, out=m)
    array([6.5,  4.5,  2.5])
    >>> m
    array([6.5,  4.5,  2.5])
    >>> b = a.copy()
    >>> np.median(b, axis=1, overwrite_input=True)
    array([7.,  2.])
    >>> assert not np.all(a==b)
    >>> b = a.copy()
    >>> np.median(b, axis=None, overwrite_input=True)
    np.float64(3.5)
    >>> assert not np.all(a==b)

    """
    # All of the extended-axis / keepdims / out plumbing lives in _ureduce;
    # _median itself only understands a single integer axis (or None).
    return _ureduce(
        a,
        func=_median,
        keepdims=keepdims,
        axis=axis,
        out=out,
        overwrite_input=overwrite_input,
    )
def _median(a, axis=None, out=None, overwrite_input=False):
    """Core of `median`: median along a single int axis (or None).

    Extended (tuple) axes and keepdims are handled by `_ureduce` before
    this is called.
    """
    # can't be reasonably be implemented in terms of percentile as we have to
    # call mean to not break astropy
    a = np.asanyarray(a)
    # Set the partition indexes
    if axis is None:
        sz = a.size
    else:
        sz = a.shape[axis]
    if sz % 2 == 0:
        # even count: both middle order statistics are needed (averaged below)
        szh = sz // 2
        kth = [szh - 1, szh]
    else:
        kth = [(sz - 1) // 2]
    # We have to check for NaNs (as of writing 'M' doesn't actually work).
    supports_nans = np.issubdtype(a.dtype, np.inexact) or a.dtype.kind in 'Mm'
    if supports_nans:
        # also partition the last element so any NaN lands at the end,
        # where _median_nancheck can find it
        kth.append(-1)
    if overwrite_input:
        if axis is None:
            part = a.ravel()
            part.partition(kth)
        else:
            a.partition(kth, axis=axis)
            part = a
    else:
        # out-of-place partition keeps the caller's array untouched
        part = partition(a, kth, axis=axis)
    if part.shape == ():
        # make 0-D arrays work
        return part.item()
    if axis is None:
        # a was ravelled above, so the (single) axis is now 0
        axis = 0
    indexer = [slice(None)] * part.ndim
    index = part.shape[axis] // 2
    if part.shape[axis] % 2 == 1:
        # index with slice to allow mean (below) to work
        indexer[axis] = slice(index, index + 1)
    else:
        indexer[axis] = slice(index - 1, index + 1)
    indexer = tuple(indexer)
    # Use mean in both odd and even case to coerce data type,
    # using out array if needed.
    rout = mean(part[indexer], axis=axis, out=out)
    if supports_nans and sz > 0:
        # If nans are possible, warn and replace by nans like mean would.
        rout = np.lib._utils_impl._median_nancheck(part, rout, axis)
    return rout
def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
                           method=None, keepdims=None, *, weights=None):
    """Dispatcher for `percentile`: yield the array-like arguments."""
    return a, q, out, weights
@array_function_dispatch(_percentile_dispatcher)
def percentile(a,
               q,
               axis=None,
               out=None,
               overwrite_input=False,
               method="linear",
               keepdims=False,
               *,
               weights=None):
    """
    Compute the q-th percentile of the data along the specified axis.

    Returns the q-th percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like of real numbers
        Input array or object that can be converted to an array.
    q : array_like of float
        Percentage or sequence of percentages for the percentiles to compute.
        Values must be between 0 and 100 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed. The
        default is to compute the percentile(s) along a flattened
        version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow the input array `a` to be modified by intermediate
        calculations, to save memory. In this case, the contents of the input
        `a` after this function completes is undefined.
    method : str, optional
        This parameter specifies the method to use for estimating the
        percentile. There are many different methods, some unique to NumPy.
        See the notes for explanation. The options sorted by their R type
        as summarized in the H&F paper [1]_ are:

        1. 'inverted_cdf'
        2. 'averaged_inverted_cdf'
        3. 'closest_observation'
        4. 'interpolated_inverted_cdf'
        5. 'hazen'
        6. 'weibull'
        7. 'linear' (default)
        8. 'median_unbiased'
        9. 'normal_unbiased'

        The first three methods are discontinuous. NumPy further defines the
        following discontinuous variations of the default 'linear' (7.) option:

        * 'lower'
        * 'higher',
        * 'midpoint'
        * 'nearest'

        .. versionchanged:: 1.22.0
            This argument was previously called "interpolation" and only
            offered the "linear" default and last four options.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the
        result will broadcast correctly against the original array `a`.
    weights : array_like, optional
        An array of weights associated with the values in `a`. Each value in
        `a` contributes to the percentile according to its associated weight.
        The weights array can either be 1-D (in which case its length must be
        the size of `a` along the given axis) or of the same shape as `a`.
        If `weights=None`, then all data in `a` are assumed to have a
        weight equal to one.
        Only `method="inverted_cdf"` supports weights.
        See the notes for more details.

        .. versionadded:: 2.0.0

    Returns
    -------
    percentile : scalar or ndarray
        If `q` is a single percentile and `axis=None`, then the result
        is a scalar. If multiple percentiles are given, first axis of
        the result corresponds to the percentiles. The other axes are
        the axes that remain after the reduction of `a`. If the input
        contains integers or floats smaller than ``float64``, the output
        data-type is ``float64``. Otherwise, the output data-type is the
        same as that of the input. If `out` is specified, that array is
        returned instead.

    See Also
    --------
    mean
    median : equivalent to ``percentile(..., 50)``
    nanpercentile
    quantile : equivalent to percentile, except q in the range [0, 1].

    Notes
    -----
    The behavior of `numpy.percentile` with percentage `q` is
    that of `numpy.quantile` with argument ``q/100``.
    For more information, please see `numpy.quantile`.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> a
    array([[10,  7,  4],
           [ 3,  2,  1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.percentile(a, 50, axis=1)
    array([7.,  2.])
    >>> np.percentile(a, 50, axis=1, keepdims=True)
    array([[7.],
           [2.]])

    >>> m = np.percentile(a, 50, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.percentile(a, 50, axis=0, out=out)
    array([6.5, 4.5, 2.5])
    >>> m
    array([6.5, 4.5, 2.5])

    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=1, overwrite_input=True)
    array([7.,  2.])
    >>> assert not np.all(a == b)

    The different methods can be visualized graphically:

    .. plot::

        import matplotlib.pyplot as plt

        a = np.arange(4)
        p = np.linspace(0, 100, 6001)
        ax = plt.gca()
        lines = [
            ('linear', '-', 'C0'),
            ('inverted_cdf', ':', 'C1'),
            # Almost the same as `inverted_cdf`:
            ('averaged_inverted_cdf', '-.', 'C1'),
            ('closest_observation', ':', 'C2'),
            ('interpolated_inverted_cdf', '--', 'C1'),
            ('hazen', '--', 'C3'),
            ('weibull', '-.', 'C4'),
            ('median_unbiased', '--', 'C5'),
            ('normal_unbiased', '-.', 'C6'),
            ]
        for method, style, color in lines:
            ax.plot(
                p, np.percentile(a, p, method=method),
                label=method, linestyle=style, color=color)
        ax.set(
            title='Percentiles for different methods and data: ' + str(a),
            xlabel='Percentile',
            ylabel='Estimated percentile value',
            yticks=a)
        ax.legend(bbox_to_anchor=(1.03, 1))
        plt.tight_layout()
        plt.show()

    References
    ----------
    .. [1] R. J. Hyndman and Y. Fan,
       "Sample quantiles in statistical packages,"
       The American Statistician, 50(4), pp. 361-365, 1996

    """
    a = np.asanyarray(a)
    if a.dtype.kind == "c":
        raise TypeError("a must be an array of real numbers")
    # Rescale percentages to probabilities; out=... forces an ndarray
    # result (no 0-d scalar decay), so no asanyarray is needed afterwards.
    q = np.true_divide(q, 100, out=...)
    if not _quantile_is_valid(q):
        raise ValueError("Percentiles must be in the range [0, 100]")
    if weights is not None:
        # Weighted estimation is only defined for the inverted-CDF method.
        if method != "inverted_cdf":
            raise ValueError(
                "Only method 'inverted_cdf' supports weights. "
                f"Got: {method}.")
        if axis is not None:
            axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
        weights = _weights_are_valid(weights=weights, a=a, axis=axis)
        if np.any(weights < 0):
            raise ValueError("Weights must be non-negative.")
    return _quantile_unchecked(
        a, q, axis, out, overwrite_input, method, keepdims, weights)
def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
                         method=None, keepdims=None, *, weights=None):
    """Dispatcher for `quantile`: yield the array-like arguments."""
    return a, q, out, weights
@array_function_dispatch(_quantile_dispatcher)
def quantile(a,
             q,
             axis=None,
             out=None,
             overwrite_input=False,
             method="linear",
             keepdims=False,
             *,
             weights=None):
    """
    Compute the q-th quantile of the data along the specified axis.

    Parameters
    ----------
    a : array_like of real numbers
        Input array or object that can be converted to an array.
    q : array_like of float
        Probability or sequence of probabilities of the quantiles to compute.
        Values must be between 0 and 1 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the quantiles are computed. The default is
        to compute the quantile(s) along a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and buffer length as the expected output, but the
        type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow the input array `a` to be modified by
        intermediate calculations, to save memory. In this case, the
        contents of the input `a` after this function completes is
        undefined.
    method : str, optional
        This parameter specifies the method to use for estimating the
        quantile. There are many different methods, some unique to NumPy.
        The recommended options, numbered as they appear in [1]_, are:

        1. 'inverted_cdf'
        2. 'averaged_inverted_cdf'
        3. 'closest_observation'
        4. 'interpolated_inverted_cdf'
        5. 'hazen'
        6. 'weibull'
        7. 'linear' (default)
        8. 'median_unbiased'
        9. 'normal_unbiased'

        The first three methods are discontinuous. For backward compatibility
        with previous versions of NumPy, the following discontinuous variations
        of the default 'linear' (7.) option are available:

        * 'lower'
        * 'higher',
        * 'midpoint'
        * 'nearest'

        See Notes for details.

        .. versionchanged:: 1.22.0
            This argument was previously called "interpolation" and only
            offered the "linear" default and last four options.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in
        the result as dimensions with size one. With this option, the
        result will broadcast correctly against the original array `a`.
    weights : array_like, optional
        An array of weights associated with the values in `a`. Each value in
        `a` contributes to the quantile according to its associated weight.
        The weights array can either be 1-D (in which case its length must be
        the size of `a` along the given axis) or of the same shape as `a`.
        If `weights=None`, then all data in `a` are assumed to have a
        weight equal to one.
        Only `method="inverted_cdf"` supports weights.
        See the notes for more details.

        .. versionadded:: 2.0.0

    Returns
    -------
    quantile : scalar or ndarray
        If `q` is a single probability and `axis=None`, then the result
        is a scalar. If multiple probability levels are given, first axis
        of the result corresponds to the quantiles. The other axes are
        the axes that remain after the reduction of `a`. If the input
        contains integers or floats smaller than ``float64``, the output
        data-type is ``float64``. Otherwise, the output data-type is the
        same as that of the input. If `out` is specified, that array is
        returned instead.

    See Also
    --------
    mean
    percentile : equivalent to quantile, but with q in the range [0, 100].
    median : equivalent to ``quantile(..., 0.5)``
    nanquantile

    Notes
    -----
    Given a sample `a` from an underlying distribution, `quantile` provides a
    nonparametric estimate of the inverse cumulative distribution function.

    By default, this is done by interpolating between adjacent elements in
    ``y``, a sorted copy of `a`::

        (1-g)*y[j] + g*y[j+1]

    where the index ``j`` and coefficient ``g`` are the integral and
    fractional components of ``q * (n-1)``, and ``n`` is the number of
    elements in the sample.

    This is a special case of Equation 1 of H&F [1]_. More generally,

    - ``j = (q*n + m - 1) // 1``, and
    - ``g = (q*n + m - 1) % 1``,

    where ``m`` may be defined according to several different conventions.
    The preferred convention may be selected using the ``method`` parameter:

    =============================== =============== ===============
    ``method``                      number in H&F   ``m``
    =============================== =============== ===============
    ``interpolated_inverted_cdf``   4               ``0``
    ``hazen``                       5               ``1/2``
    ``weibull``                     6               ``q``
    ``linear`` (default)            7               ``1 - q``
    ``median_unbiased``             8               ``q/3 + 1/3``
    ``normal_unbiased``             9               ``q/4 + 3/8``
    =============================== =============== ===============

    Note that indices ``j`` and ``j + 1`` are clipped to the range ``0`` to
    ``n - 1`` when the results of the formula would be outside the allowed
    range of non-negative indices. The ``- 1`` in the formulas for ``j`` and
    ``g`` accounts for Python's 0-based indexing.

    The table above includes only the estimators from H&F that are continuous
    functions of probability `q` (estimators 4-9). NumPy also provides the
    three discontinuous estimators from H&F (estimators 1-3), where ``j`` is
    defined as above, ``m`` is defined as follows, and ``g`` is a function
    of the real-valued ``index = q*n + m - 1`` and ``j``.

    1. ``inverted_cdf``: ``m = 0`` and ``g = int(index - j > 0)``
    2. ``averaged_inverted_cdf``: ``m = 0`` and
       ``g = (1 + int(index - j > 0)) / 2``
    3. ``closest_observation``: ``m = -1/2`` and
       ``g = 1 - int((index == j) & (j%2 == 1))``

    For backward compatibility with previous versions of NumPy, `quantile`
    provides four additional discontinuous estimators. Like
    ``method='linear'``, all have ``m = 1 - q`` so that ``j = q*(n-1) // 1``,
    but ``g`` is defined as follows.

    - ``lower``: ``g = 0``
    - ``midpoint``: ``g = 0.5``
    - ``higher``: ``g = 1``
    - ``nearest``: ``g = (q*(n-1) % 1) > 0.5``

    **Weighted quantiles:**
    More formally, the quantile at probability level :math:`q` of a cumulative
    distribution function :math:`F(y)=P(Y \\leq y)` with probability measure
    :math:`P` is defined as any number :math:`x` that fulfills the
    *coverage conditions*

    .. math:: P(Y < x) \\leq q \\quad\\text{and}\\quad P(Y \\leq x) \\geq q

    with random variable :math:`Y\\sim P`.
    Sample quantiles, the result of `quantile`, provide nonparametric
    estimation of the underlying population counterparts, represented by the
    unknown :math:`F`, given a data vector `a` of length ``n``.

    Some of the estimators above arise when one considers :math:`F` as the
    empirical distribution function of the data, i.e.
    :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`.
    Then, different methods correspond to different choices of :math:`x` that
    fulfill the above coverage conditions. Methods that follow this approach
    are ``inverted_cdf`` and ``averaged_inverted_cdf``.

    For weighted quantiles, the coverage conditions still hold. The
    empirical cumulative distribution is simply replaced by its weighted
    version, i.e.
    :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`.
    Only ``method="inverted_cdf"`` supports weights.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> a
    array([[10,  7,  4],
           [ 3,  2,  1]])
    >>> np.quantile(a, 0.5)
    3.5
    >>> np.quantile(a, 0.5, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.quantile(a, 0.5, axis=1)
    array([7.,  2.])
    >>> np.quantile(a, 0.5, axis=1, keepdims=True)
    array([[7.],
           [2.]])
    >>> m = np.quantile(a, 0.5, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.quantile(a, 0.5, axis=0, out=out)
    array([6.5, 4.5, 2.5])
    >>> m
    array([6.5, 4.5, 2.5])
    >>> b = a.copy()
    >>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
    array([7.,  2.])
    >>> assert not np.all(a == b)

    See also `numpy.percentile` for a visualization of most methods.

    References
    ----------
    .. [1] R. J. Hyndman and Y. Fan,
       "Sample quantiles in statistical packages,"
       The American Statistician, 50(4), pp. 361-365, 1996

    """
    a = np.asanyarray(a)
    if a.dtype.kind == "c":
        raise TypeError("a must be an array of real numbers")
    q = np.asanyarray(q)
    if not _quantile_is_valid(q):
        raise ValueError("Quantiles must be in the range [0, 1]")
    if weights is not None:
        # Weighted estimation is only defined for the inverted-CDF method.
        if method != "inverted_cdf":
            raise ValueError(
                "Only method 'inverted_cdf' supports weights. "
                f"Got: {method}.")
        if axis is not None:
            axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
        weights = _weights_are_valid(weights=weights, a=a, axis=axis)
        if np.any(weights < 0):
            raise ValueError("Weights must be non-negative.")
    return _quantile_unchecked(
        a, q, axis, out, overwrite_input, method, keepdims, weights)
def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
                        method="linear", keepdims=False, weights=None):
    """Compute quantiles, assuming q is an ndarray with values in [0, 1]."""
    reduce_kwargs = dict(
        q=q,
        weights=weights,
        keepdims=keepdims,
        axis=axis,
        out=out,
        overwrite_input=overwrite_input,
        method=method,
    )
    # _ureduce handles tuple axes / keepdims / out on behalf of
    # _quantile_ureduce_func, which only accepts a single int axis.
    return _ureduce(a, func=_quantile_ureduce_func, **reduce_kwargs)
def _quantile_is_valid(q):
    """Return True when every entry of `q` lies in the closed range [0, 1].

    NaNs compare False against the bounds and therefore make this False.
    """
    # For tiny 1-d inputs a plain Python loop beats the reduction machinery;
    # this matters for arrays with < O(1000) elements.
    if q.ndim == 1 and q.size < 10:
        return all(0.0 <= q[i] <= 1.0 for i in range(q.size))
    return bool(q.min() >= 0 and q.max() <= 1)
def _compute_virtual_index(n, quantiles, alpha: float, beta: float):
    """
    Compute the floating point indexes of an array for the linear
    interpolation of quantiles.

    Implements ``i + g = q*n + alpha + q*(1 - alpha - beta) - 1`` from
    Hyndman & Fan, "Sample Quantiles in Statistical Packages"
    (DOI: 10.1080/00031305.1996.10473566); `alpha` and `beta` select the
    estimation method (see quantile documentation).

    n : array_like
        The sample sizes.
    quantiles : array_like
        The quantiles values.
    alpha : float
        A constant used to correct the index computed.
    beta : float
        A constant used to correct the index computed.
    """
    correction = alpha + quantiles * (1 - alpha - beta)
    return n * quantiles + correction - 1
def _get_gamma(virtual_indexes, previous_indexes, method, dtype):
    """
    Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation
    of quantiles.

    virtual_indexes : array_like
        The indexes where the percentile is supposed to be found in the
        sorted sample.
    previous_indexes : array_like
        The floor values of virtual_indexes.
    method : dict
        The interpolation method chosen, whose ``fix_gamma`` entry may
        adjust the fractional part per its own rule.
    dtype : dtype
        Requested dtype of the returned array.
    """
    fractional_part = np.asanyarray(virtual_indexes - previous_indexes)
    adjusted = method["fix_gamma"](fractional_part, virtual_indexes)
    # Return an ndarray (not a scalar) and keep the requested dtype,
    # which may have been matched to the input array.
    return np.asanyarray(adjusted, dtype=dtype)
def _lerp(a, b, t, out=None):
    """
    Compute the linear interpolation weighted by gamma on each point of
    two same shape array.

    a : array_like
        Left bound.
    b : array_like
        Right bound.
    t : array_like
        The interpolation weight, typically in [0, 1].
    out : array_like
        Output array.
    """
    diff_b_a = b - a
    # out=... forces an ndarray result (no 0-d scalar decay) when the caller
    # did not supply an output array.
    lerp_interpolation = add(a, diff_b_a * t, out=... if out is None else out)
    # Recompute from the `b` side where t >= 0.5 so that t == 1 yields
    # exactly `b` (b - diff_b_a * 0), instead of a + (b - a) * 1 which can
    # differ by round-off.
    # NOTE(review): dtype=type(...) passes the dtype *class* (an abstract
    # dtype) rather than the instance — confirm this is intentional.
    subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5,
             casting='unsafe', dtype=type(lerp_interpolation.dtype))
    if lerp_interpolation.ndim == 0 and out is None:
        lerp_interpolation = lerp_interpolation[()]  # unpack 0d arrays
    return lerp_interpolation
def _get_gamma_mask(shape, default_value, conditioned_value, where):
    """Return an array filled with `default_value`, overwritten with
    `conditioned_value` wherever `where` is True (cast unsafely)."""
    result = np.full(shape, default_value)
    np.copyto(result, conditioned_value, where=where, casting="unsafe")
    return result
def _discrete_interpolation_to_boundaries(index, gamma_condition_fun):
    """
    Map fractional quantile positions onto integer sample indexes for the
    discontinuous quantile methods.

    `gamma_condition_fun(gamma, index)` decides, per element, whether to
    round down to ``floor(index)``; everywhere else ``floor(index) + 1``
    is used.
    """
    lower = np.floor(index)
    upper = lower + 1
    fractional = index - lower
    chosen = _get_gamma_mask(shape=index.shape,
                             default_value=upper,
                             conditioned_value=lower,
                             where=gamma_condition_fun(fractional, index)
                             ).astype(np.intp)
    # Some methods can lead to out-of-bound integers, clip them:
    chosen[chosen < 0] = 0
    return chosen
def _closest_observation(n, quantiles):
    """H&F method 3: pick the order statistic closest to ``n*q - 1/2``."""
    # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362).
    # Order is 1-based so for zero-based indexing round to nearest odd index.
    def round_down_at_tie(gamma, index):
        return (gamma == 0) & (np.floor(index) % 2 == 1)

    return _discrete_interpolation_to_boundaries(
        (n * quantiles) - 1 - 0.5, round_down_at_tie)
def _inverted_cdf(n, quantiles):
    """H&F method 1: inverse of the empirical CDF (step down to the
    previous observation exactly at a jump, i.e. where gamma == 0)."""
    def round_down_at_jump(gamma, _):
        return gamma == 0

    return _discrete_interpolation_to_boundaries((n * quantiles) - 1,
                                                 round_down_at_jump)
def _quantile_ureduce_func(
        a: np.ndarray,
        q: np.ndarray,
        weights: np.ndarray | None,
        axis: int | None = None,
        out: np.ndarray | None = None,
        overwrite_input: bool = False,
        method: str = "linear",
) -> np.ndarray:
    """Prepare (flatten/copy) the data and weights, then call `_quantile`.

    `_quantile` partitions its input in place, so a copy is made here
    unless the caller explicitly allowed `a` to be clobbered.
    """
    if q.ndim > 2:
        # The code below works fine for nd, but it might not have useful
        # semantics. For now, keep the supported dimensions the same as it
        # was before.
        raise ValueError("q must be a scalar or 1d")
    if axis is None:
        # Operate on the flattened data; axis 0 of the flat array.
        axis = 0
        if overwrite_input:
            # ravel() may alias `a`, which is fine when overwriting is OK.
            arr = a.ravel()
            wgt = None if weights is None else weights.ravel()
        else:
            # flatten() always copies, protecting the caller's data.
            arr = a.flatten()
            wgt = None if weights is None else weights.flatten()
    else:
        arr = a if overwrite_input else a.copy()
        wgt = weights
    return _quantile(arr,
                     quantiles=q,
                     axis=axis,
                     method=method,
                     out=out,
                     weights=wgt)
def _get_indexes(arr, virtual_indexes, valid_values_count):
    """
    Get the valid indexes of arr neighbouring virtual_indexes.

    Note
    ----
    This is a companion function to linear interpolation of
    Quantiles. `arr` is assumed to be sorted (or partitioned) along axis 0
    by the caller — see `_quantile`.

    Returns
    -------
    (previous_indexes, next_indexes): Tuple
        A Tuple of virtual_indexes neighbouring indexes, as intp arrays.
    """
    # out=... forces ndarray results even for 0-d input (no scalar decay),
    # so the in-place masking below always works.
    previous_indexes = floor(virtual_indexes, out=...)
    next_indexes = add(previous_indexes, 1, out=...)
    indexes_above_bounds = virtual_indexes >= valid_values_count - 1
    # When indexes is above max index, take the max value of the array
    if indexes_above_bounds.any():
        previous_indexes[indexes_above_bounds] = -1
        next_indexes[indexes_above_bounds] = -1
    # When indexes is below min index, take the min value of the array
    indexes_below_bounds = virtual_indexes < 0
    if indexes_below_bounds.any():
        previous_indexes[indexes_below_bounds] = 0
        next_indexes[indexes_below_bounds] = 0
    if np.issubdtype(arr.dtype, np.inexact):
        # After the sort, slices having NaNs will have for last element a NaN
        virtual_indexes_nans = np.isnan(virtual_indexes)
        if virtual_indexes_nans.any():
            # NaN quantile positions point both neighbours at the trailing
            # (NaN) element so the interpolation propagates the NaN.
            previous_indexes[virtual_indexes_nans] = -1
            next_indexes[virtual_indexes_nans] = -1
    previous_indexes = previous_indexes.astype(np.intp)
    next_indexes = next_indexes.astype(np.intp)
    return previous_indexes, next_indexes
def _quantile(
        arr: "np.typing.ArrayLike",
        quantiles: np.ndarray,
        axis: int = -1,
        method: str = "linear",
        out: np.ndarray | None = None,
        weights: "np.typing.ArrayLike | None" = None,
) -> np.ndarray:
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage

    It computes the quantiles of the array for the given axis.
    A linear interpolation is performed based on the `method`.

    By default, the method is "linear" where alpha == beta == 1 which
    performs the 7th method of Hyndman&Fan.
    With "median_unbiased" we get alpha == beta == 1/3
    thus the 8th method of Hyndman&Fan.

    Note: `arr` is partitioned *in place* below; the caller
    (`_quantile_ureduce_func`) has already made a copy when needed.
    """
    # --- Setup
    arr = np.asanyarray(arr)
    values_count = arr.shape[axis]
    # The dimensions of `q` are prepended to the output shape, so we need the
    # axis being sampled from `arr` to be last.
    if axis != 0:  # But moveaxis is slow, so only call it if necessary.
        arr = np.moveaxis(arr, axis, destination=0)
    # floats/complex and datetime/timedelta can carry NaN/NaT values.
    supports_nans = (
        np.issubdtype(arr.dtype, np.inexact) or arr.dtype.kind in 'Mm'
    )
    if weights is None:
        # --- Computation of indexes
        # Index where to find the value in the sorted array.
        # Virtual because it is a floating point value, not a valid index.
        # The nearest neighbours are used for interpolation
        try:
            method_props = _QuantileMethods[method]
        except KeyError:
            raise ValueError(
                f"{method!r} is not a valid method. Use one of: "
                f"{_QuantileMethods.keys()}") from None
        virtual_indexes = method_props["get_virtual_index"](values_count,
                                                            quantiles)
        virtual_indexes = np.asanyarray(virtual_indexes)
        if method_props["fix_gamma"] is None:
            # Discrete methods produce integer indexes directly.
            supports_integers = True
        else:
            int_virtual_indices = np.issubdtype(virtual_indexes.dtype,
                                                np.integer)
            supports_integers = method == 'linear' and int_virtual_indices
        if supports_integers:
            # No interpolation needed, take the points along axis
            if supports_nans:
                # may contain nan, which would sort to the end
                arr.partition(
                    concatenate((virtual_indexes.ravel(), [-1])), axis=0,
                )
                slices_having_nans = np.isnan(arr[-1, ...])
            else:
                # cannot contain nan
                arr.partition(virtual_indexes.ravel(), axis=0)
                slices_having_nans = np.array(False, dtype=bool)
            result = take(arr, virtual_indexes, axis=0, out=out)
        else:
            previous_indexes, next_indexes = _get_indexes(arr,
                                                          virtual_indexes,
                                                          values_count)
            # --- Sorting
            # Partition so every needed index (plus first and last element)
            # holds its sorted-order value; a full sort is not required.
            arr.partition(
                np.unique(np.concatenate(([0, -1],
                                          previous_indexes.ravel(),
                                          next_indexes.ravel(),
                                          ))),
                axis=0)
            if supports_nans:
                slices_having_nans = np.isnan(arr[-1, ...])
            else:
                slices_having_nans = None
            # --- Get values from indexes
            # NOTE: `next` shadows the builtin; it is purely local here.
            previous = arr[previous_indexes]
            next = arr[next_indexes]
            # --- Linear interpolation
            if arr.dtype.kind in "iu":
                # `None` lets _get_gamma/asanyarray pick a float dtype for
                # integer inputs.
                gtype = None
            elif arr.dtype.kind == "f":
                # make sure the return value matches the input array type
                gtype = arr.dtype
            else:
                gtype = virtual_indexes.dtype
            gamma = _get_gamma(virtual_indexes, previous_indexes,
                               method_props, gtype)
            # Broadcast gamma against the non-sampled dimensions of arr.
            result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1)
            gamma = gamma.reshape(result_shape)
            result = _lerp(previous,
                           next,
                           gamma,
                           out=out)
    else:
        # Weighted case
        # This implements method="inverted_cdf", the only supported weighted
        # method, which needs to sort anyway.
        weights = np.asanyarray(weights)
        if axis != 0:
            weights = np.moveaxis(weights, axis, destination=0)
        index_array = np.argsort(arr, axis=0)
        # arr = arr[index_array, ...]  # but this adds trailing dimensions of
        # 1.
        arr = np.take_along_axis(arr, index_array, axis=0)
        if weights.shape == arr.shape:
            weights = np.take_along_axis(weights, index_array, axis=0)
        else:
            # weights is 1d
            weights = weights.reshape(-1)[index_array, ...]
        if supports_nans:
            # may contain nan, which would sort to the end
            slices_having_nans = np.isnan(arr[-1, ...])
        else:
            # cannot contain nan
            slices_having_nans = np.array(False, dtype=bool)
        # We use the weights to calculate the empirical cumulative
        # distribution function cdf
        cdf = weights.cumsum(axis=0, dtype=np.float64)
        cdf /= cdf[-1, ...]  # normalization to 1
        # Search index i such that
        #   sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i)
        # is then equivalent to
        #   cdf[i-1] < quantile <= cdf[i]
        # Unfortunately, searchsorted only accepts 1-d arrays as first
        # argument, so we will need to iterate over dimensions.

        # Without the following cast, searchsorted can return surprising
        # results, e.g.
        #   np.searchsorted(np.array([0.2, 0.4, 0.6, 0.8, 1.]),
        #                   np.array(0.4, dtype=np.float32), side="left")
        # returns 2 instead of 1 because 0.4 is not binary representable.
        if quantiles.dtype.kind == "f":
            cdf = cdf.astype(quantiles.dtype)
        # Weights must be non-negative, so we might have zero weights at the
        # beginning leading to some leading zeros in cdf. The call to
        # np.searchsorted for quantiles=0 will then pick the first element,
        # but should pick the first one larger than zero. We
        # therefore simply set 0 values in cdf to -1.
        if np.any(cdf[0, ...] == 0):
            cdf[cdf == 0] = -1

        def find_cdf_1d(arr, cdf):
            # Inverted CDF lookup on one 1-d slice.
            indices = np.searchsorted(cdf, quantiles, side="left")
            # We might have reached the maximum with i = len(arr), e.g. for
            # quantiles = 1, and need to cut it to len(arr) - 1.
            indices = minimum(indices, values_count - 1)
            result = take(arr, indices, axis=0)
            return result

        r_shape = arr.shape[1:]
        if quantiles.ndim > 0:
            r_shape = quantiles.shape + r_shape
        if out is None:
            result = np.empty_like(arr, shape=r_shape)
        else:
            if out.shape != r_shape:
                msg = (f"Wrong shape of argument 'out', shape={r_shape} is "
                       f"required; got shape={out.shape}.")
                raise ValueError(msg)
            result = out
        # See apply_along_axis, which we do for axis=0. Note that Ni = (,)
        # always, so we remove it here.
        Nk = arr.shape[1:]
        for kk in np.ndindex(Nk):
            result[(...,) + kk] = find_cdf_1d(
                arr[np.s_[:, ] + kk], cdf[np.s_[:, ] + kk]
            )
        # Make result the same as in unweighted inverted_cdf.
        if result.shape == () and result.dtype == np.dtype("O"):
            result = result.item()
    if np.any(slices_having_nans):
        if result.ndim == 0 and out is None:
            # can't write to a scalar, but indexing will be correct
            result = arr[-1]
        else:
            # propagate NaN into every slice that contained one
            np.copyto(result, arr[-1, ...], where=slices_having_nans)
    return result
def _trapezoid_dispatcher(y, x=None, dx=None, axis=None):
    # __array_function__ dispatcher: only `y` and `x` are array-like
    # candidates; `dx` and `axis` never carry override types.
    return y, x
@array_function_dispatch(_trapezoid_dispatcher)
def trapezoid(y, x=None, dx=1.0, axis=-1):
    r"""
    Integrate along the given axis using the composite trapezoidal rule.

    If `x` is provided, the integration happens in sequence along its
    elements - they are not sorted.

    Integrate `y` (`x`) along each 1d slice on the given axis, compute
    :math:`\int y(x) dx`.
    When `x` is specified, this integrates along the parametric curve,
    computing :math:`\int_t y(t) dt =
    \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`.

    .. versionadded:: 2.0.0

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        The sample points corresponding to the `y` values. If `x` is None,
        the sample points are assumed to be evenly spaced `dx` apart. The
        default is None.
    dx : scalar, optional
        The spacing between sample points when `x` is None. The default is 1.
    axis : int, optional
        The axis along which to integrate.

    Returns
    -------
    trapezoid : float or ndarray
        Definite integral of `y` = n-dimensional array as approximated along
        a single axis by the trapezoidal rule. If `y` is a 1-dimensional
        array, then the result is a float. If `n` is greater than 1, then the
        result is an `n`-1 dimensional array.

    See Also
    --------
    sum, cumsum

    Notes
    -----
    Image [2]_ illustrates trapezoidal rule -- y-axis locations of points
    will be taken from `y` array, by default x-axis distances between
    points will be 1.0, alternatively they can be provided with `x` array
    or with `dx` scalar. Return value will be equal to combined area under
    the red lines.

    References
    ----------
    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule
    .. [2] Illustration image:
           https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png

    Examples
    --------
    >>> import numpy as np

    Use the trapezoidal rule on evenly spaced points:

    >>> np.trapezoid([1, 2, 3])
    4.0

    The spacing between sample points can be selected by either the
    ``x`` or ``dx`` arguments:

    >>> np.trapezoid([1, 2, 3], x=[4, 6, 8])
    8.0
    >>> np.trapezoid([1, 2, 3], dx=2)
    8.0

    Using a decreasing ``x`` corresponds to integrating in reverse:

    >>> np.trapezoid([1, 2, 3], x=[8, 6, 4])
    -8.0

    ``np.trapezoid`` can be applied along a specified axis to do multiple
    computations in one call:

    >>> a = np.arange(6).reshape(2, 3)
    >>> np.trapezoid(a, axis=0)
    array([1.5, 2.5, 3.5])
    >>> np.trapezoid(a, axis=1)
    array([2., 8.])
    """
    y = asanyarray(y)
    if x is None:
        spacing = dx
    else:
        x = asanyarray(x)
        if x.ndim == 1:
            spacing = diff(x)
            # Reshape the 1-d spacing so it broadcasts along `axis` of `y`.
            bcast_shape = [1] * y.ndim
            bcast_shape[axis] = spacing.shape[0]
            spacing = spacing.reshape(bcast_shape)
        else:
            spacing = diff(x, axis=axis)

    # Views of `y` shifted by one sample along the integration axis, so the
    # trapezoid heights are y[i] and y[i+1].
    upper = [slice(None)] * y.ndim
    lower = [slice(None)] * y.ndim
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    upper = tuple(upper)
    lower = tuple(lower)
    try:
        ret = (spacing * (y[upper] + y[lower]) / 2.0).sum(axis)
    except ValueError:
        # The subclass arithmetic didn't work; retry after casting both
        # operands to plain ndarrays and reducing with ufunc machinery.
        spacing = np.asarray(spacing)
        y = np.asarray(y)
        ret = add.reduce(spacing * (y[upper] + y[lower]) / 2.0, axis)
    return ret
def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
    # __array_function__ dispatcher: every positional argument is an
    # array-like coordinate vector; the keywords never are.
    return xi
# Based on scitools meshgrid
@array_function_dispatch(_meshgrid_dispatcher)
def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
    """
    Return a tuple of coordinate matrices from coordinate vectors.

    Make N-D coordinate arrays for vectorized evaluations of
    N-D scalar/vector fields over N-D grids, given
    one-dimensional coordinate arrays x1, x2,..., xn.

    Parameters
    ----------
    x1, x2,..., xn : array_like
        1-D arrays representing the coordinates of a grid.
    indexing : {'xy', 'ij'}, optional
        Cartesian ('xy', default) or matrix ('ij') indexing of output.
        See Notes for more details.
    sparse : bool, optional
        If True the shape of the returned coordinate array for dimension *i*
        is reduced from ``(N1, ..., Ni, ... Nn)`` to
        ``(1, ..., 1, Ni, 1, ..., 1)``.  These sparse coordinate grids are
        intended to be used with :ref:`basics.broadcasting`.  When all
        coordinates are used in an expression, broadcasting still leads to a
        fully-dimensional result array.

        Default is False.
    copy : bool, optional
        If False, a view into the original arrays are returned in order to
        conserve memory.  Default is True.  Please note that
        ``sparse=False, copy=False`` will likely return non-contiguous
        arrays.  Furthermore, more than one element of a broadcast array
        may refer to a single memory location.  If you need to write to the
        arrays, make copies first.

    Returns
    -------
    X1, X2,..., XN : tuple of ndarrays
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
        returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij'
        or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy'
        with the elements of `xi` repeated to fill the matrix along
        the first dimension for `x1`, the second for `x2` and so on.

    Notes
    -----
    This function supports both indexing conventions through the indexing
    keyword argument.  Giving the string 'ij' returns a meshgrid with
    matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
    In the 2-D case with inputs of length M and N, the outputs are of shape
    (N, M) for 'xy' indexing and (M, N) for 'ij' indexing.  In the 3-D case
    with inputs of length M, N and P, outputs are of shape (N, M, P) for
    'xy' indexing and (M, N, P) for 'ij' indexing.

    In the 1-D and 0-D case, the indexing and sparse keywords have no
    effect.

    See Also
    --------
    mgrid : Construct a multi-dimensional "meshgrid" using indexing notation.
    ogrid : Construct an open multi-dimensional "meshgrid" using indexing
            notation.
    :ref:`how-to-index`

    Examples
    --------
    >>> import numpy as np
    >>> nx, ny = (3, 2)
    >>> x = np.linspace(0, 1, nx)
    >>> y = np.linspace(0, 1, ny)
    >>> xv, yv = np.meshgrid(x, y)
    >>> xv
    array([[0. , 0.5, 1. ],
           [0. , 0.5, 1. ]])
    >>> yv
    array([[0., 0., 0.],
           [1., 1., 1.]])

    You can create sparse output arrays to save memory and computation time.

    >>> xv, yv = np.meshgrid(x, y, sparse=True)
    >>> xv
    array([[0. , 0.5, 1. ]])
    >>> yv
    array([[0.],
           [1.]])
    """
    if indexing not in ['xy', 'ij']:
        raise ValueError(
            "Valid values for `indexing` are 'xy' and 'ij'.")

    ndim = len(xi)
    s0 = (1,) * ndim
    # Place each 1-d input along its own axis of an ndim-dimensional,
    # broadcast-compatible array: shape (1, ..., 1, Ni, 1, ..., 1).
    output = [np.asanyarray(v).reshape(s0[:i] + (-1,) + s0[i + 1:])
              for i, v in enumerate(xi)]

    if indexing == 'xy' and ndim > 1:
        # Cartesian convention: the first two axes trade places.  These are
        # contiguous 1-d buffers, so reshape yields views (no copies).
        output[0] = output[0].reshape((1, -1) + s0[2:])
        output[1] = output[1].reshape((-1, 1) + s0[2:])

    if not sparse:
        # Expand to full N-D matrices rather than broadcastable vectors.
        output = np.broadcast_arrays(*output, subok=True)

    if copy:
        output = tuple(v.copy() for v in output)

    return output
def _delete_dispatcher(arr, obj, axis=None):
    # __array_function__ dispatcher: `arr` and `obj` may be array-like.
    return arr, obj
@array_function_dispatch(_delete_dispatcher)
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    `arr[obj]`.
    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int, array-like of ints or bools
        Indicate indices of sub-arrays to remove along the specified axis.
        .. versionchanged:: 1.19.0
            Boolean indices are now treated as a mask of elements to remove,
            rather than being cast to the integers 0 and 1.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.
    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.
    See Also
    --------
    insert : Insert elements into an array.
    append : Append elements at the end of an array.
    Notes
    -----
    Often it is preferable to use a boolean mask. For example:
    >>> arr = np.arange(12) + 1
    >>> mask = np.ones(len(arr), dtype=bool)
    >>> mask[[0,2,4]] = False
    >>> result = arr[mask,...]
    Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further
    use of `mask`.
    Examples
    --------
    >>> import numpy as np
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> arr
    array([[ 1,  2,  3,  4],
           [ 5,  6,  7,  8],
           [ 9, 10, 11, 12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1,  2,  3,  4],
           [ 9, 10, 11, 12]])
    >>> np.delete(arr, np.s_[::2], 1)
    array([[ 2,  4],
           [ 6,  8],
           [10, 12]])
    >>> np.delete(arr, [1,3,5], None)
    array([ 1,  3,  5,  7,  8,  9, 10, 11, 12])
    """
    # Converter remembers whether the input was a scalar/0-d so the result
    # can be wrapped back consistently.
    conv = _array_converter(arr)
    arr, = conv.as_arrays(subok=False)
    ndim = arr.ndim
    # Preserve Fortran memory order when the input is F- but not C-contiguous.
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        # No axis: operate on the flattened array's last (only) axis.
        if ndim != 1:
            arr = arr.ravel()
        # needed for np.matrix, which is still not 1d after being ravelled
        ndim = arr.ndim
        axis = ndim - 1
    else:
        axis = normalize_axis_index(axis, ndim)
    slobj = [slice(None)] * ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)
    if isinstance(obj, slice):
        # Slice path: copy the surviving data in up to three chunks
        # (before / after / strided middle) instead of fancy indexing.
        start, stop, step = obj.indices(N)
        xr = range(start, stop, step)
        numtodel = len(xr)
        if numtodel <= 0:
            # Empty slice: nothing to delete, return a copy.
            return conv.wrap(arr.copy(order=arrorder), to_scalar=False)
        # Invert if step is negative:
        if step < 0:
            step = -step
            start = xr[-1]
            stop = xr[0] + 1
        newshape[axis] -= numtodel
        new = empty(newshape, arr.dtype, arrorder)
        # copy initial chunk
        if start == 0:
            pass
        else:
            slobj[axis] = slice(None, start)
            new[tuple(slobj)] = arr[tuple(slobj)]
        # copy end chunk
        if stop == N:
            pass
        else:
            slobj[axis] = slice(stop - numtodel, None)
            slobj2 = [slice(None)] * ndim
            slobj2[axis] = slice(stop, None)
            new[tuple(slobj)] = arr[tuple(slobj2)]
        # copy middle pieces
        if step == 1:
            pass
        else:  # use array indexing.
            # Keep every element of [start, stop) that is NOT hit by the
            # (now positive) step.
            keep = ones(stop - start, dtype=bool)
            keep[:stop - start:step] = False
            slobj[axis] = slice(start, stop - numtodel)
            slobj2 = [slice(None)] * ndim
            slobj2[axis] = slice(start, stop)
            arr = arr[tuple(slobj2)]
            slobj2[axis] = keep
            new[tuple(slobj)] = arr[tuple(slobj2)]
        return conv.wrap(new, to_scalar=False)
    # A plain Python int (not bool) gets the fast single-index path below.
    if isinstance(obj, (int, integer)) and not isinstance(obj, bool):
        single_value = True
    else:
        single_value = False
        _obj = obj
        obj = np.asarray(obj)
        # `size == 0` to allow empty lists similar to indexing, but (as there)
        # is really too generic:
        if obj.size == 0 and not isinstance(_obj, np.ndarray):
            obj = obj.astype(intp)
        elif obj.size == 1 and obj.dtype.kind in "ui":
            # For a size 1 integer array we can use the single-value path
            # (most dtypes, except boolean, should just fail later).
            obj = obj.item()
            single_value = True
    if single_value:
        # optimization for a single value
        if (obj < -N or obj >= N):
            raise IndexError(
                f"index {obj} is out of bounds for axis {axis} with "
                f"size {N}")
        if (obj < 0):
            obj += N
        newshape[axis] -= 1
        new = empty(newshape, arr.dtype, arrorder)
        # Copy everything before, then everything after, the removed index.
        slobj[axis] = slice(None, obj)
        new[tuple(slobj)] = arr[tuple(slobj)]
        slobj[axis] = slice(obj, None)
        slobj2 = [slice(None)] * ndim
        slobj2[axis] = slice(obj + 1, None)
        new[tuple(slobj)] = arr[tuple(slobj2)]
    else:
        if obj.dtype == bool:
            if obj.shape != (N,):
                raise ValueError('boolean array argument obj to delete '
                                 'must be one dimensional and match the axis '
                                 f'length of {N}')
            # optimization, the other branch is slower
            keep = ~obj
        else:
            # Integer index array: build a keep-mask and select with it.
            keep = ones(N, dtype=bool)
            keep[obj,] = False
        slobj[axis] = keep
        new = arr[tuple(slobj)]
    return conv.wrap(new, to_scalar=False)
def _insert_dispatcher(arr, obj, values, axis=None):
    # __array_function__ dispatcher: `arr`, `obj` and `values` may all be
    # array-like.
    return arr, obj, values
@array_function_dispatch(_insert_dispatcher)
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.
    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int, array-like of ints or bools
        Object that defines the index or indices before which `values` is
        inserted.
        .. versionchanged:: 2.1.2
            Boolean indices are now treated as a mask of elements to insert,
            rather than being cast to the integers 0 and 1.
        Support for multiple insertions when `obj` is a single scalar or a
        sequence with one element (similar to calling insert multiple
        times).
    values : array_like
        Values to insert into `arr`. If the type of `values` is different
        from that of `arr`, `values` is converted to the type of `arr`.
        `values` should be shaped so that ``arr[...,obj,...] = values``
        is legal.
    axis : int, optional
        Axis along which to insert `values`.  If `axis` is None then `arr`
        is flattened first.
    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted.  Note that `insert`
        does not occur in-place: a new array is returned. If
        `axis` is None, `out` is a flattened array.
    See Also
    --------
    append : Append elements at the end of an array.
    concatenate : Join a sequence of arrays along an existing axis.
    delete : Delete elements from an array.
    Notes
    -----
    Note that for higher dimensional inserts ``obj=0`` behaves very different
    from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from
    ``arr[:,[0],:] = values``. This is because of the difference between basic
    and advanced :ref:`indexing <basics.indexing>`.
    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(6).reshape(3, 2)
    >>> a
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> np.insert(a, 1, 6)
    array([0, 6, 1, 2, 3, 4, 5])
    >>> np.insert(a, 1, 6, axis=1)
    array([[0, 6, 1],
           [2, 6, 3],
           [4, 6, 5]])
    Difference between sequence and scalars,
    showing how ``obj=[1]`` behaves different from ``obj=1``:
    >>> np.insert(a, [1], [[7],[8],[9]], axis=1)
    array([[0, 7, 1],
           [2, 8, 3],
           [4, 9, 5]])
    >>> np.insert(a, 1, [[7],[8],[9]], axis=1)
    array([[0, 7, 8, 9, 1],
           [2, 7, 8, 9, 3],
           [4, 7, 8, 9, 5]])
    >>> np.array_equal(np.insert(a, 1, [7, 8, 9], axis=1),
    ...                np.insert(a, [1], [[7],[8],[9]], axis=1))
    True
    >>> b = a.flatten()
    >>> b
    array([0, 1, 2, 3, 4, 5])
    >>> np.insert(b, [2, 2], [6, 7])
    array([0, 1, 6, 7, 2, 3, 4, 5])
    >>> np.insert(b, slice(2, 4), [7, 8])
    array([0, 1, 7, 2, 8, 3, 4, 5])
    >>> np.insert(b, [2, 2], [7.13, False]) # type casting
    array([0, 1, 7, 0, 2, 3, 4, 5])
    >>> x = np.arange(8).reshape(2, 4)
    >>> idx = (1, 3)
    >>> np.insert(x, idx, 999, axis=1)
    array([[  0, 999,   1,   2, 999,   3],
           [  4, 999,   5,   6, 999,   7]])
    """
    # Converter remembers scalar/0-d-ness of the input for result wrapping.
    conv = _array_converter(arr)
    arr, = conv.as_arrays(subok=False)
    ndim = arr.ndim
    # Preserve Fortran memory order when the input is F- but not C-contiguous.
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        # No axis: insert into the flattened array along its last axis.
        if ndim != 1:
            arr = arr.ravel()
        # needed for np.matrix, which is still not 1d after being ravelled
        ndim = arr.ndim
        axis = ndim - 1
    else:
        axis = normalize_axis_index(axis, ndim)
    slobj = [slice(None)] * ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)
    if isinstance(obj, slice):
        # turn it into a range object
        indices = arange(*obj.indices(N), dtype=intp)
    else:
        # need to copy obj, because indices will be changed in-place
        indices = np.array(obj)
        if indices.dtype == bool:
            if obj.ndim != 1:
                raise ValueError('boolean array argument obj to insert '
                                 'must be one dimensional')
            # Boolean mask: insert before each True position.
            indices = np.flatnonzero(obj)
        elif indices.ndim > 1:
            raise ValueError(
                "index array argument obj to insert must be one dimensional "
                "or scalar")
    if indices.size == 1:
        # Single-index path: `values` may still be a whole block that gets
        # inserted contiguously at this position.
        index = indices.item()
        if index < -N or index > N:
            raise IndexError(f"index {obj} is out of bounds for axis {axis} "
                             f"with size {N}")
        if (index < 0):
            index += N
        # There are some object array corner cases here, but we cannot avoid
        # that:
        values = array(values, copy=None, ndmin=arr.ndim, dtype=arr.dtype)
        if indices.ndim == 0:
            # broadcasting is very different here, since a[:,0,:] = ... behaves
            # very different from a[:,[0],:] = ...! This changes values so that
            # it works likes the second case. (here a[:,0:1,:])
            values = np.moveaxis(values, 0, axis)
        numnew = values.shape[axis]
        newshape[axis] += numnew
        new = empty(newshape, arr.dtype, arrorder)
        # Copy the leading part, the inserted block, then the trailing part.
        slobj[axis] = slice(None, index)
        new[tuple(slobj)] = arr[tuple(slobj)]
        slobj[axis] = slice(index, index + numnew)
        new[tuple(slobj)] = values
        slobj[axis] = slice(index + numnew, None)
        slobj2 = [slice(None)] * ndim
        slobj2[axis] = slice(index, None)
        new[tuple(slobj)] = arr[tuple(slobj2)]
        return conv.wrap(new, to_scalar=False)
    elif indices.size == 0 and not isinstance(obj, np.ndarray):
        # Can safely cast the empty list to intp
        indices = indices.astype(intp)
    # Multi-index path: each index gets one inserted element (broadcast from
    # `values`).  Normalize negatives, then shift each index by how many
    # insertions land before it so positions refer to the enlarged array.
    indices[indices < 0] += N
    numnew = len(indices)
    order = indices.argsort(kind='mergesort')   # stable sort
    indices[order] += np.arange(numnew)
    newshape[axis] += numnew
    # old_mask marks the slots that keep original data; the complement
    # (exactly `indices`) receives the new values.
    old_mask = ones(newshape[axis], dtype=bool)
    old_mask[indices] = False
    new = empty(newshape, arr.dtype, arrorder)
    slobj2 = [slice(None)] * ndim
    slobj[axis] = indices
    slobj2[axis] = old_mask
    new[tuple(slobj)] = values
    new[tuple(slobj2)] = arr
    return conv.wrap(new, to_scalar=False)
def _append_dispatcher(arr, values, axis=None):
    # __array_function__ dispatcher: `arr` and `values` may be array-like.
    return arr, values
@array_function_dispatch(_append_dispatcher)
def append(arr, values, axis=None):
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : array_like
        Values are appended to a copy of this array.
    values : array_like
        These values are appended to a copy of `arr`.  It must be of the
        correct shape (the same shape as `arr`, excluding `axis`).  If
        `axis` is not specified, `values` can be any shape and will be
        flattened before use.
    axis : int, optional
        The axis along which `values` are appended.  If `axis` is not
        given, both `arr` and `values` are flattened before use.

    Returns
    -------
    append : ndarray
        A copy of `arr` with `values` appended to `axis`.  Note that
        `append` does not occur in-place: a new array is allocated and
        filled.  If `axis` is None, `out` is a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    delete : Delete elements from an array.

    Examples
    --------
    >>> import numpy as np
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, ..., 7, 8, 9])

    When `axis` is specified, `values` must have the correct shape.

    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    >>> a = np.array([1, 2], dtype=int)
    >>> np.append(a, [])
    array([1., 2.])

    Appending an empty array promotes to its default `float64` dtype.
    """
    arr = asanyarray(arr)
    if axis is None:
        # Flatten both operands and join along the single remaining axis.
        # (np.matrix stays 2-d after ravel, hence ndim - 1 rather than 0.)
        flat = arr if arr.ndim == 1 else arr.ravel()
        return concatenate((flat, ravel(values)), axis=flat.ndim - 1)
    return concatenate((arr, values), axis=axis)
def _digitize_dispatcher(x, bins, right=None):
    # __array_function__ dispatcher: `x` and `bins` may be array-like;
    # `right` is a plain flag.
    return x, bins
@array_function_dispatch(_digitize_dispatcher)
def digitize(x, bins, right=False):
    """
    Return the indices of the bins to which each value in input array belongs.

    ========= ============= ============================
    `right`    order of bins  returned index `i` satisfies
    ========= ============= ============================
    ``False``  increasing     ``bins[i-1] <= x < bins[i]``
    ``True``   increasing     ``bins[i-1] < x <= bins[i]``
    ``False``  decreasing     ``bins[i-1] > x >= bins[i]``
    ``True``   decreasing     ``bins[i-1] >= x > bins[i]``
    ========= ============= ============================

    If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
    returned as appropriate.

    Parameters
    ----------
    x : array_like
        Input array to be binned.  May have any shape.
    bins : array_like
        Array of bins.  It has to be 1-dimensional and monotonic.
    right : bool, optional
        Indicating whether the intervals include the right or the left bin
        edge.  Default behavior is (right==False) indicating that the
        interval does not include the right edge.  The left bin end is open
        in this case, i.e., ``bins[i-1] <= x < bins[i]`` is the default
        behavior for monotonically increasing bins.

    Returns
    -------
    indices : ndarray of ints
        Output array of indices, of same shape as `x`.

    Raises
    ------
    ValueError
        If `bins` is not monotonic.
    TypeError
        If the type of the input is complex.

    See Also
    --------
    bincount, histogram, unique, searchsorted

    Notes
    -----
    If values in `x` fall outside the bin range, attempting to index `bins`
    with the indices that `digitize` returns will result in an IndexError.

    `numpy.digitize` is implemented in terms of `numpy.searchsorted`, so a
    binary search is used to bin the values.  For monotonically *increasing*
    `bins`, the following are equivalent::

        np.digitize(x, bins, right=True)
        np.searchsorted(bins, x, side='left')

    Note that as the order of the arguments are reversed, the side must
    be too.

    Examples
    --------
    >>> import numpy as np
    >>> x = np.array([0.2, 6.4, 3.0, 1.6])
    >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
    >>> np.digitize(x, bins)
    array([1, 4, 3, 2])
    >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
    >>> bins = np.array([0, 5, 10, 15, 20])
    >>> np.digitize(x,bins,right=True)
    array([1, 2, 3, 4, 4])
    >>> np.digitize(x,bins,right=False)
    array([1, 3, 3, 4, 5])
    """
    x = _nx.asarray(x)
    bins = _nx.asarray(bins)

    # Rejected here for compatibility; searchsorted below would happily
    # accept complex input.
    if np.issubdtype(x.dtype, _nx.complexfloating):
        raise TypeError("x may not be complex")

    direction = _monotonicity(bins)
    if direction == 0:
        raise ValueError("bins must be monotonically increasing or decreasing")

    # `side` is flipped relative to `right` because the argument order of
    # searchsorted is the reverse of digitize's documented semantics.
    side = 'left' if right else 'right'
    if direction == -1:
        # Decreasing bins: search the reversed array and mirror the indices.
        return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)
    return _nx.searchsorted(bins, x, side=side)
| vectorize |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/extractors/test_base.py | {
"start": 4419,
"end": 5267
} | class ____(BaseOperator):
def execute(self, context) -> Any:
pass
def get_openlineage_facets_on_start(self) -> OperatorLineage:
return OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=JOB_FACETS,
)
def get_openlineage_facets_on_complete(self, task_instance) -> OperatorLineage:
return OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=FINISHED_FACETS,
)
def get_openlineage_facets_on_failure(self, task_instance) -> OperatorLineage:
return OperatorLineage(
inputs=INPUTS,
outputs=OUTPUTS,
run_facets=RUN_FACETS,
job_facets=FAILED_FACETS,
)
| OperatorWithAllOlMethods |
python | django__django | tests/model_fields/models.py | {
"start": 13038,
"end": 13421
} | class ____(models.Model):
class StringifiedJSONField(models.JSONField):
def get_prep_value(self, value):
return json.dumps(value, cls=self.encoder)
json_field = StringifiedJSONField()
class Meta:
required_db_features = {
"supports_json_field",
"supports_primitives_in_json_field",
}
| CustomSerializationJSONModel |
python | encode__django-rest-framework | tests/test_renderers.py | {
"start": 2655,
"end": 2823
} | class ____(APIView):
renderer_classes = (JSONRenderer,)
def get(self, request, **kwargs):
return Response(status=status.HTTP_204_NO_CONTENT)
| EmptyGETView |
python | ansible__ansible | test/lib/ansible_test/_internal/ci/local.py | {
"start": 9127,
"end": 9906
} | class ____(Authenticator):
"""Authenticate using an SSH key."""
@classmethod
def priority(cls) -> int:
return 100
@classmethod
def config_file(cls) -> pathlib.Path:
return pathlib.Path('~/.ansible-core-ci.auth').expanduser()
def prepare_auth_request(self, config: dict[str, object], context: AuthContext) -> dict[str, object]:
parts = self.config_file().read_text().strip().split(maxsplit=1)
username, key_file = parts
request: dict[str, object] = dict(
type="remote:ssh",
config=config,
username=username,
)
auth_helper = AuthHelper(pathlib.Path(key_file).expanduser())
auth_helper.sign_request(request, context)
return request
| SshAuthenticator |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/snowflake_datasource.py | {
"start": 8668,
"end": 8863
} | class ____(pydantic.UrlError):
"""
Custom Pydantic error for missing password in SnowflakeDsn.
"""
code = "url.password"
msg_template = "URL password invalid"
| _UrlPasswordError |
python | kamyu104__LeetCode-Solutions | Python/clear-digits.py | {
"start": 44,
"end": 457
} | class ____(object):
def clearDigits(self, s):
"""
:type s: str
:rtype: str
"""
s = list(s)
j = 0
for i, x in enumerate(s):
if x.isdigit():
j -= 1
continue
s[j] = x
j += 1
while len(s) > j:
s.pop()
return "".join(s)
# Time: O(n)
# Space: O(1)
# stack
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 741333,
"end": 743221
} | class ____(
sgqlc.types.Type,
Comment,
Updatable,
Deletable,
Labelable,
Lockable,
RepositoryNode,
Subscribable,
Reactable,
Votable,
Node,
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"answer",
"answer_chosen_at",
"answer_chosen_by",
"category",
"comments",
"number",
"poll",
"resource_path",
"title",
"url",
)
answer = sgqlc.types.Field("DiscussionComment", graphql_name="answer")
answer_chosen_at = sgqlc.types.Field(DateTime, graphql_name="answerChosenAt")
answer_chosen_by = sgqlc.types.Field(Actor, graphql_name="answerChosenBy")
category = sgqlc.types.Field(
sgqlc.types.non_null("DiscussionCategory"), graphql_name="category"
)
comments = sgqlc.types.Field(
sgqlc.types.non_null(DiscussionCommentConnection),
graphql_name="comments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
number = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="number")
poll = sgqlc.types.Field("DiscussionPoll", graphql_name="poll")
resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="resourcePath"
)
title = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="title")
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
| Discussion |
python | django__django | django/contrib/gis/db/backends/base/operations.py | {
"start": 335,
"end": 7013
} | class ____:
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mariadb = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = "%s"
@cached_property
def select_extent(self):
return self.select
# Aggregates
disallowed_aggregates = ()
geom_func_prefix = ""
# Mapping between Django function names and backend names, when names do
# not match; used in spatial_function_name().
function_names = {}
# Set of known unsupported functions of the backend
unsupported_functions = {
"Area",
"AsGeoJSON",
"AsGML",
"AsKML",
"AsSVG",
"AsWKB",
"AsWKT",
"Azimuth",
"BoundingCircle",
"Centroid",
"ClosestPoint",
"Difference",
"Distance",
"DistanceSpheroid",
"Envelope",
"ForcePolygonCW",
"FromWKB",
"FromWKT",
"GeoHash",
"GeometryDistance",
"GeometryType",
"Intersection",
"IsEmpty",
"IsValid",
"Length",
"LineLocatePoint",
"MakeValid",
"MemSize",
"NumDimensions",
"NumGeometries",
"NumPoints",
"Perimeter",
"PointOnSurface",
"Reverse",
"Rotate",
"Scale",
"SnapToGrid",
"SymDifference",
"Transform",
"Translate",
"Union",
}
# Constructors
from_text = False
# Default conversion functions for aggregates; will be overridden if
# implemented for the spatial backend.
def convert_extent(self, box, srid):
raise NotImplementedError(
"Aggregate extent not implemented for this spatial backend."
)
def convert_extent3d(self, box, srid):
raise NotImplementedError(
"Aggregate 3D extent not implemented for this spatial backend."
)
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Return the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError(
"subclasses of BaseSpatialOperations must provide a geo_db_type() method"
)
def get_distance(self, f, value, lookup_type):
"""
Return the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError(
"Distance operations not available on this spatial backend."
)
def get_geom_placeholder(self, f, value, compiler):
"""
Return the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
def transform_value(value, field):
return value is not None and value.srid != field.srid
if hasattr(value, "as_sql"):
return (
"%s(%%s, %s)" % (self.spatial_function_name("Transform"), f.srid)
if transform_value(value.output_field, f)
else "%s"
)
if transform_value(value, f):
# Add Transform() to the SQL placeholder.
return "%s(%s(%%s,%s), %s)" % (
self.spatial_function_name("Transform"),
self.from_text,
value.srid,
f.srid,
)
elif self.connection.features.has_spatialrefsys_table:
return "%s(%%s,%s)" % (self.from_text, f.srid)
else:
# For backwards compatibility on MySQL (#27464).
return "%s(%%s)" % self.from_text
def check_expression_support(self, expression):
if isinstance(expression, self.disallowed_aggregates):
raise NotSupportedError(
"%s spatial aggregation is not supported by this database backend."
% expression.name
)
super().check_expression_support(expression)
def spatial_aggregate_name(self, agg_name):
raise NotImplementedError(
"Aggregate support not implemented for this spatial backend."
)
def spatial_function_name(self, func_name):
if func_name in self.unsupported_functions:
raise NotSupportedError(
"This backend doesn't support the %s function." % func_name
)
return self.function_names.get(func_name, self.geom_func_prefix + func_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError(
"Subclasses of BaseSpatialOperations must provide a geometry_columns() "
"method."
)
def spatial_ref_sys(self):
raise NotImplementedError(
"subclasses of BaseSpatialOperations must a provide spatial_ref_sys() "
"method"
)
distance_expr_for_lookup = staticmethod(Distance)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
if isinstance(expression.output_field, GeometryField):
converters.append(self.get_geometry_converter(expression))
return converters
def get_geometry_converter(self, expression):
raise NotImplementedError(
"Subclasses of BaseSpatialOperations must provide a "
"get_geometry_converter() method."
)
def get_area_att_for_field(self, field):
if field.geodetic(self.connection):
if self.connection.features.supports_area_geodetic:
return "sq_m"
raise NotImplementedError(
"Area on geodetic coordinate systems not supported."
)
else:
units_name = field.units_name(self.connection)
if units_name:
return AreaMeasure.unit_attname(units_name)
def get_distance_att_for_field(self, field):
dist_att = None
if field.geodetic(self.connection):
if self.connection.features.supports_distance_geodetic:
dist_att = "m"
else:
units = field.units_name(self.connection)
if units:
dist_att = DistanceMeasure.unit_attname(units)
return dist_att
| BaseSpatialOperations |
python | spack__spack | lib/spack/spack/environment/list.py | {
"start": 5782,
"end": 5890
} | class ____(NamedTuple):
name: str
yaml_list: List[Union[str, Dict]]
when: Optional[str]
| Definition |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/field_utils.py | {
"start": 1073,
"end": 1252
} | class ____:
pass
FIELD_NO_DEFAULT_PROVIDED = __FieldValueSentinel
INFER_OPTIONAL_COMPOSITE_FIELD = __InferOptionalCompositeFieldSentinel
| __InferOptionalCompositeFieldSentinel |
python | apache__airflow | providers/yandex/tests/unit/yandex/hooks/test_dataproc.py | {
"start": 2331,
"end": 7883
} | class ____:
def _init_hook(self):
with mock.patch(f"{BASEHOOK_PATCH_PATH}.get_connection") as mock_get_connection:
mock_get_connection.return_value = self.connection
self.hook = DataprocHook()
def setup_method(self):
self.connection = Connection(extra=json.dumps({"oauth": OAUTH_TOKEN}))
self._init_hook()
@mock.patch("yandexcloud.SDK.create_operation_and_get_result")
def test_create_dataproc_cluster_mocked(self, mock_create_operation):
self._init_hook()
self.hook.dataproc_client.create_cluster(
cluster_name=CLUSTER_NAME,
ssh_public_keys=SSH_PUBLIC_KEYS,
folder_id=FOLDER_ID,
subnet_id=SUBNET_ID,
zone=AVAILABILITY_ZONE_ID,
s3_bucket=S3_BUCKET_NAME_FOR_LOGS,
cluster_image_version=CLUSTER_IMAGE_VERSION,
service_account_id=SERVICE_ACCOUNT_ID,
)
assert mock_create_operation.called
@mock.patch("yandexcloud.SDK.create_operation_and_get_result")
def test_delete_dataproc_cluster_mocked(self, mock_create_operation):
self._init_hook()
self.hook.dataproc_client.delete_cluster("my_cluster_id")
assert mock_create_operation.called
@mock.patch("yandexcloud.SDK.create_operation_and_get_result")
def test_create_hive_job_hook(self, mock_create_operation):
self._init_hook()
self.hook.dataproc_client.create_hive_job(
cluster_id="my_cluster_id",
continue_on_failure=False,
name="Hive job",
properties=None,
query="SELECT 1;",
script_variables=None,
)
assert mock_create_operation.called
@mock.patch("yandexcloud.SDK.create_operation_and_get_result")
def test_create_mapreduce_job_hook(self, mock_create_operation):
self._init_hook()
self.hook.dataproc_client.create_mapreduce_job(
archive_uris=None,
args=[
"-mapper",
"mapper.py",
"-reducer",
"reducer.py",
"-numReduceTasks",
"1",
"-input",
"s3a://some-in-bucket/jobs/sources/data/cities500.txt.bz2",
"-output",
"s3a://some-out-bucket/dataproc/job/results",
],
cluster_id="my_cluster_id",
file_uris=[
"s3a://some-in-bucket/jobs/sources/mapreduce-001/mapper.py",
"s3a://some-in-bucket/jobs/sources/mapreduce-001/reducer.py",
],
jar_file_uris=None,
main_class="org.apache.hadoop.streaming.HadoopStreaming",
main_jar_file_uri=None,
name="Mapreduce job",
properties={
"yarn.app.mapreduce.am.resource.mb": "2048",
"yarn.app.mapreduce.am.command-opts": "-Xmx2048m",
"mapreduce.job.maps": "6",
},
)
assert mock_create_operation.called
@mock.patch("yandexcloud.SDK.create_operation_and_get_result")
def test_create_spark_job_hook(self, mock_create_operation):
self._init_hook()
self.hook.dataproc_client.create_spark_job(
archive_uris=["s3a://some-in-bucket/jobs/sources/data/country-codes.csv.zip"],
args=[
"s3a://some-in-bucket/jobs/sources/data/cities500.txt.bz2",
"s3a://some-out-bucket/dataproc/job/results/${{JOB_ID}}",
],
cluster_id="my_cluster_id",
file_uris=["s3a://some-in-bucket/jobs/sources/data/config.json"],
jar_file_uris=[
"s3a://some-in-bucket/jobs/sources/java/icu4j-61.1.jar",
"s3a://some-in-bucket/jobs/sources/java/commons-lang-2.6.jar",
"s3a://some-in-bucket/jobs/sources/java/opencsv-4.1.jar",
"s3a://some-in-bucket/jobs/sources/java/json-20190722.jar",
],
main_class="ru.yandex.cloud.dataproc.examples.PopulationSparkJob",
main_jar_file_uri="s3a://data-proc-public/jobs/sources/java/dataproc-examples-1.0.jar",
name="Spark job",
properties={"spark.submit.deployMode": "cluster"},
)
assert mock_create_operation.called
@mock.patch("yandexcloud.SDK.create_operation_and_get_result")
def test_create_pyspark_job_hook(self, mock_create_operation):
self._init_hook()
self.hook.dataproc_client.create_pyspark_job(
archive_uris=["s3a://some-in-bucket/jobs/sources/data/country-codes.csv.zip"],
args=[
"s3a://some-in-bucket/jobs/sources/data/cities500.txt.bz2",
"s3a://some-out-bucket/jobs/results/${{JOB_ID}}",
],
cluster_id="my_cluster_id",
file_uris=["s3a://some-in-bucket/jobs/sources/data/config.json"],
jar_file_uris=[
"s3a://some-in-bucket/jobs/sources/java/dataproc-examples-1.0.jar",
"s3a://some-in-bucket/jobs/sources/java/icu4j-61.1.jar",
"s3a://some-in-bucket/jobs/sources/java/commons-lang-2.6.jar",
],
main_python_file_uri="s3a://some-in-bucket/jobs/sources/pyspark-001/main.py",
name="Pyspark job",
properties={"spark.submit.deployMode": "cluster"},
python_file_uris=["s3a://some-in-bucket/jobs/sources/pyspark-001/geonames.py"],
)
assert mock_create_operation.called
| TestYandexCloudDataprocHook |
python | modin-project__modin | versioneer.py | {
"start": 23583,
"end": 67910
} | class ____(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f: Callable) -> Callable:
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(
commands: List[str],
args: List[str],
cwd: Optional[str] = None,
verbose: bool = False,
hide_stderr: bool = False,
env: Optional[Dict[str, str]] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
popen_kwargs: Dict[str, Any] = {}
if sys.platform == "win32":
# This hides the console window if pythonw.exe is used
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_kwargs["startupinfo"] = startupinfo
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None), **popen_kwargs)
break
except OSError as e:
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(
parentdir_prefix: str,
root: str,
verbose: bool,
) -> Dict[str, Any]:
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs: str) -> Dict[str, str]:
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords: Dict[str, str] = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(
keywords: Dict[str, str],
tag_prefix: str,
verbose: bool,
) -> Dict[str, Any]:
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(
tag_prefix: str,
root: str,
verbose: bool,
runner: Callable = run_command
) -> Dict[str, Any]:
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=not verbose)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, [
"describe", "--tags", "--dirty", "--always", "--long",
"--match", f"{tag_prefix}[[:digit:]]*"
], cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces: Dict[str, Any] = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
pieces["distance"] = len(out.split()) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces: Dict[str, Any]) -> str:
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces: Dict[str, Any]) -> str:
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces: Dict[str, Any]) -> str:
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]:
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces: Dict[str, Any]) -> str:
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%%d.dev%%d" %% (post_version + 1, pieces["distance"])
else:
rendered += ".post0.dev%%d" %% (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces: Dict[str, Any]) -> str:
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_post_branch(pieces: Dict[str, Any]) -> str:
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces: Dict[str, Any]) -> str:
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces: Dict[str, Any]) -> str:
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces: Dict[str, Any]) -> str:
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]:
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions() -> Dict[str, Any]:
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs: str) -> Dict[str, str]:
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords: Dict[str, str] = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(
keywords: Dict[str, str],
tag_prefix: str,
verbose: bool,
) -> Dict[str, Any]:
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(
tag_prefix: str, root: str, verbose: bool, runner: Callable = run_command
) -> Dict[str, Any]:
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# GIT_DIR can interfere with correct operation of Versioneer.
# It may be intended to be passed to the Versioneer-versioned project,
# but that should not change where we get our version from.
env = os.environ.copy()
env.pop("GIT_DIR", None)
runner = functools.partial(runner, env=env)
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=not verbose)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
f"{tag_prefix}[[:digit:]]*",
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces: Dict[str, Any] = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
pieces["distance"] = len(out.split()) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(versionfile_source: str, ipy: Optional[str]) -> None:
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution, then `git add`-ing the touched
    files so the change is staged.
    """
    git_cmd = ["git.cmd", "git.exe"] if sys.platform == "win32" else ["git"]
    to_add = [versionfile_source]
    if ipy:
        to_add.append(ipy)
    if "VERSIONEER_PEP518" not in globals():
        # Stage versioneer.py itself too (skipped under the PEP 518 build path).
        try:
            self_path = __file__
            if self_path.endswith((".pyc", ".pyo")):
                self_path = os.path.splitext(self_path)[0] + ".py"
            to_add.append(os.path.relpath(self_path))
        except NameError:
            # __file__ is unset (e.g. frozen/interactive); fall back to the name.
            to_add.append("versioneer.py")
    # Check whether .gitattributes already carries the export-subst marker
    # for the version file.
    already_marked = False
    try:
        with open(".gitattributes", "r") as attrs:
            for raw in attrs:
                stripped = raw.strip()
                if stripped.startswith(versionfile_source) and "export-subst" in stripped.split()[1:]:
                    already_marked = True
                    break
    except OSError:
        # No .gitattributes yet; we will create it below.
        pass
    if not already_marked:
        with open(".gitattributes", "a+") as attrs:
            attrs.write(f"{versionfile_source} export-subst\n")
        to_add.append(".gitattributes")
    run_command(git_cmd, ["add", "--"] + to_add)
def versions_from_parentdir(
    parentdir_prefix: str,
    root: str,
    verbose: bool,
) -> Dict[str, Any]:
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            # Everything after the prefix is taken as the version string.
            return {
                "version": basename[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # walk one level up
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.29) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename: str) -> Dict[str, Any]:
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as fobj:
            text = fobj.read()
    except OSError:
        raise NotThisMethod("unable to read _version.py")
    # The generated _version.py embeds the data between fixed markers; accept
    # both Unix and Windows line endings right after the opening quotes.
    for newline in ("\n", "\r\n"):
        match = re.search(
            r"version_json = '''" + newline + r"(.*)''' # END VERSION_JSON",
            text,
            re.M | re.S,
        )
        if match:
            return json.loads(match.group(1))
    raise NotThisMethod("no version_json in _version.py")
def write_to_version_file(filename: str, versions: Dict[str, Any]) -> None:
    """Write the given version number to the given _version.py file."""
    # Stable key order / fixed separators keep the generated file reproducible.
    payload = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
    with open(filename, "w") as fobj:
        fobj.write(SHORT_VERSION_PY % payload)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces: Dict[str, Any]) -> str:
    """Return a + if we don't already have one, else return a .

    PEP 440 local version labels begin with "+" and separate further
    segments with "."; this picks the right separator for appending to
    ``pieces["closest-tag"]``.
    """
    # dict.get() only applies the default when the key is ABSENT; a present
    # None value (the "no tag" case) would make `"+" in None` raise a
    # TypeError, hence the extra `or ""`.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces: Dict[str, Any]) -> str:
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.DgHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing has ever been tagged
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        return version + ".dirty" if pieces["dirty"] else version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # Append the local-version segment: commit distance plus short hash.
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_branch(pieces: Dict[str, Any]) -> str:
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    on_master = pieces["branch"] == "master"
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            if not on_master:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                version += ".dirty"
        return version
    # exception #1: no tag anywhere in history
    version = "0" if on_master else "0.dev0"
    version += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
    if pieces["dirty"]:
        version += ".dirty"
    return version
def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]:
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release and the
    post-release version number (or None if no single post-release
    segment is present).
    """
    parts = ver.split(".post")
    if len(parts) != 2:
        # No (or a repeated, hence ambiguous) ".post" separator.
        return parts[0], None
    # A bare trailing ".post" counts as post-release 0.
    return parts[0], int(parts[1] or 0)
def render_pep440_pre(pieces: Dict[str, Any]) -> str:
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post0.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        # Sitting exactly on the tag: the tag IS the version.
        return tag
    # Bump (or start) the post-release segment, carrying the commit
    # distance in a .dev marker.
    base, post = pep440_split_post(tag)
    if post is None:
        return base + ".post0.dev%d" % pieces["distance"]
    return base + ".post%d.dev%d" % (post + 1, pieces["distance"])
def render_pep440_post(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces) + "g%s" % pieces["short"]
    return version
def render_pep440_post_branch(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    off_master = pieces["branch"] != "master"
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if off_master:
            version += ".dev0"
        version += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if off_master:
            version += ".dev0"
        version += plus_or_dot(pieces) + "g%s" % pieces["short"]
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_old(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        # Only decorate the tag when we have moved past it or are dirty.
        changed = pieces["distance"] or pieces["dirty"]
    else:
        # exception #1: no tag -> always emit the .post segment
        version = "0"
        changed = True
    if changed:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
    return version
def render_git_describe(pieces: Dict[str, Any]) -> str:
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: just the bare short hash
        version = pieces["short"]
    elif pieces["distance"]:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        version = tag
    return version + "-dirty" if pieces["dirty"] else version
def render_git_describe_long(pieces: Dict[str, Any]) -> str:
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: just the bare short hash
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]:
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Something went wrong upstream; report a placeholder version.
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    # Dispatch table instead of an if/elif chain.
    renderers = {
        "pep440": render_pep440,
        "pep440-branch": render_pep440_branch,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-post-branch": render_pep440_post_branch,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if not style or style == "default":
        style = "pep440"  # the default
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    return {
        "version": renderers[style](pieces),
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
| NotThisMethod |
python | walkccc__LeetCode | solutions/2549. Count Distinct Numbers on Board/2549.py | {
"start": 0,
"end": 86
} | class ____:
def distinctIntegers(self, n: int) -> int:
return max(n - 1, 1)
| Solution |
python | astropy__astropy | astropy/extern/ply/cpp.py | {
"start": 3601,
"end": 4163
} | class ____(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
| Macro |
python | pytorch__pytorch | test/dynamo/test_trace_rules.py | {
"start": 11171,
"end": 19361
} | class ____(torch._dynamo.test_case.TestCase):
def _check_set_equality(self, generated, used, rule_map, ignored_set):
x = generated - used
y = used - generated
msg1 = (
f"New torch objects: {x} "
f"were not added to `trace_rules.{rule_map}` or `test_trace_rules.{ignored_set}`. "
"Refer the instruction in `torch/_dynamo/trace_rules.py` for more details."
)
msg2 = (
f"Existing torch objects: {y} were removed. "
f"Please remove them from `trace_rules.{rule_map}` or `test_trace_rules.{ignored_set}`. "
"Refer the instruction in `torch/_dynamo/trace_rules.py` for more details."
)
self.assertTrue(len(x) == 0, msg1)
self.assertTrue(len(y) == 0, msg2)
# We are using python function and module string names for these inlinelist,
# this unit test is to make sure the functions/modules can be correctly imported
# or loaded in case there is typo in the strings.
def test_skipfiles_inlinelist(self):
for m in LEGACY_MOD_INLINELIST.union(MOD_INLINELIST):
try:
mod = importlib.import_module(m)
except ImportError:
continue
else:
self.assertTrue(
isinstance(mod, types.ModuleType),
f"{m} from trace_rules.MOD_INLINELIST/LEGACY_MOD_INLINELIST "
"is not a python module, please check and correct it.",
)
@unittest.skip(
"This test keeps getting broken and our disable infra is not handling well. see #120627"
)
def test_torch_name_rule_map_updated(self):
# Generate the allowed objects based on heuristic defined in `allowed_functions.py`,
objs = gen_allowed_objs_and_ids(record=True, c_binding_only=True)
# Test C binding in graph functions are updated in torch_name_rule_map.
generated = objs.c_binding_in_graph_functions
used = set()
for x in (
set(torch_c_binding_in_graph_functions.keys())
| ignored_c_binding_in_graph_function_names
):
obj = load_object(x)
if obj is not None:
used.add(obj)
self._check_set_equality(
generated,
used,
"torch_c_binding_in_graph_functions",
"ignored_c_binding_in_graph_function_names",
)
# For non C binding in graph functions, we only test if they can be loaded successfully.
for f in torch_non_c_binding_in_graph_functions:
self.assertTrue(
isinstance(
load_object(f),
(
types.FunctionType,
types.BuiltinFunctionType,
types.MethodDescriptorType,
types.WrapperDescriptorType,
),
)
)
def test_force_inline_torch_function(self):
# `torch._dynamo.utils.istype` is skipped by default
def fn(x):
if istype(x, torch.Tensor):
return x + 1
else:
return x - 1
_manual_torch_name_rule_map = manual_torch_name_rule_map.copy()
# Force inline `torch._dynamo.utils.istype` by setting trace rule.
_manual_torch_name_rule_map["torch._dynamo.utils.istype"] = UserFunctionVariable
_torch_name_rule_map = [
_manual_torch_name_rule_map,
torch_c_binding_in_graph_functions,
torch_non_c_binding_in_graph_functions,
]
self.assertTrue(
"torch._dynamo" not in torch._dynamo.trace_rules.LEGACY_MOD_INLINELIST
)
self.assertTrue("torch._dynamo" not in torch._dynamo.trace_rules.MOD_INLINELIST)
with (
unittest.mock.patch(
"torch._dynamo.trace_rules.torch_name_rule_map",
_torch_name_rule_map,
),
unittest.mock.patch(
"torch._dynamo.trace_rules.get_torch_obj_rule_map",
torch._dynamo.trace_rules.get_torch_obj_rule_map.__wrapped__, # bypass functools.lru_cache
),
):
x = torch.rand(3)
opt_fn = torch.compile(backend="eager", fullgraph=True)(fn)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_force_inline_custom_function(self):
mod, func = create_dummy_module_and_function()
def fn(x):
return func(x)
_manual_torch_name_rule_map = manual_torch_name_rule_map.copy()
# Force inline `mod.func` by setting trace rule.
_manual_torch_name_rule_map[f"{mod.__name__}.{func.__name__}"] = (
UserFunctionVariable
)
_torch_name_rule_map = [
_manual_torch_name_rule_map,
torch_c_binding_in_graph_functions,
torch_non_c_binding_in_graph_functions,
]
with (
unittest.mock.patch(
"torch._dynamo.trace_rules.torch_name_rule_map",
_torch_name_rule_map,
),
unittest.mock.patch(
"torch._dynamo.trace_rules.get_torch_obj_rule_map",
torch._dynamo.trace_rules.get_torch_obj_rule_map.__wrapped__,
),
):
# First adding the module to SKIP_DIRS so that it will be skipped by default.
torch._dynamo.trace_rules.add(mod.__name__)
x = torch.rand(3)
opt_fn = torch.compile(backend="eager", fullgraph=True)(fn)
ref = fn(x)
res = opt_fn(x)
self.assertEqual(ref, res)
def test_no_special_handlers_for_torch_non_c_bindings(self):
handlers = TorchInGraphFunctionVariable._get_handlers()
# These handlers are manually audited to be safe
safe_handlers = (
"handle_tracing_state_functions", # No global state (constant)
"handle_radians", # No global state (constant)
"handle_is_tensor", # No global state
"handle_torch_compile", # No global state, constant
"handle_ntuple", # No global state
"handle_is_grad_enabled", # Safely implemented
"handle_use_deterministic_algorithms", # Guarded variable
"handle_are_deterministic_algorithms_enabled", # Guarded constant
"handle_device_interface_stream", # No global state
"handle_cudnn_is_acceptable", # No global state
"handle_assert", # No global state (constant)
"handle_nested_tensor", # No global state
"handle_current_stream", # Safely implemented
)
for fn in handlers:
if isinstance(fn, staticmethod) or inspect.ismethod(fn):
fn_name = f"{fn.__module__}#{fn.__name__}"
else:
fn_name = f"{fn.__module__}.{fn.__name__}"
if handlers[fn].__name__ in safe_handlers:
continue
self.assertFalse(
fn_name in torch_non_c_binding_in_graph_functions,
(
f"torch function {fn_name} has a special handler {handlers[fn].__name__}.\n"
"We expected all functions in `torch_non_c_binding_in_graph_functions` to be safe to cache.\n"
"Functions with special handlers may not be safe to cache, since they can close over global state.\n"
"If your handler/function is safe to cache, please add it to the list of safe handlers above.\n"
"Otherwise, add it to `manual_torch_name_rule_map` instead."
),
)
def test_almost_impossible_missing_name(self):
class weird: # noqa: UP004
def __getattribute__(self, name):
if name == "__name__":
raise AttributeError("test")
w = weird()
o = set()
with self.assertRaises(AttributeError):
w.__name__
self.assertEqual(lookup_inner(w, name=None, reasons=o), SkipFunctionVariable)
| TraceRuleTests |
python | python-poetry__poetry | src/poetry/utils/env/virtual_env.py | {
"start": 753,
"end": 6056
} | class ____(Env):
"""
A virtual Python environment.
"""
def __init__(self, path: Path, base: Path | None = None) -> None:
super().__init__(path, base)
# If base is None, it probably means this is
# a virtualenv created from VIRTUAL_ENV.
# In this case we need to get sys.base_prefix
# from inside the virtualenv.
if base is None:
output = self.run_python_script(GET_BASE_PREFIX)
self._base = Path(output.strip())
@property
def sys_path(self) -> list[str]:
output = self.run_python_script(GET_SYS_PATH)
paths: list[str] = json.loads(output)
return paths
def get_supported_tags(self) -> list[Tag]:
from packaging.tags import compatible_tags
from packaging.tags import cpython_tags
from packaging.tags import generic_tags
python = self.version_info[:3]
interpreter_name = self.marker_env["interpreter_name"]
interpreter_version = self.marker_env["interpreter_version"]
sysconfig_platform = self.marker_env["sysconfig_platform"]
free_threading = self.marker_env["free_threading"]
abis: list[str] | None = None
if interpreter_name == "pp":
interpreter = "pp3"
elif interpreter_name == "cp":
interpreter = f"{interpreter_name}{interpreter_version}"
if free_threading:
abis = [f"{interpreter}t"]
else:
interpreter = None
# Why using sysconfig.get_platform() and not ...
# ... platform.machine()
# This one is also different for x86_64 Linux and aarch64 Linux,
# but it is the same for a 32 Bit and a 64 Bit Python on Windows!
# ... platform.architecture()
# This one is also different for a 32 Bit and a 64 Bit Python on Windows,
# but it is the same for x86_64 Linux and aarch64 Linux!
platforms = None
if sysconfig_platform != sysconfig.get_platform():
# Relevant for the following use cases, for example:
# - using a 32 Bit Python on a 64 Bit Windows
# - using an emulated aarch Python on an x86_64 Linux
output = self.run_python_script(GET_PLATFORMS)
platforms = json.loads(output)
return [
*(
cpython_tags(python, abis=abis, platforms=platforms)
if interpreter_name == "cp"
else generic_tags(platforms=platforms)
),
*compatible_tags(python, interpreter=interpreter, platforms=platforms),
]
def get_marker_env(self) -> MarkerEnv:
output = self.run_python_script(GET_ENVIRONMENT_INFO)
env: MarkerEnv = json.loads(output)
# Lists and tuples are the same in JSON and loaded as list.
env["version_info"] = tuple(env["version_info"]) # type: ignore[typeddict-item]
return env
def get_paths(self) -> dict[str, str]:
output = self.run_python_script(GET_PATHS)
paths: dict[str, str] = json.loads(output)
return paths
def is_venv(self) -> bool:
return True
def is_sane(self) -> bool:
# A virtualenv is considered sane if "python" exists.
return os.path.exists(self.python)
def _run(self, cmd: list[str], **kwargs: Any) -> str:
kwargs["env"] = self.get_temp_environ(environ=kwargs.get("env"))
return super()._run(cmd, **kwargs)
def get_temp_environ(
self,
environ: dict[str, str] | None = None,
exclude: list[str] | None = None,
**kwargs: str,
) -> dict[str, str]:
exclude = exclude or []
exclude.extend(["PYTHONHOME", "__PYVENV_LAUNCHER__"])
if environ:
environ = deepcopy(environ)
for key in exclude:
environ.pop(key, None)
else:
environ = {k: v for k, v in os.environ.items() if k not in exclude}
environ.update(kwargs)
environ["PATH"] = self._updated_path()
environ["VIRTUAL_ENV"] = str(self._path)
return environ
def execute(self, bin: str, *args: str, **kwargs: Any) -> int:
kwargs["env"] = self.get_temp_environ(environ=kwargs.get("env"))
return super().execute(bin, *args, **kwargs)
@contextmanager
def temp_environ(self) -> Iterator[None]:
environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(environ)
def _updated_path(self) -> str:
return os.pathsep.join([str(self._bin_dir), os.environ.get("PATH", "")])
@cached_property
def includes_system_site_packages(self) -> bool:
pyvenv_cfg = self._path / "pyvenv.cfg"
return pyvenv_cfg.exists() and (
re.search(
r"^\s*include-system-site-packages\s*=\s*true\s*$",
pyvenv_cfg.read_text(encoding="utf-8"),
re.IGNORECASE | re.MULTILINE,
)
is not None
)
def is_path_relative_to_lib(self, path: Path) -> bool:
return super().is_path_relative_to_lib(path) or (
self.includes_system_site_packages
and self.parent_env.is_path_relative_to_lib(path)
)
| VirtualEnv |
python | fastai__fastai | fastai/vision/core.py | {
"start": 10676,
"end": 11753
} | class ____(Transform):
def setups(self, dl): self.vocab = dl.vocab
def decode (self, x, **kwargs):
self.bbox,self.lbls = None,None
return self._call('decodes', x, **kwargs)
def decodes(self, x:TensorMultiCategory):
self.lbls = [self.vocab[a] for a in x]
return x if self.bbox is None else LabeledBBox(self.bbox, self.lbls)
def decodes(self, x:TensorBBox):
self.bbox = x
return self.bbox if self.lbls is None else LabeledBBox(self.bbox, self.lbls)
# %% ../../nbs/07_vision.core.ipynb 96
#LabeledBBox can be sent in a tl with MultiCategorize (depending on the order of the tls) but it is already decoded.
@MultiCategorize
def decodes(self, x:LabeledBBox): return x
# %% ../../nbs/07_vision.core.ipynb 97
@PointScaler
def encodes(self, x:TensorBBox):
pnts = self.encodes(cast(x.view(-1,2), TensorPoint))
return cast(pnts.view(-1, 4), TensorBBox)
@PointScaler
def decodes(self, x:TensorBBox):
pnts = self.decodes(cast(x.view(-1,2), TensorPoint))
return cast(pnts.view(-1, 4), TensorBBox)
| BBoxLabeler |
python | doocs__leetcode | solution/0400-0499/0431.Encode N-ary Tree to Binary Tree/Solution.py | {
"start": 235,
"end": 356
} | class ____:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
"""
| TreeNode |
python | ipython__ipython | IPython/core/magic_arguments.py | {
"start": 8147,
"end": 8710
} | class ____(ArgDecorator):
"""
Base class to define a wrapper for ArgumentParser method.
Child class must define either `_method_name` or `add_to_parser`.
"""
_method_name: str
def __init__(self, *args, **kwds):
self.args = args
self.kwds = kwds
def add_to_parser(self, parser, group):
""" Add this object's information to the parser.
"""
if group is not None:
parser = group
getattr(parser, self._method_name)(*self.args, **self.kwds)
return None
| ArgMethodWrapper |
python | sanic-org__sanic | guide/webapp/display/page/page.py | {
"start": 550,
"end": 772
} | class ____:
language: str = _DEFAULT
title: str = ""
description: str = ""
layout: str = "main"
features: list[dict[str, str]] = field(default_factory=list)
content_class: str = ""
@dataclass
| PageMeta |
python | openai__openai-python | src/openai/types/beta/threads/refusal_delta_block.py | {
"start": 223,
"end": 423
} | class ____(BaseModel):
index: int
"""The index of the refusal part in the message."""
type: Literal["refusal"]
"""Always `refusal`."""
refusal: Optional[str] = None
| RefusalDeltaBlock |
python | keras-team__keras | keras/src/backend/torch/optimizers/torch_adam.py | {
"start": 147,
"end": 1889
} | class ____(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adam):
def _parallel_update_step(
self,
grads,
variables,
learning_rate,
):
keras_variables = variables
variables = [v.value for v in variables]
dtype = variables[0].dtype
lr = ops.cast(learning_rate, dtype)
local_step = ops.cast(self.iterations + 1, dtype)
beta_1_power = ops.power(ops.cast(self.beta_1, dtype), local_step)
beta_2_power = ops.power(ops.cast(self.beta_2, dtype), local_step)
alpha = lr * ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)
m_list = [
self._momentums[self._get_variable_index(variable)].value
for variable in keras_variables
]
v_list = [
self._velocities[self._get_variable_index(variable)].value
for variable in keras_variables
]
torch._foreach_mul_(m_list, self.beta_1)
torch._foreach_add_(m_list, grads, alpha=1 - self.beta_1)
torch._foreach_mul_(v_list, self.beta_2)
torch._foreach_add_(
v_list, torch._foreach_mul(grads, grads), alpha=1 - self.beta_2
)
if self.amsgrad:
v_hat_list = [
self._velocity_hats[self._get_variable_index(variable)].value
for variable in keras_variables
]
torch._foreach_maximum_(v_hat_list, v_list)
v_list = v_hat_list
torch._foreach_add_(
variables,
torch._foreach_div(
torch._foreach_mul(m_list, alpha),
torch._foreach_add(torch._foreach_sqrt(v_list), self.epsilon),
),
alpha=-1,
)
| Adam |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/fc.py | {
"start": 4554,
"end": 4600
} | class ____(ccroot.stlink_task):
pass
| fcstlib |
python | sphinx-doc__sphinx | tests/roots/test-root/autodoc_target.py | {
"start": 1298,
"end": 1375
} | class ____:
def inheritedmeth(self):
"""Inherited function."""
| Base |
python | sqlalchemy__sqlalchemy | test/ext/test_baked.py | {
"start": 9868,
"end": 12324
} | class ____(BakedTest):
@classmethod
def setup_mappers(cls):
User = cls.classes.User
Address = cls.classes.Address
Order = cls.classes.Order
cls.mapper_registry.map_imperatively(
User,
cls.tables.users,
properties={
"addresses": relationship(
Address, order_by=cls.tables.addresses.c.id
),
"orders": relationship(Order, order_by=cls.tables.orders.c.id),
},
)
cls.mapper_registry.map_imperatively(Address, cls.tables.addresses)
cls.mapper_registry.map_imperatively(Order, cls.tables.orders)
@contextlib.contextmanager
def _fixture(self):
from sqlalchemy import event
User = self.classes.User
with testing.db.connect() as conn:
@event.listens_for(conn, "before_execute")
def before_execute(
conn, clauseelement, multiparams, params, execution_options
):
# execution options are kind of moving around a bit,
# test both places
assert (
"yes" in clauseelement._execution_options
or "yes" in execution_options
)
bq = self.bakery(lambda s: s.query(User.id).order_by(User.id))
sess = Session(conn)
yield sess, bq
def test_first(self):
with self._fixture() as (sess, bq):
result = bq(sess).with_post_criteria(
lambda q: q.execution_options(yes=True)
)
eq_(result.first(), (7,))
def test_iter(self):
with self._fixture() as (sess, bq):
result = bq(sess).with_post_criteria(
lambda q: q.execution_options(yes=True)
)
eq_(list(result)[0], (7,))
def test_spoiled(self):
with self._fixture() as (sess, bq):
result = bq.spoil()(sess).with_post_criteria(
lambda q: q.execution_options(yes=True)
)
eq_(list(result)[0], (7,))
def test_get(self):
User = self.classes.User
with self._fixture() as (sess, bq):
bq = self.bakery(lambda s: s.query(User))
result = bq(sess).with_post_criteria(
lambda q: q.execution_options(yes=True)
)
eq_(result.get(7), User(id=7))
| ResultPostCriteriaTest |
python | jazzband__django-waffle | waffle/tests/test_waffle.py | {
"start": 951,
"end": 18808
} | class ____(TestCase):
databases = DATABASES
def assert_flag_dynamically_created_with_value(self, expected_value):
FLAG_NAME = 'my_dynamically_created_flag'
flag_model = waffle.get_waffle_flag_model()
assert flag_model.objects.count() == 0
assert expected_value == waffle.flag_is_active(get(), FLAG_NAME)
assert flag_model.objects.count() == 1
flag = flag_model.objects.get(name=FLAG_NAME)
assert flag.name == FLAG_NAME
# We assert no queries are made to ensure flags created when the
# `CREATE_MISSING_FLAGS` setting is active are properly cached.
with self.assertNumQueries(0):
assert expected_value == waffle.flag_is_active(get(), FLAG_NAME)
def test_persist_active_flag(self):
waffle.get_waffle_flag_model().objects.create(name='myflag', percent='0.1')
request = get()
# Flag stays on.
request.COOKIES['dwf_myflag'] = 'True'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' in response.cookies
self.assertEqual('True', response.cookies['dwf_myflag'].value)
def test_persist_inactive_flag(self):
waffle.get_waffle_flag_model().objects.create(name='myflag', percent='99.9')
request = get()
# Flag stays off.
request.COOKIES['dwf_myflag'] = 'False'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' in response.cookies
self.assertEqual('False', response.cookies['dwf_myflag'].value)
def test_no_set_unused_flag(self):
"""An unused flag shouldn't have its cookie reset."""
request = get()
request.COOKIES['dwf_unused'] = 'True'
response = process_request(request, views.flag_in_view)
assert 'dwf_unused' not in response.cookies
def test_superuser(self):
"""Test the superuser switch."""
waffle.get_waffle_flag_model().objects.create(name='myflag', superusers=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
superuser = get_user_model()(username='foo', is_superuser=True)
request.user = superuser
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
non_superuser = get_user_model()(username='bar', is_superuser=False)
non_superuser.save()
request.user = non_superuser
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_staff(self):
"""Test the staff switch."""
waffle.get_waffle_flag_model().objects.create(name='myflag', staff=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
staff = get_user_model()(username='foo', is_staff=True)
request.user = staff
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
non_staff = get_user_model()(username='foo', is_staff=False)
non_staff.save()
request.user = non_staff
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_languages(self):
waffle.get_waffle_flag_model().objects.create(name='myflag', languages='en,fr')
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
request.LANGUAGE_CODE = 'en'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
request.LANGUAGE_CODE = 'de'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_user(self):
"""Test the per-user switch."""
user = get_user_model().objects.create(username='foo')
flag = waffle.get_waffle_flag_model().objects.create(name='myflag')
flag.users.add(user)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model().objects.create(username='someone_else')
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
# Unsetting the flag on a user should have an effect.
flag.users.remove(user)
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_remove_from_user(self):
"""Same operation of `test_user` but performed with reverse relation"""
user = get_user_model().objects.create(username='foo')
flag = waffle.get_waffle_flag_model().objects.create(name='myflag')
flag.users.add(user)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model().objects.create(username='someone_else')
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
# Unsetting the flag on a user should have an effect.
user.flag_set.remove(flag)
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_group(self):
"""Test the per-group switch."""
group = Group.objects.create(name='foo')
user = get_user_model().objects.create(username='bar')
user.groups.add(group)
flag = waffle.get_waffle_flag_model().objects.create(name='myflag')
flag.groups.add(group)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model()(username='someone_else')
request.user.save()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
# Unsetting the flag on a group should have an effect.
flag.groups.remove(group)
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_remove_from_group(self):
"""Same operation of `test_group` but performed with reverse relation"""
group = Group.objects.create(name='foo')
user = get_user_model().objects.create(username='bar')
user.groups.add(group)
flag = waffle.get_waffle_flag_model().objects.create(name='myflag')
flag.groups.add(group)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model()(username='someone_else')
request.user.save()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
# Unsetting the flag on a group should have an effect.
group.flag_set.remove(flag)
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_authenticated(self):
"""Test the authenticated/anonymous switch."""
waffle.get_waffle_flag_model().objects.create(name='myflag', authenticated=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model()(username='foo')
assert request.user.is_authenticated
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
def test_everyone_on(self):
"""Test the 'everyone' switch on."""
waffle.get_waffle_flag_model().objects.create(name='myflag', everyone=True)
request = get()
request.COOKIES['dwf_myflag'] = 'False'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model()(username='foo')
assert request.user.is_authenticated
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
def test_everyone_off(self):
"""Test the 'everyone' switch off."""
waffle.get_waffle_flag_model().objects.create(
name="myflag", everyone=False, authenticated=True
)
request = get()
request.COOKIES['dwf_myflag'] = 'True'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model()(username='foo')
assert request.user.is_authenticated
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_percent(self):
"""If you have no cookie, you get a cookie!"""
waffle.get_waffle_flag_model().objects.create(name='myflag', percent='50.0')
request = get()
response = process_request(request, views.flag_in_view)
assert 'dwf_myflag' in response.cookies
@mock.patch.object(random, 'uniform')
def test_reroll(self, uniform):
"""Even without a cookie, calling flag_is_active twice should return
the same value."""
waffle.get_waffle_flag_model().objects.create(name='myflag', percent='50.0')
# Make sure we're not really random.
request = get() # Create a clean request.
assert not hasattr(request, 'waffles')
uniform.return_value = '10' # < 50. Flag is True.
assert waffle.flag_is_active(request, 'myflag')
assert hasattr(request, 'waffles') # We should record this flag.
assert 'myflag' in request.waffles
assert request.waffles['myflag'][0]
uniform.return_value = '70' # > 50. Normally, Flag would be False.
assert waffle.flag_is_active(request, 'myflag')
assert request.waffles['myflag'][0]
def test_undefined(self):
"""Undefined flags are always false."""
request = get()
assert not waffle.flag_is_active(request, 'foo')
@override_settings(WAFFLE_FLAG_DEFAULT=True)
def test_undefined_default(self):
"""WAFFLE_FLAG_DEFAULT controls undefined flags."""
request = get()
assert waffle.flag_is_active(request, 'foo')
@override_settings(WAFFLE_OVERRIDE=True)
def test_override(self):
request = get(foo='1')
waffle.get_waffle_flag_model().objects.create(name='foo') # Off for everyone.
assert waffle.flag_is_active(request, 'foo')
def test_testing_flag(self):
waffle.get_waffle_flag_model().objects.create(name='foo', testing=True)
request = get(dwft_foo='1')
assert waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert request.waffle_tests['foo']
# GET param should override cookie
request = get(dwft_foo='0')
request.COOKIES['dwft_foo'] = 'True'
assert not waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert not request.waffle_tests['foo']
def test_testing_disabled_flag(self):
waffle.get_waffle_flag_model().objects.create(name='foo')
request = get(dwft_foo='1')
assert not waffle.flag_is_active(request, 'foo')
assert not hasattr(request, 'waffle_tests')
request = get(dwft_foo='0')
assert not waffle.flag_is_active(request, 'foo')
assert not hasattr(request, 'waffle_tests')
def test_testing_flag_header(self):
waffle.get_waffle_flag_model().objects.create(name='foo', testing=True)
request = RequestFactory().get('/foo', HTTP_DWFT_FOO='1')
request.user = AnonymousUser()
assert waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert request.waffle_tests['foo']
# header should override cookie
request = RequestFactory().get('/foo', HTTP_DWFT_FOO='0')
request.user = AnonymousUser()
request.COOKIES['dwft_foo'] = 'True'
assert not waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert not request.waffle_tests['foo']
def test_set_then_unset_testing_flag(self):
waffle.get_waffle_flag_model().objects.create(name='myflag', testing=True)
response = self.client.get('/flag_in_view?dwft_myflag=1')
self.assertEqual(b'on', response.content)
response = self.client.get('/flag_in_view')
self.assertEqual(b'on', response.content)
response = self.client.get('/flag_in_view?dwft_myflag=0')
self.assertEqual(b'off', response.content)
response = self.client.get('/flag_in_view')
self.assertEqual(b'off', response.content)
response = self.client.get('/flag_in_view?dwft_myflag=1')
self.assertEqual(b'on', response.content)
@override_settings(DATABASE_ROUTERS=['waffle.tests.base.ReplicationRouter'])
def test_everyone_on_read_from_write_db(self):
flag = waffle.get_waffle_flag_model().objects.create(name='myflag', everyone=True)
request = get()
response = process_request(request, views.flag_in_view)
# By default, flag_is_active should hit whatever it configured as the
# read DB (so values will be stale if replication is lagged).
self.assertEqual(b'off', response.content)
with override_settings(WAFFLE_READ_FROM_WRITE_DB=True):
# Save the flag again to flush the cache.
flag.save()
# The next read should now be directed to the write DB, ensuring
# the cache and DB are in sync.
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
@override_settings(
WAFFLE_FLAG_MODEL="test_app.CompanyAwareFlag",
AUTH_USER_MODEL="test_app.CompanyUser",
)
def test_pluggable_flag_model(self):
flag_model = waffle.get_waffle_flag_model()
self.assertEqual(CompanyAwareFlag, flag_model)
acme_company = Company.objects.create(name='Acme Ltd.')
feline_company = Company.objects.create(name='Feline LLC')
acme_company_flag = waffle.get_waffle_flag_model().objects.create(
name="myflag", superusers=True
)
acme_company_flag.companies.add(acme_company)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
acme_user = get_user_model()(username='acme.mcfield', company=acme_company)
request.user = acme_user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
feline_user = get_user_model()(username='acme.mcfield', company=feline_company)
request.user = feline_user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
@override_settings(WAFFLE_CREATE_MISSING_FLAGS=True)
@override_settings(WAFFLE_FLAG_DEFAULT=False)
def test_flag_created_dynamically_default_false(self):
self.assert_flag_dynamically_created_with_value(False)
@override_settings(WAFFLE_CREATE_MISSING_FLAGS=True)
@override_settings(WAFFLE_FLAG_DEFAULT=True)
def test_flag_created_dynamically_default_true(self):
self.assert_flag_dynamically_created_with_value(True)
@mock.patch('waffle.models.logger')
def test_no_logging_missing_flag_by_default(self, mock_logger):
request = get()
waffle.flag_is_active(request, 'foo')
mock_logger.log.call_count == 0
@override_settings(WAFFLE_LOG_MISSING_FLAGS=logging.WARNING)
@mock.patch('waffle.models.logger')
def test_logging_missing_flag(self, mock_logger):
request = get()
waffle.flag_is_active(request, 'foo')
mock_logger.log.assert_called_with(logging.WARNING, 'Flag %s not found', 'foo')
| WaffleTests |
python | Netflix__metaflow | metaflow/plugins/datatools/s3/s3op.py | {
"start": 1993,
"end": 22236
} | class ____(object):
def __init__(
self,
bucket,
path,
url,
local,
prefix,
content_type=None,
encryption=None,
metadata=None,
range=None,
idx=None,
):
self.bucket = bucket
self.path = path
self.url = url
self.local = local
self.prefix = prefix
self.content_type = content_type
self.metadata = metadata
self.range = range
self.idx = idx
self.encryption = encryption
def __str__(self):
return self.url
# We use error codes instead of Exceptions, which are trickier to
# handle reliably in a multiprocess world
ERROR_INVALID_URL = 4
ERROR_NOT_FULL_PATH = 5
ERROR_URL_NOT_FOUND = 6
ERROR_URL_ACCESS_DENIED = 7
ERROR_WORKER_EXCEPTION = 8
ERROR_VERIFY_FAILED = 9
ERROR_LOCAL_FILE_NOT_FOUND = 10
ERROR_INVALID_RANGE = 11
ERROR_TRANSIENT = 12
ERROR_OUT_OF_DISK_SPACE = 13
def format_result_line(idx, prefix, url="", local=""):
# We prefix each output with the index corresponding to the line number on the
# initial request (ie: prior to any transient errors). This allows us to
# properly maintain the order in which things were requested even in the presence
# of transient retries where we do not know what succeeds and what does not.
# Basically, when we retry an operation, we can trace it back to its original
# position in the first request.
return " ".join(
[str(idx)] + [url_quote(x).decode("utf-8") for x in (prefix, url, local)]
)
# I can't understand what's the right way to deal
# with boto errors. This function can be replaced
# with better error handling code.
def normalize_client_error(err):
error_code = err.response["Error"]["Code"]
try:
return int(error_code)
except ValueError:
if error_code in ("AccessDenied", "AllAccessDisabled", "InvalidAccessKeyId"):
return 403
if error_code in ("NoSuchKey", "NoSuchBucket"):
return 404
if error_code == "InvalidRange":
return 416
# We "normalize" retriable server errors to 503. These are also considered
# transient by boto3 (see:
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html)
if error_code in (
"SlowDown",
"RequestTimeout",
"RequestTimeoutException",
"PriorRequestNotComplete",
"ConnectionError",
"HTTPClientError",
"Throttling",
"ThrottlingException",
"ThrottledException",
"RequestThrottledException",
"TooManyRequestsException",
"ProvisionedThroughputExceededException",
"TransactionInProgressException",
"RequestLimitExceeded",
"BandwidthLimitExceeded",
"LimitExceededException",
"RequestThrottled",
"EC2ThrottledException",
"InternalError",
):
return 503
return error_code
# S3 worker pool
@tracing.cli("s3op/worker")
def worker(result_file_name, queue, mode, s3config):
# Interpret mode, it can either be a single op or something like
# info_download or info_upload which implies:
# - for download: we need to return the information as well
# - for upload: we need to not overwrite the file if it exists
modes = mode.split("_")
pre_op_info = False
if len(modes) > 1:
pre_op_info = True
mode = modes[1]
else:
mode = modes[0]
def op_info(url):
try:
head = s3.head_object(Bucket=url.bucket, Key=url.path)
to_return = {
"error": None,
"size": head["ContentLength"],
"content_type": head["ContentType"],
"encryption": head.get("ServerSideEncryption"),
"metadata": head["Metadata"],
"last_modified": get_timestamp(head["LastModified"]),
}
except client_error as err:
error_code = normalize_client_error(err)
if error_code == 404:
to_return = {"error": ERROR_URL_NOT_FOUND, "raise_error": err}
elif error_code == 403:
to_return = {"error": ERROR_URL_ACCESS_DENIED, "raise_error": err}
elif error_code == 416:
to_return = {"error": ERROR_INVALID_RANGE, "raise_error": err}
elif error_code in (500, 502, 503, 504):
to_return = {"error": ERROR_TRANSIENT, "raise_error": err}
else:
to_return = {"error": error_code, "raise_error": err}
return to_return
with open(result_file_name, "w") as result_file:
try:
from metaflow.plugins.datatools.s3.s3util import get_s3_client
s3, client_error = get_s3_client(
s3_role_arn=s3config.role,
s3_session_vars=s3config.session_vars,
s3_client_params=s3config.client_params,
)
while True:
url, idx = queue.get()
if url is None:
break
if mode == "info":
result = op_info(url)
orig_error = result.get("raise_error", None)
if orig_error:
del result["raise_error"]
with open(url.local, "w") as f:
json.dump(result, f)
result_file.write(
"%d %d\n"
% (idx, -1 * result["error"] if orig_error else result["size"])
)
elif mode == "download":
tmp = NamedTemporaryFile(dir=".", mode="wb", delete=False)
try:
try:
if url.range:
resp = s3.get_object(
Bucket=url.bucket, Key=url.path, Range=url.range
)
range_result = resp["ContentRange"]
range_result_match = RANGE_MATCH.match(range_result)
if range_result_match is None:
raise RuntimeError(
"Wrong format for ContentRange: %s"
% str(range_result)
)
range_result = {
x: int(range_result_match.group(x))
for x in ["total", "start", "end"]
}
else:
resp = s3.get_object(Bucket=url.bucket, Key=url.path)
range_result = None
sz = resp["ContentLength"]
if range_result is None:
range_result = {"total": sz, "start": 0, "end": sz - 1}
if not url.range and sz > DOWNLOAD_FILE_THRESHOLD:
# In this case, it is more efficient to use download_file as it
# will download multiple parts in parallel (it does it after
# multipart_threshold)
s3.download_file(url.bucket, url.path, tmp.name)
else:
read_in_chunks(
tmp, resp["Body"], sz, DOWNLOAD_MAX_CHUNK
)
tmp.close()
os.rename(tmp.name, url.local)
except client_error as err:
tmp.close()
os.unlink(tmp.name)
handle_client_error(err, idx, result_file)
continue
except RetriesExceededError as e:
tmp.close()
os.unlink(tmp.name)
err = convert_to_client_error(e)
handle_client_error(err, idx, result_file)
continue
except OSError as e:
tmp.close()
os.unlink(tmp.name)
if e.errno == errno.ENOSPC:
result_file.write(
"%d %d\n" % (idx, -ERROR_OUT_OF_DISK_SPACE)
)
else:
result_file.write(
"%d %d %s\n" % (idx, -ERROR_TRANSIENT, "OSError")
)
result_file.flush()
continue
except MetaflowException:
# Re-raise Metaflow exceptions (including TimeoutException)
tmp.close()
os.unlink(tmp.name)
raise
except (SSLError, Exception) as e:
tmp.close()
os.unlink(tmp.name)
# assume anything else is transient
result_file.write(
"%d %d %s\n" % (idx, -ERROR_TRANSIENT, type(e).__name__)
)
result_file.flush()
continue
# If we need the metadata, get it and write it out
if pre_op_info:
with open("%s_meta" % url.local, mode="w") as f:
# Get range information
args = {
"size": resp["ContentLength"],
"range_result": range_result,
}
if resp["ContentType"]:
args["content_type"] = resp["ContentType"]
if resp["Metadata"] is not None:
args["metadata"] = resp["Metadata"]
if resp.get("ServerSideEncryption") is not None:
args["encryption"] = resp["ServerSideEncryption"]
if resp["LastModified"]:
args["last_modified"] = get_timestamp(
resp["LastModified"]
)
json.dump(args, f)
# Finally, we push out the size to the result_pipe since
# the size is used for verification and other purposes, and
# we want to avoid file operations for this simple process
result_file.write("%d %d\n" % (idx, resp["ContentLength"]))
else:
# This is upload, if we have a pre_op, it means we do not
# want to overwrite
do_upload = False
if pre_op_info:
result_info = op_info(url)
if result_info["error"] == ERROR_URL_NOT_FOUND:
# We only upload if the file is not found
do_upload = True
else:
# No pre-op so we upload
do_upload = True
if do_upload:
extra = None
if url.content_type or url.metadata or url.encryption:
extra = {}
if url.content_type:
extra["ContentType"] = url.content_type
if url.metadata is not None:
extra["Metadata"] = url.metadata
if url.encryption is not None:
extra["ServerSideEncryption"] = url.encryption
try:
try:
s3.upload_file(
url.local, url.bucket, url.path, ExtraArgs=extra
)
# We indicate that the file was uploaded
result_file.write("%d %d\n" % (idx, 0))
except client_error as err:
# Shouldn't get here, but just in case.
# Internally, botocore catches ClientError and returns a S3UploadFailedError.
# See https://github.com/boto/boto3/blob/develop/boto3/s3/transfer.py#L377
handle_client_error(err, idx, result_file)
continue
except S3UploadFailedError as e:
err = convert_to_client_error(e)
handle_client_error(err, idx, result_file)
continue
except MetaflowException:
# Re-raise Metaflow exceptions (including TimeoutException)
raise
except (SSLError, Exception) as e:
# assume anything else is transient
result_file.write(
"%d %d %s\n" % (idx, -ERROR_TRANSIENT, type(e).__name__)
)
result_file.flush()
continue
except:
traceback.print_exc()
result_file.flush()
sys.exit(ERROR_WORKER_EXCEPTION)
def convert_to_client_error(e):
match = BOTOCORE_MSG_TEMPLATE_MATCH.search(str(e))
if not match:
raise e
error_code = match.group(1)
operation_name = match.group(2)
error_message = match.group(3)
response = {
"Error": {
"Code": error_code,
"Message": error_message,
}
}
return ClientError(response, operation_name)
def handle_client_error(err, idx, result_file):
# Handle all MetaflowExceptions as fatal
if isinstance(err, MetaflowException):
raise err
error_code = normalize_client_error(err)
original_error_code = err.response["Error"]["Code"]
if error_code == 404:
result_file.write("%d %d\n" % (idx, -ERROR_URL_NOT_FOUND))
result_file.flush()
elif error_code == 403:
result_file.write("%d %d\n" % (idx, -ERROR_URL_ACCESS_DENIED))
result_file.flush()
elif error_code == 503:
result_file.write("%d %d %s\n" % (idx, -ERROR_TRANSIENT, original_error_code))
result_file.flush()
else:
# optimistically assume it is a transient error
result_file.write("%d %d %s\n" % (idx, -ERROR_TRANSIENT, original_error_code))
result_file.flush()
def start_workers(mode, urls, num_workers, inject_failure, s3config):
# We start the minimum of len(urls) or num_workers to avoid starting
# workers that will definitely do nothing
num_workers = min(num_workers, len(urls))
queue = Queue(len(urls) + num_workers)
procs = {}
random.seed()
sz_results = []
transient_error_type = None
# 1. push sources and destinations to the queue
# We only push if we don't inject a failure; otherwise, we already set the sz_results
# appropriately with the result of the injected failure.
for idx, elt in enumerate(urls):
if random.randint(0, 99) < inject_failure:
sz_results.append(-ERROR_TRANSIENT)
else:
sz_results.append(None)
queue.put((elt, idx))
# 2. push end-of-queue markers
for i in range(num_workers):
queue.put((None, None))
# 3. start processes
with TempDir() as output_dir:
for i in range(num_workers):
file_path = os.path.join(output_dir, str(i))
p = Process(
target=worker,
args=(file_path, queue, mode, s3config),
)
p.start()
procs[p] = file_path
# 4. wait for the processes to finish; we continuously update procs
# to remove all processes that have finished already
while procs:
new_procs = {}
for proc, out_path in procs.items():
proc.join(timeout=1)
if proc.exitcode is not None:
if proc.exitcode != 0:
msg = "Worker process failed (exit code %d)" % proc.exitcode
# IMPORTANT: if this process has put items on a queue, then it will not terminate
# until all buffered items have been flushed to the pipe, causing a deadlock.
# `cancel_join_thread()` allows it to exit without flushing the queue.
# Without this line, the parent process would hang indefinitely when a subprocess
# did not exit cleanly in the case of unhandled exceptions.
#
# The error situation is:
# 1. this process puts stuff in queue
# 2. subprocess dies so doesn't consume its end-of-queue marker (the None)
# 3. other subprocesses consume all useful bits AND their end-of-queue marker
# 4. one marker is left and not consumed
# 5. this process cannot shut down until the queue is empty.
# 6. it will never be empty because all subprocesses (workers) have died.
queue.cancel_join_thread()
exit(msg, proc.exitcode)
# Read the output file if all went well
with open(out_path, "r") as out_file:
for line in out_file:
line_split = line.split(" ", 2)
idx = int(line_split[0])
size = int(line_split[1])
sz_results[idx] = size
# For transient errors, store the transient error type (should be the same for all)
if size == -ERROR_TRANSIENT and len(line_split) > 2:
transient_error_type = line_split[2].strip()
else:
# Put this process back in the processes to check
new_procs[proc] = out_path
procs = new_procs
return sz_results, transient_error_type
def process_urls(mode, urls, verbose, inject_failure, num_workers, s3config):
if verbose:
print("%sing %d files.." % (mode.capitalize(), len(urls)), file=sys.stderr)
start = time.time()
sz_results, transient_error_type = start_workers(
mode, urls, num_workers, inject_failure, s3config
)
end = time.time()
if verbose:
total_size = sum(sz for sz in sz_results if sz is not None and sz > 0)
bw = total_size / (end - start)
print(
"%sed %d files, %s in total, in %d seconds (%s/s)."
% (
mode.capitalize(),
len(urls),
with_unit(total_size),
end - start,
with_unit(bw),
),
file=sys.stderr,
)
return sz_results, transient_error_type
# Utility functions
def with_unit(x):
if x > 1024**3:
return "%.1fGB" % (x / 1024.0**3)
elif x > 1024**2:
return "%.1fMB" % (x / 1024.0**2)
elif x > 1024:
return "%.1fKB" % (x / 1024.0)
else:
return "%d bytes" % x
# S3Ops class is just a wrapper for get_size and list_prefix
# required by @aws_retry decorator, which needs the reset_client
# method. Otherwise they would be just stand-alone functions.
| S3Url |
python | tensorflow__tensorflow | tensorflow/dtensor/python/d_checkpoint.py | {
"start": 2202,
"end": 7392
} | class ____: # pylint: disable=protected-access
"""A single device saver that places tensors on DTensor Device."""
def __init__(self, mesh: layout.Mesh,
saveable_objects: List[saveable_object.SaveableObject]):
self._saveable_objects = saveable_objects
self._mesh = mesh
def save(
self,
file_prefix: str,
options: Optional[checkpoint_options.CheckpointOptions] = None
) -> Optional[ops.Operation]:
"""Saves the saveable objects to a checkpoint with `file_prefix`.
Also query the generated shards from the distributed DTensor SaveV2 ops and
do a MergeV2 on those. Each op here is backed by a global_barrier to avoid
racing from multiple clients.
Args:
file_prefix: A string or scalar string Tensor containing the prefix to
save under.
options: Optional `CheckpointOptions` object. This is unused in DTensor.
Returns:
An `Operation`, or None when executing eagerly.
"""
if options is not None and options.experimental_io_device is not None:
raise ValueError(
"Specified experimental_io_device in DTensor checkpoint is not supported."
)
del options
tensor_names = []
tensors = []
tensor_slices = []
for saveable in self._saveable_objects:
for spec in saveable.specs:
tensor = spec.tensor
# A tensor value of `None` indicates that this SaveableObject gets
# recorded in the object graph, but that no value is saved in the
# checkpoint.
if tensor is not None:
if api.device_name() != spec.device:
# Some small tensors are placed on CPU0 from save manager and
# broadcasted to DTensor mesh, e,g., SaveCounter.
tensor = api.pack([tensor] *
self._mesh.host_mesh().num_local_devices(),
layout.Layout.replicated(
self._mesh.host_mesh(),
rank=tensor.shape.rank))
tensor_names.append(spec.name)
tensors.append(tensor)
tensor_slices.append(spec.slice_spec)
return save_restore.sharded_save(self._mesh, file_prefix, tensor_names,
tensor_slices, tensors)
def restore(
self,
file_prefix: str,
options: Optional[checkpoint_options.CheckpointOptions] = None
) -> Dict[str, ops.Operation]:
"""Restore the saveable objects from a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix for
files to read from.
options: Optional `CheckpointOptions` object. This is unused in DTensor.
Returns:
A dictionary mapping from SaveableObject names to restore operations.
"""
if options is not None and options.experimental_io_device is not None:
raise ValueError(
"Specified experimental_io_device in DTensor checkpoint is not "
"supported.")
del options
restore_specs = []
tensor_structure = []
for saveable in self._saveable_objects:
saveable_tensor_structure = []
tensor_structure.append(saveable_tensor_structure)
# DTensor change 1 : Gather shapes and layout from original saveable
# specs.
# Note that this relies on the fact that the variables are already
# initialized -- which isn't the behavior we want eventually.
# TODO(b/159035705): Handle the variable initialization in restore.
for spec in saveable.specs:
saveable_tensor_structure.append(spec.name)
if isinstance(spec, d_variable.DSaveSpec):
restore_specs.append((spec.name, spec.slice_spec, spec.dtype,
spec.layout, spec.global_shape))
# Fall back to replicated layouts for non-DTensor saves that constructs
# normal SaveSpec.
elif isinstance(spec, saveable_object.SaveSpec):
restore_specs.append(
(spec.name, spec.slice_spec, spec.dtype,
layout.Layout.replicated(self._mesh.host_mesh(),
spec.tensor.shape.rank).to_string(),
spec.tensor.shape.as_list()))
tensor_names, tensor_slices, tensor_dtypes, layouts, global_shapes = zip(
*restore_specs)
with ops.device(api.device_name()):
# DTensor change 2 : Run on customized DTensor RestoreV2 op rather than
# stock TF io_ops.RestoreV2.
restored_tensors = gen_dtensor_ops.d_tensor_restore_v2(
prefix=file_prefix,
tensor_names=tensor_names,
shape_and_slices=tensor_slices,
input_shapes=global_shapes,
input_layouts=layouts,
dtypes=tensor_dtypes)
structured_restored_tensors = nest.pack_sequence_as(tensor_structure,
restored_tensors)
restore_ops = {}
for saveable, restored_tensors in zip(self._saveable_objects,
structured_restored_tensors):
restore_ops[saveable.name] = saveable.restore(
restored_tensors, restored_shapes=None)
return restore_ops
| _DSaver |
python | mahmoud__boltons | boltons/ioutils.py | {
"start": 10329,
"end": 15391
} | class ____(SpooledIOBase):
"""
SpooledStringIO is a spooled file-like-object that only accepts unicode
values. On Python 2.x this means the 'unicode' type and on Python 3.x this
means the 'str' type. Values are accepted as unicode and then coerced into
utf-8 encoded bytes for storage. On retrieval, the values are returned as
unicode.
Example::
>>> from boltons import ioutils
>>> with ioutils.SpooledStringIO() as f:
... f.write(u"\u2014 Hey, an emdash!")
... _ = f.seek(0)
... isinstance(f.read(), str)
True
"""
def __init__(self, *args, **kwargs):
self._tell = 0
super().__init__(*args, **kwargs)
def read(self, n=-1):
self._checkClosed()
ret = self.buffer.reader.read(n, n)
self._tell = self.tell() + len(ret)
return ret
def write(self, s):
self._checkClosed()
if not isinstance(s, str):
raise TypeError("str expected, got {}".format(
type(s).__name__
))
current_pos = self.tell()
if self.buffer.tell() + len(s.encode('utf-8')) >= self._max_size:
self.rollover()
self.buffer.write(s.encode('utf-8'))
self._tell = current_pos + len(s)
def _traverse_codepoints(self, current_position, n):
"""Traverse from current position to the right n codepoints"""
dest = current_position + n
while True:
if current_position == dest:
# By chance we've landed on the right position, break
break
# If the read would take us past the intended position then
# seek only enough to cover the offset
if current_position + READ_CHUNK_SIZE > dest:
self.read(dest - current_position)
break
else:
ret = self.read(READ_CHUNK_SIZE)
# Increment our current position
current_position += READ_CHUNK_SIZE
# If we kept reading but there was nothing here, break
# as we are at the end of the file
if not ret:
break
return dest
def seek(self, pos, mode=0):
"""Traverse from offset to the specified codepoint"""
self._checkClosed()
# Seek to position from the start of the file
if mode == os.SEEK_SET:
self.buffer.seek(0)
self._traverse_codepoints(0, pos)
self._tell = pos
# Seek to new position relative to current position
elif mode == os.SEEK_CUR:
start_pos = self.tell()
self._traverse_codepoints(self.tell(), pos)
self._tell = start_pos + pos
elif mode == os.SEEK_END:
self.buffer.seek(0)
dest_position = self.len - pos
self._traverse_codepoints(0, dest_position)
self._tell = dest_position
else:
raise ValueError(
f"Invalid whence ({mode}, should be 0, 1, or 2)"
)
return self.tell()
def readline(self, length=None):
self._checkClosed()
ret = self.buffer.readline(length).decode('utf-8')
self._tell = self.tell() + len(ret)
return ret
def readlines(self, sizehint=0):
ret = [x.decode('utf-8') for x in self.buffer.readlines(sizehint)]
self._tell = self.tell() + sum(len(x) for x in ret)
return ret
@property
def buffer(self):
try:
return self._buffer
except AttributeError:
self._buffer = EncodedFile(BytesIO(), data_encoding='utf-8')
return self._buffer
@property
def _rolled(self):
return not isinstance(self.buffer.stream, BytesIO)
def rollover(self):
"""Roll the buffer over to a TempFile"""
if not self._rolled:
tmp = EncodedFile(TemporaryFile(dir=self._dir),
data_encoding='utf-8')
pos = self.buffer.tell()
tmp.write(self.buffer.getvalue())
tmp.seek(pos)
self.buffer.close()
self._buffer = tmp
def tell(self):
"""Return the codepoint position"""
self._checkClosed()
return self._tell
@property
def len(self):
"""Determine the number of codepoints in the file"""
pos = self.buffer.tell()
self.buffer.seek(0)
total = 0
while True:
ret = self.read(READ_CHUNK_SIZE)
if not ret:
break
total += len(ret)
self.buffer.seek(pos)
return total
def is_text_fileobj(fileobj):
    """Best-effort check that *fileobj* yields text (str) rather than bytes."""
    # Text-mode handles (codecs.open, io.TextIOBase) expose .encoding.
    if getattr(fileobj, 'encoding', False):
        return True
    # StringIO-style objects: probe getvalue() and inspect the result type.
    getvalue = getattr(fileobj, 'getvalue', False)
    if getvalue:
        try:
            return isinstance(getvalue(), str)
        except Exception:
            # Deliberately best-effort: any probe failure means "not text".
            pass
    return False
| SpooledStringIO |
python | django__django | tests/fixtures_regress/models.py | {
"start": 4230,
"end": 4696
} | class ____(models.Model):
text = models.CharField(max_length=10)
nk_fk = models.ForeignKey(NKChild, models.CASCADE, related_name="ref_fks")
nk_m2m = models.ManyToManyField(NKChild, related_name="ref_m2ms")
def __str__(self):
return "%s: Reference to %s [%s]" % (
self.text,
self.nk_fk,
", ".join(str(o) for o in self.nk_m2m.all()),
)
# ome models with pathological circular dependencies
| RefToNKChild |
python | PrefectHQ__prefect | tests/blocks/test_abstract.py | {
"start": 1927,
"end": 2020
} | class ____(PrefectException):
"""Raised when a job run is still running."""
| JobRunIsRunning |
python | gevent__gevent | src/gevent/tests/test__socket_ex.py | {
"start": 699,
"end": 1110
} | class ____(greentest.TestCase):
switch_expected = False
def test(self):
# pylint:disable=no-member
sock = socket.socket()
self.assertTrue(sock.ref)
sock.ref = False
self.assertFalse(sock.ref)
self.assertFalse(sock._read_event.ref)
self.assertFalse(sock._write_event.ref)
sock.close()
if __name__ == '__main__':
greentest.main()
| TestRef |
python | bokeh__bokeh | tests/support/defaults.py | {
"start": 1899,
"end": 4858
} | class ____(Serializer):
def _encode(self, obj: Any) -> AnyRep:
if isinstance(obj, Model):
def query(prop: PropertyDescriptor[Any]) -> bool:
return prop.readonly or prop.serialized
properties = obj.query_properties_with_values(query, include_defaults=False, include_undefined=True)
attributes = {key: self.encode(val) for key, val in properties.items()}
rep = ObjectRep(
type="object",
name=obj.__qualified_model__,
attributes=attributes,
)
return rep
elif obj is Undefined:
return SymbolRep(type="symbol", name="unset")
else:
return super()._encode(obj)
def collect_defaults() -> dict[str, Any]:
serializer = DefaultsSerializer()
defaults: dict[str, Any] = {}
# In order to look up from the model catalog that Model maintains, it
# has to be created first. These imports ensure that all built-in Bokeh
# models are represented in the catalog.
import bokeh.models
import bokeh.plotting # noqa: F401
models = sorted(Model.model_class_reverse_map.values(), key=lambda model: f"{model.__module__}.{model.__name__}")
for model in models:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=BokehDeprecationWarning)
obj = model()
# filter only own properties and overrides
def query(prop: PropertyDescriptor[Any]) -> bool:
return (prop.readonly or prop.serialized) and \
(prop.name in obj.__class__.__properties__ or prop.name in obj.__class__.__overridden_defaults__)
properties = obj.query_properties_with_values(query, include_defaults=True, include_undefined=True)
attributes = {key: serializer.encode(val) for key, val in properties.items()}
bases = [base.__qualified_model__ for base in model.__bases__ if issubclass(base, HasProps) and base != HasProps]
if bases != []:
attributes = dict(
__extends__ = bases[0] if len(bases) == 1 else bases,
**attributes,
)
name = model.__qualified_model__
defaults[name] = attributes
return defaults
def output_defaults(dest: Path, defaults: dict[str, Any]) -> None:
os.makedirs(dest.parent, exist_ok=True)
output = json5.dumps(defaults, sort_keys=False, indent=2)
with open(dest, "w", encoding="utf-8") as f:
f.write(output)
f.write("\n")
print(f"Wrote {dest} with {len(defaults)} models")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| DefaultsSerializer |
python | keras-team__keras | keras/src/layers/activations/prelu_test.py | {
"start": 113,
"end": 1167
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_prelu(self):
self.run_layer_test(
prelu.PReLU,
init_kwargs={
"alpha_initializer": "zeros",
"alpha_regularizer": "L1",
"alpha_constraint": "MaxNorm",
"shared_axes": 1,
},
input_shape=(2, 3, 4),
supports_masking=True,
)
def test_prelu_correctness(self):
def np_prelu(x, alpha):
return (x > 0) * x + (x <= 0) * alpha * x
inputs = np.random.randn(2, 10, 5, 3)
prelu_layer = prelu.PReLU(
alpha_initializer="glorot_uniform",
alpha_regularizer="l1",
alpha_constraint="non_neg",
shared_axes=(1, 2),
)
prelu_layer.build(inputs.shape)
weights = np.random.random((1, 1, 3))
prelu_layer.alpha.assign(weights)
ref_out = np_prelu(inputs, weights)
self.assertAllClose(prelu_layer(inputs), ref_out)
| PReLUTest |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 80473,
"end": 81162
} | class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("hours", 3) == "3 घण्टा"
assert self.locale._format_timeframe("hour", 0) == "एक घण्टा"
def test_format_relative_now(self):
result = self.locale._format_relative("अहिले", "now", 0)
assert result == "अहिले"
def test_format_relative_future(self):
result = self.locale._format_relative("एक घण्टा", "hour", 1)
assert result == "एक घण्टा पछी"
def test_format_relative_past(self):
result = self.locale._format_relative("एक घण्टा", "hour", -1)
assert result == "एक घण्टा पहिले"
@pytest.mark.usefixtures("lang_locale")
| TestNepaliLocale |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 11978,
"end": 12309
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layers = torch.nn.Sequential(
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 10),
torch.nn.ReLU(),
)
def forward(self, x):
return self.layers(x)
| Seq |
python | pypa__warehouse | tests/common/db/organizations.py | {
"start": 890,
"end": 1528
} | class ____(WarehouseFactory):
class Meta:
model = OrganizationApplication
id = factory.Faker("uuid4", cast_to=None)
name = factory.Faker("pystr", max_chars=12)
display_name = factory.Faker("word")
orgtype = "Community"
link_url = factory.Faker("uri")
description = factory.Faker("sentence")
status = OrganizationApplicationStatus.Submitted
submitted_by = factory.SubFactory(UserFactory)
submitted = factory.Faker(
"date_time_between_dates",
datetime_start=datetime.datetime(2020, 1, 1),
datetime_end=datetime.datetime(2022, 1, 1),
)
| OrganizationApplicationFactory |
python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/types.py | {
"start": 10828,
"end": 10999
} | class ____(TypedDict): # noqa: PYI049
"""Input state schema for the agent."""
messages: Required[Annotated[list[AnyMessage | dict], add_messages]]
| _InputAgentState |
python | networkx__networkx | networkx/classes/reportviews.py | {
"start": 18225,
"end": 18927
} | class ____(DiDegreeView):
"""A DegreeView class to report in_degree for a DiGraph; See DegreeView"""
def __getitem__(self, n):
weight = self._weight
nbrs = self._pred[n]
if weight is None:
return len(nbrs)
return sum(dd.get(weight, 1) for dd in nbrs.values())
def __iter__(self):
weight = self._weight
if weight is None:
for n in self._nodes:
preds = self._pred[n]
yield (n, len(preds))
else:
for n in self._nodes:
preds = self._pred[n]
deg = sum(dd.get(weight, 1) for dd in preds.values())
yield (n, deg)
| InDegreeView |
python | kamyu104__LeetCode-Solutions | Python/valid-palindrome.py | {
"start": 29,
"end": 452
} | class ____(object):
# @param s, a string
# @return a boolean
def isPalindrome(self, s):
i, j = 0, len(s) - 1
while i < j:
while i < j and not s[i].isalnum():
i += 1
while i < j and not s[j].isalnum():
j -= 1
if s[i].lower() != s[j].lower():
return False
i, j = i + 1, j - 1
return True
| Solution |
python | astropy__astropy | astropy/table/bst.py | {
"start": 206,
"end": 810
} | class ____:
"""
Represents an infinite value for purposes
of tuple comparison.
"""
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __repr__(self):
return "MAX"
def to_value(self, unit):
"""Convert to a value of the given unit."""
# This is needed to support Quantity comparisons, in particular
# Quantity.searchsorted(MAX).
return np.float64(np.inf)
__str__ = __repr__
| MaxValue |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 14776,
"end": 15805
} | class ____:
'''
Placeholder proxy to use when protecting against infinite recursion due to
loops in the object graph.
Analogous to the values emitted by the users of Py_ReprEnter and Py_ReprLeave
'''
def __init__(self, rep):
self._rep = rep
def __repr__(self):
return self._rep
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
'''Shared code for use by all classes:
write a representation to file-like object "out"'''
out.write('<')
out.write(name)
# Write dictionary of instance attributes:
if isinstance(pyop_attrdict, PyDictObjectPtr):
out.write('(')
first = True
for pyop_arg, pyop_val in pyop_attrdict.iteritems():
if not first:
out.write(', ')
first = False
out.write(pyop_arg.proxyval(visited))
out.write('=')
pyop_val.write_repr(out, visited)
out.write(')')
out.write(' at remote 0x%x>' % address)
| ProxyAlreadyVisited |
python | google__pytype | pytype/blocks/block_serializer.py | {
"start": 877,
"end": 1933
} | class ____(json.JSONEncoder):
"""Implements the JSONEncoder behavior for ordered bytecode blocks."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _encode_code(self, code: SerializedCode) -> dict[str, Any]:
return {
"_type": "Code",
"blocks": [self._encode_block(b) for b in code.blocks],
}
def _encode_block(self, block: SerializedBlock) -> dict[str, Any]:
return {
"_type": "Block",
"id": block.id,
"code": block.code,
"incoming": block.incoming,
"outgoing": block.outgoing,
}
def default(self, o):
if isinstance(o, SerializedCode):
return self._encode_code(o)
elif isinstance(o, SerializedBlock):
return self._encode_block(o)
else:
return super().default(o)
def encode_merged_graph(block_graph):
out = []
for k, v in block_graph.graph.items():
for b in v.order:
out.append(SerializedBlock.make(k, b))
sc = SerializedCode(out)
return json.dumps(sc, cls=BlockGraphEncoder)
| BlockGraphEncoder |
python | mwaskom__seaborn | tests/test_statistics.py | {
"start": 18563,
"end": 19664
} | class ____:
def test_weighted_mean(self, long_df):
long_df["weight"] = long_df["x"]
est = WeightedAggregator("mean")
out = est(long_df, "y")
expected = np.average(long_df["y"], weights=long_df["weight"])
assert_array_equal(out["y"], expected)
assert_array_equal(out["ymin"], np.nan)
assert_array_equal(out["ymax"], np.nan)
def test_weighted_ci(self, long_df):
long_df["weight"] = long_df["x"]
est = WeightedAggregator("mean", "ci")
out = est(long_df, "y")
expected = np.average(long_df["y"], weights=long_df["weight"])
assert_array_equal(out["y"], expected)
assert (out["ymin"] <= out["y"]).all()
assert (out["ymax"] >= out["y"]).all()
def test_limited_estimator(self):
with pytest.raises(ValueError, match="Weighted estimator must be 'mean'"):
WeightedAggregator("median")
def test_limited_ci(self):
with pytest.raises(ValueError, match="Error bar method must be 'ci'"):
WeightedAggregator("mean", "sd")
| TestWeightedAggregator |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 469336,
"end": 491657
} | class ____(Request):
"""
Prepare request data for frames download. Intended for allowing the user to send the data using
a POST request (since a query string in a GET request cannot accomodate large generic data structures), and use
the resulting call's ID as a handle for calling the `download_for_dataview` endpoint using a GET method.
:param dataview: Dataview specification
:type dataview: Dataview
:param random_seed: Optional random seed used for frame selection. If not
provided, one will be generated.
:type random_seed: int
:param node: Node number. This provides support for multi-node experiments
running multiple workers executing the same experiment on multiple processes or
machines
:type node: int
:param projection: Used to select which parts of the frame will be returned.
Each string represents a field or sub-field (using dot-separated notation). In
order to specify a specific array element, use array index as a field name. To
specify all array elements, use '*'.
:type projection: Sequence[str]
:param download_type: Download type. Determines the downloaded file's
formatting and mime type.
:type download_type: str
:param remove_none_values: If set to Truethen none values are removed from
frames (except for metadata)
:type remove_none_values: bool
:param clean_subfields: If set to Truethen both frame toplevel fields and
subfields are cleaned according to the schema. Otherwise only top level fields
:type clean_subfields: bool
"""
_service = "frames"
_action = "prepare_download_for_dataview"
_version = "2.23"
_schema = {
"definitions": {
"dataview": {
"properties": {
"augmentation": {
"description": "Augmentation parameters. Only for training and testing tasks.",
"oneOf": [
{"$ref": "#/definitions/dv_augmentation"},
{"type": "null"},
],
},
"filters": {
"description": "List of FilterRule ('OR' relationship)",
"items": {"$ref": "#/definitions/filter_rule"},
"type": ["array", "null"],
},
"iteration": {
"description": "Iteration parameters. Not applicable for register (import) tasks.",
"oneOf": [
{"$ref": "#/definitions/iteration"},
{"type": "null"},
],
},
"labels_enumeration": {
"additionalProperties": {"type": "integer"},
"description": (
"Labels enumerations, specifies numbers to be assigned to ROI labels when getting frames"
),
"type": ["object", "null"],
},
"mapping": {
"description": "Mapping parameters",
"oneOf": [{"$ref": "#/definitions/mapping"}, {"type": "null"}],
},
"output_rois": {
"description": (
"'all_in_frame' - all rois for a frame are returned\n\n'only_filtered' - only rois which"
" led this frame to be selected\n\n'frame_per_roi' - single roi per frame. Frame can be"
" returned multiple times with a different roi each time.\n\nNote: this should be used for"
" Training tasks only\n\nNote: frame_per_roi implies that only filtered rois will be"
" returned\n "
),
"oneOf": [
{"$ref": "#/definitions/output_rois_enum"},
{"type": "null"},
],
},
"versions": {
"description": "View dataset versions",
"items": {"$ref": "#/definitions/view_entry"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dv_augmentation": {
"properties": {
"crop_around_rois": {
"description": "Crop image data around all frame ROIs",
"type": ["boolean", "null"],
},
"sets": {
"description": "List of augmentation sets",
"items": {"$ref": "#/definitions/dv_augmentation_set"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dv_augmentation_set": {
"properties": {
"arguments": {
"additionalProperties": {
"additionalProperties": True,
"type": "object",
},
"description": "Arguments dictionary per custom augmentation type.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class",
"type": ["string", "null"],
},
"strength": {
"description": "Augmentation strength. Range [0,).",
"minimum": 0,
"type": ["number", "null"],
},
"types": {
"description": "Augmentation type",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"filter_by_roi_enum": {
"default": "label_rules",
"enum": ["disabled", "no_rois", "label_rules"],
"type": "string",
},
"filter_label_rule": {
"properties": {
"conf_range": {
"description": (
"Range of ROI confidence level in the frame (min, max). -1 for not applicable\n "
" Both min and max can be either -1 or positive.\n 2nd number (max) must be"
" either -1 or larger than or equal to the 1st number (min)"
),
"items": {"type": "number"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"count_range": {
"description": (
"Range of times ROI appears in the frame (min, max). -1 for not applicable.\n "
" Both integers must be larger than or equal to -1.\n 2nd integer (max) must be"
" either -1 or larger than or equal to the 1st integer (min)"
),
"items": {"type": "integer"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"label": {
"description": (
"Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and"
" default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent"
" to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'"
),
"type": "string",
},
"must_not": {
"default": False,
"description": (
"If set then the label must not exist or lucene query must not be true.\n The"
" default value is false"
),
"type": "boolean",
},
},
"required": ["label"],
"type": "object",
},
"filter_rule": {
"properties": {
"dataset": {
"description": (
"Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in"
" View are used."
),
"type": "string",
},
"filter_by_roi": {
"description": "Type of filter. Optional, the default value is 'label_rules'",
"oneOf": [
{"$ref": "#/definitions/filter_by_roi_enum"},
{"type": "null"},
],
},
"frame_query": {
"description": "Frame filter, in Lucene query syntax",
"type": ["string", "null"],
},
"label_rules": {
"description": (
"List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all"
" frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without"
" ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules"
),
"items": {"$ref": "#/definitions/filter_label_rule"},
"type": ["array", "null"],
},
"sources_query": {
"description": "Sources filter, in Lucene query syntax. Filters sources in each frame.",
"type": ["string", "null"],
},
"version": {
"description": (
"Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If"
" set to '*' all version of the datasets in View are used."
),
"type": "string",
},
"weight": {
"description": "Rule weight. Default is 1",
"type": "number",
},
},
"required": ["dataset"],
"type": "object",
},
"iteration": {
"description": "Sequential Iteration API configuration",
"properties": {
"infinite": {
"description": "Infinite iteration",
"type": ["boolean", "null"],
},
"jump": {
"description": "Jump entry",
"oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}],
},
"limit": {
"description": (
"Maximum frames per task. If not passed, frames will end when no more matching frames are"
" found, unless infinite is True."
),
"type": ["integer", "null"],
},
"min_sequence": {
"description": (
"Length (in ms) of video clips to return. This is used in random order, and in sequential"
" order only if jumping is provided and only for video frames"
),
"type": ["integer", "null"],
},
"order": {
"description": (
"\n Input frames order. Values: 'sequential', 'random'\n In"
" Sequential mode frames will be returned according to the order in which the frames were"
" added to the dataset."
),
"oneOf": [
{"$ref": "#/definitions/iteration_order_enum"},
{"type": "null"},
],
},
"random_seed": {
"description": "Random seed used when iterating over the dataview",
"type": ["integer", "null"],
},
},
"type": "object",
},
"iteration_order_enum": {
"enum": ["sequential", "random"],
"type": "string",
},
"jump": {
"properties": {
"time": {
"description": "Max time in milliseconds between frames",
"type": ["integer", "null"],
}
},
"type": "object",
},
"label_source": {
"properties": {
"dataset": {
"description": "Source dataset id. '*' for all datasets in view",
"type": ["string", "null"],
},
"labels": {
"description": (
"List of source labels (AND connection). '*' indicates any label. Labels must exist in at"
" least one of the dataset versions in the task's view"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"version": {
"description": (
"Source dataset version id. Default is '*' (for all versions in dataset in the view)"
" Version must belong to the selected dataset, and must be in the task's view[i]"
),
"type": ["string", "null"],
},
},
"type": "object",
},
"mapping": {
"properties": {
"rules": {
"description": "Rules list",
"items": {"$ref": "#/definitions/mapping_rule"},
"type": ["array", "null"],
}
},
"type": "object",
},
"mapping_rule": {
"properties": {
"source": {
"description": "Source label info",
"oneOf": [
{"$ref": "#/definitions/label_source"},
{"type": "null"},
],
},
"target": {
"description": "Target label name",
"type": ["string", "null"],
},
},
"type": "object",
},
"output_rois_enum": {
"enum": ["all_in_frame", "only_filtered", "frame_per_roi"],
"type": "string",
},
"view_entry": {
"properties": {
"dataset": {
"description": "Existing Dataset id",
"type": ["string", "null"],
},
"merge_with": {
"description": "Version ID to merge with",
"type": ["string", "null"],
},
"version": {
"description": "Version id of a version belonging to the dataset",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"clean_subfields": {
"default": False,
"description": (
"If set to Truethen both frame toplevel fields and subfields are cleaned according to the schema."
" Otherwise only top level fields"
),
"type": "boolean",
},
"dataview": {
"$ref": "#/definitions/dataview",
"description": "Dataview specification",
},
"download_type": {
"default": "json",
"description": "Download type. Determines the downloaded file's formatting and mime type.",
"enum": ["json jsonlines"],
"type": "string",
},
"node": {
"description": (
"Node number. This provides support for multi-node experiments running multiple workers executing"
" the same experiment on multiple processes or machines"
),
"type": "integer",
},
"projection": {
"description": (
"Used to select which parts of the frame will be returned. Each string represents a\n "
" field or sub-field (using dot-separated notation). In order to specify a specific array"
" element,\n use array index as a field name. To specify all array elements, use"
" '*'."
),
"items": {"type": "string"},
"type": "array",
},
"random_seed": {
"description": "Optional random seed used for frame selection. If not provided, one will be generated.",
"type": "integer",
},
"remove_none_values": {
"default": False,
"description": "If set to Truethen none values are removed from frames (except for metadata)",
"type": "boolean",
},
},
"required": ["dataview"],
"type": "object",
}
def __init__(
self,
dataview,
random_seed=None,
node=None,
projection=None,
download_type="json",
remove_none_values=False,
clean_subfields=False,
**kwargs
):
super(PrepareDownloadForDataviewRequest, self).__init__(**kwargs)
self.dataview = dataview
self.random_seed = random_seed
self.node = node
self.projection = projection
self.download_type = download_type
self.remove_none_values = remove_none_values
self.clean_subfields = clean_subfields
@schema_property("dataview")
def dataview(self):
return self._property_dataview
@dataview.setter
def dataview(self, value):
if value is None:
self._property_dataview = None
return
if isinstance(value, dict):
value = Dataview.from_dict(value)
else:
self.assert_isinstance(value, "dataview", Dataview)
self._property_dataview = value
@schema_property("random_seed")
def random_seed(self):
return self._property_random_seed
@random_seed.setter
def random_seed(self, value):
if value is None:
self._property_random_seed = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "random_seed", six.integer_types)
self._property_random_seed = value
@schema_property("node")
def node(self):
return self._property_node
@node.setter
def node(self, value):
if value is None:
self._property_node = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "node", six.integer_types)
self._property_node = value
@schema_property("projection")
def projection(self):
return self._property_projection
@projection.setter
def projection(self, value):
if value is None:
self._property_projection = None
return
self.assert_isinstance(value, "projection", (list, tuple))
self.assert_isinstance(value, "projection", six.string_types, is_array=True)
self._property_projection = value
@schema_property("download_type")
def download_type(self):
return self._property_download_type
@download_type.setter
def download_type(self, value):
if value is None:
self._property_download_type = None
return
self.assert_isinstance(value, "download_type", six.string_types)
self._property_download_type = value
@schema_property("remove_none_values")
def remove_none_values(self):
return self._property_remove_none_values
@remove_none_values.setter
def remove_none_values(self, value):
if value is None:
self._property_remove_none_values = None
return
self.assert_isinstance(value, "remove_none_values", (bool,))
self._property_remove_none_values = value
@schema_property("clean_subfields")
def clean_subfields(self):
return self._property_clean_subfields
@clean_subfields.setter
def clean_subfields(self, value):
if value is None:
self._property_clean_subfields = None
return
self.assert_isinstance(value, "clean_subfields", (bool,))
self._property_clean_subfields = value
| PrepareDownloadForDataviewRequest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_autofit05.py | {
"start": 342,
"end": 1025
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("autofit05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
date_format = workbook.add_format({"num_format": 14})
worksheet.write_datetime(0, 0, date(2023, 1, 1), date_format)
worksheet.write_datetime(0, 1, date(2023, 12, 12), date_format)
worksheet.autofit()
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/strobogrammatic-number-ii.py | {
"start": 491,
"end": 1153
} | class ____(object):
def findStrobogrammatic(self, n):
"""
:type n: int
:rtype: List[str]
"""
lookup = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'}
def findStrobogrammaticRecu(n, k):
if k == 0:
return ['']
elif k == 1:
return ['0', '1', '8']
result = []
for num in findStrobogrammaticRecu(n, k - 2):
for key, val in lookup.iteritems():
if n != k or key != '0':
result.append(key + num + val)
return result
return findStrobogrammaticRecu(n, n)
| Solution2 |
python | doocs__leetcode | solution/3700-3799/3727.Maximum Alternating Sum of Squares/Solution.py | {
"start": 0,
"end": 253
} | class ____:
def maxAlternatingSum(self, nums: List[int]) -> int:
nums.sort(key=lambda x: x * x)
n = len(nums)
s1 = sum(x * x for x in nums[: n // 2])
s2 = sum(x * x for x in nums[n // 2 :])
return s2 - s1
| Solution |
python | astropy__astropy | astropy/nddata/nduncertainty.py | {
"start": 20523,
"end": 33246
} | class ____:
"""
Propagation of uncertainties for variances, also used to perform error
propagation for variance-like uncertainties (standard deviation and inverse
variance).
"""
def _propagate_collapse(self, numpy_op, axis=None):
"""
Error propagation for collapse operations on variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for uncertainty convertible to
a variance.
Parameters
----------
numpy_op : function
Numpy operation like `np.sum` or `np.max` to use in the collapse
subtract : bool, optional
If ``True``, propagate for subtraction, otherwise propagate for
addition.
axis : tuple, optional
Axis on which to compute collapsing operations.
"""
try:
result_unit_sq = self.parent_nddata.unit**2
except (AttributeError, TypeError):
result_unit_sq = None
if self.array is not None:
# Formula: sigma**2 = dA
if numpy_op in [np.min, np.max]:
# Find the indices of the min/max in parent data along each axis,
# return the uncertainty at the corresponding entry:
return self._get_err_at_extremum(numpy_op, axis=axis)
# np.sum and np.mean operations use similar pattern
# to `_propagate_add_sub`, for example:
else:
# lookup the mapping for to_variance and from_variance for this
# numpy operation:
to_variance = collapse_to_variance_mapping[numpy_op]
from_variance = collapse_from_variance_mapping[numpy_op]
masked_uncertainty = np.ma.masked_array(
self.array, self.parent_nddata.mask
)
if (
self.unit is not None
and to_variance(self.unit) != self.parent_nddata.unit**2
):
# If the uncertainty has a different unit than the result we
# need to convert it to the results unit.
this = (
to_variance(masked_uncertainty << self.unit)
.to(result_unit_sq)
.value
)
else:
this = to_variance(masked_uncertainty)
return from_variance(this, axis=axis)
def _get_err_at_extremum(self, extremum, axis):
"""
Return the value of the ``uncertainty`` array at the indices
which satisfy the ``extremum`` function applied to the ``measurement`` array,
where we expect ``extremum`` to be np.argmax or np.argmin, and
we expect a two-dimensional output.
Assumes the ``measurement`` and ``uncertainty`` array dimensions
are ordered such that the zeroth dimension is the one to preserve.
For example, if you start with array with shape (a, b, c), this
function applies the ``extremum`` function to the last two dimensions,
with shapes b and c.
This operation is difficult to cast in a vectorized way. Here
we implement it with a list comprehension, which is likely not the
most performant solution.
"""
if axis is not None and not hasattr(axis, "__len__"):
# this is a single axis:
axis = [axis]
if extremum is np.min:
arg_extremum = np.ma.argmin
elif extremum is np.max:
arg_extremum = np.ma.argmax
all_axes = np.arange(self.array.ndim)
if axis is None:
# collapse over all dimensions
ind = arg_extremum(np.asanyarray(self.parent_nddata).ravel())
return self.array.ravel()[ind]
# collapse an ND array over arbitrary dimensions:
preserve_axes = [ax for ax in all_axes if ax not in axis]
meas = np.ma.masked_array(
_move_preserved_axes_first(self.parent_nddata.data, preserve_axes),
_move_preserved_axes_first(self.parent_nddata.mask, preserve_axes),
)
err = _move_preserved_axes_first(self.array, preserve_axes)
result = np.array(
[e[np.unravel_index(arg_extremum(m), m.shape)] for m, e in zip(meas, err)]
)
return _unravel_preserved_axes(
self.parent_nddata.data,
result,
preserve_axes,
)
def _propagate_add_sub(
self,
other_uncert,
result_data,
correlation,
subtract=False,
to_variance=lambda x: x,
from_variance=lambda x: x,
):
"""
Error propagation for addition or subtraction of variance or
variance-like uncertainties. Uncertainties are calculated using the
formulae for variance but can be used for uncertainty convertible to
a variance.
Parameters
----------
other_uncert : `~astropy.nddata.NDUncertainty` instance
The uncertainty, if any, of the other operand.
result_data : `~astropy.nddata.NDData` instance
The results of the operation on the data.
correlation : float or array-like
Correlation of the uncertainties.
subtract : bool, optional
If ``True``, propagate for subtraction, otherwise propagate for
addition.
to_variance : function, optional
Function that will transform the input uncertainties to variance.
The default assumes the uncertainty is the variance.
from_variance : function, optional
Function that will convert from variance to the input uncertainty.
The default assumes the uncertainty is the variance.
"""
if subtract:
correlation_sign = -1
else:
correlation_sign = 1
try:
result_unit_sq = result_data.unit**2
except AttributeError:
result_unit_sq = None
if other_uncert.array is not None:
# Formula: sigma**2 = dB
if other_uncert.unit is not None and result_unit_sq != to_variance(
other_uncert.unit
):
# If the other uncertainty has a unit and this unit differs
# from the unit of the result convert it to the results unit
other = (
to_variance(other_uncert.array << other_uncert.unit)
.to(result_unit_sq)
.value
)
else:
other = to_variance(other_uncert.array)
else:
other = 0
if self.array is not None:
# Formula: sigma**2 = dA
if (
self.unit is not None
and to_variance(self.unit) != self.parent_nddata.unit**2
):
# If the uncertainty has a different unit than the result we
# need to convert it to the results unit.
this = to_variance(self.array << self.unit).to(result_unit_sq).value
else:
this = to_variance(self.array)
else:
this = 0
# Formula: sigma**2 = dA + dB +/- 2*cor*sqrt(dA*dB)
# Formula: sigma**2 = sigma_other + sigma_self +/- 2*cor*sqrt(dA*dB)
# (sign depends on whether addition or subtraction)
# Determine the result depending on the correlation
if isinstance(correlation, np.ndarray) or correlation != 0:
corr = 2 * correlation * np.sqrt(this * other)
result = this + other + correlation_sign * corr
else:
result = this + other
return from_variance(result)
def _propagate_multiply_divide(
    self,
    other_uncert,
    result_data,
    correlation,
    divide=False,
    to_variance=lambda x: x,
    from_variance=lambda x: x,
):
    """
    Error propagation for multiplication or division of variance or
    variance-like uncertainties. Uncertainties are calculated using the
    formulae for variance but can be used for uncertainty convertible to
    a variance.

    Parameters
    ----------
    other_uncert : `~astropy.nddata.NDUncertainty` instance
        The uncertainty, if any, of the other operand.
    result_data : `~astropy.nddata.NDData` instance
        The results of the operation on the data.
    correlation : float or array-like
        Correlation of the uncertainties.
    divide : bool, optional
        If ``True``, propagate for division, otherwise propagate for
        multiplication.
    to_variance : function, optional
        Function that will transform the input uncertainties to variance.
        The default assumes the uncertainty is the variance.
    from_variance : function, optional
        Function that will convert from variance to the input uncertainty.
        The default assumes the uncertainty is the variance.

    Returns
    -------
    The propagated uncertainty, converted back from variance via
    ``from_variance``.
    """
    # For multiplication we don't need the result as quantity.
    # NOTE(review): after this unwrapping, ``result_data`` is never read
    # again in this method; it appears to be accepted only for signature
    # symmetry with the add/sub propagation -- confirm before removing.
    if isinstance(result_data, Quantity):
        result_data = result_data.value
    # Division flips the sign of the correlation term (see the derivation
    # in the long comment block further down).
    if divide:
        correlation_sign = -1
    else:
        correlation_sign = 1
    if other_uncert.array is not None:
        # We want the result to have a unit consistent with the parent, so
        # we only need to convert the unit of the other uncertainty if it
        # is different from its data's unit.
        if (
            other_uncert.unit
            and to_variance(1 * other_uncert.unit)
            != ((1 * other_uncert.parent_nddata.unit) ** 2).unit
        ):
            d_b = (
                to_variance(other_uncert.array << other_uncert.unit)
                .to((1 * other_uncert.parent_nddata.unit) ** 2)
                .value
            )
        else:
            d_b = to_variance(other_uncert.array)
        # Formula: sigma**2 = |A|**2 * d_b
        right = np.abs(self.parent_nddata.data**2 * d_b)
    else:
        right = 0
    if self.array is not None:
        # Just the reversed case: convert this uncertainty to variance in
        # units of (parent data unit)**2 when the units differ.
        if (
            self.unit
            and to_variance(1 * self.unit)
            != ((1 * self.parent_nddata.unit) ** 2).unit
        ):
            d_a = (
                to_variance(self.array << self.unit)
                .to((1 * self.parent_nddata.unit) ** 2)
                .value
            )
        else:
            d_a = to_variance(self.array)
        # Formula: sigma**2 = |B|**2 * d_a
        left = np.abs(other_uncert.parent_nddata.data**2 * d_a)
    else:
        left = 0
    # Multiplication
    #
    # The fundamental formula is:
    # sigma**2 = |AB|**2*(d_a/A**2+d_b/B**2+2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
    #
    # This formula is not very handy since it generates NaNs for every
    # zero in A and B. So we rewrite it:
    #
    # Multiplication Formula:
    # sigma**2 = (d_a*B**2 + d_b*A**2 + (2 * cor * ABsqrt(dAdB)))
    # sigma**2 = (left + right + (2 * cor * ABsqrt(dAdB)))
    #
    # Division
    #
    # The fundamental formula for division is:
    # sigma**2 = |A/B|**2*(d_a/A**2+d_b/B**2-2*sqrt(d_a)/A*sqrt(d_b)/B*cor)
    #
    # As with multiplication, it is convenient to rewrite this to avoid
    # nans where A is zero.
    #
    # Division formula (rewritten):
    # sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
    # - 2 * cor * A *sqrt(dAdB) / B**3
    # sigma**2 = d_a/B**2 + (A/B)**2 * d_b/B**2
    # - 2*cor * sqrt(d_a)/B**2 * sqrt(d_b) * A / B
    # sigma**2 = multiplication formula/B**4 (and sign change in
    # the correlation)
    if isinstance(correlation, np.ndarray) or correlation != 0:
        # NOTE(review): ``d_a``/``d_b`` are only bound in the branches
        # above, so a nonzero correlation combined with a missing
        # uncertainty array on either operand would raise NameError here.
        # Presumably callers guarantee correlation == 0 in that case --
        # confirm against the calling propagation code.
        corr = (
            2
            * correlation
            * np.sqrt(d_a * d_b)
            * self.parent_nddata.data
            * other_uncert.parent_nddata.data
        )
    else:
        corr = 0
    if divide:
        # Division result: the multiplication combination divided by B**4;
        # the correlation sign was already flipped via ``correlation_sign``.
        return from_variance(
            (left + right + correlation_sign * corr)
            / other_uncert.parent_nddata.data**4
        )
    else:
        return from_variance(left + right + correlation_sign * corr)
| _VariancePropagationMixin |
python | sqlalchemy__sqlalchemy | test/orm/test_froms.py | {
"start": 92983,
"end": 114010
class ____(QueryTest, AssertsCompiledSQL):
    """ORM tests for selecting entities from subqueries and aliases.

    Each test maps the fixture classes imperatively, builds an
    ``aliased()`` entity (usually against a SELECT/subquery of the
    fixture tables), and asserts either the compiled SQL string or the
    loaded fixture rows.
    """

    # Mappers are configured inside each test rather than in class setup.
    run_setup_mappers = None
    __dialect__ = "default"

    def test_replace_with_select(self):
        """aliased(User, <select alias>) supports filter, order_by and
        joinedload against the aliased entity."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )

        self.mapper_registry.map_imperatively(
            User, users, properties={"addresses": relationship(Address)}
        )
        self.mapper_registry.map_imperatively(Address, addresses)

        sel = users.select().where(users.c.id.in_([7, 8])).alias()
        sess = fixture_session()
        ua = aliased(User, sel)

        eq_(
            sess.query(ua).all(),
            [User(id=7), User(id=8)],
        )

        eq_(
            sess.query(ua).filter(ua.id == 8).all(),
            [User(id=8)],
        )

        eq_(
            sess.query(ua).order_by(desc(ua.name)).all(),
            [User(name="jack", id=7), User(name="ed", id=8)],
        )

        eq_(
            sess.query(ua).order_by(asc(ua.name)).all(),
            [User(name="ed", id=8), User(name="jack", id=7)],
        )

        eq_(
            sess.query(ua).options(joinedload(ua.addresses)).first(),
            User(name="jack", addresses=[Address(id=1)]),
        )

    def test_select_from_aliased_one(self):
        """aliased() against a lower-case table() construct with
        adapt_on_names=True compiles and loads correctly."""
        User, users = self.classes.User, self.tables.users

        self.mapper_registry.map_imperatively(User, users)

        sess = fixture_session()

        not_users = table("users", column("id"), column("name"))
        ua = aliased(User, select(not_users).alias(), adapt_on_names=True)

        q = sess.query(ua.name).order_by(ua.name)
        self.assert_compile(
            q,
            "SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, "
            "users.name AS name FROM users) AS anon_1 ORDER BY anon_1.name",
        )
        eq_(q.all(), [("chuck",), ("ed",), ("fred",), ("jack",)])

    def test_select_from_aliased_two(self):
        """A plain aliased(User) compiles to an aliased users table."""
        User, users = self.classes.User, self.tables.users

        self.mapper_registry.map_imperatively(User, users)

        sess = fixture_session()

        ua = aliased(User)

        q = sess.query(ua.name).order_by(ua.name)
        self.assert_compile(
            q,
            "SELECT users_1.name AS users_1_name FROM users AS users_1 "
            "ORDER BY users_1.name",
        )
        eq_(q.all(), [("chuck",), ("ed",), ("fred",), ("jack",)])

    def test_differentiate_self_external(self):
        """test some different combinations of joining a table to a subquery of
        itself."""

        users, User = self.tables.users, self.classes.User

        self.mapper_registry.map_imperatively(User, users)

        sess = fixture_session()

        sel = sess.query(User).filter(User.id.in_([7, 8])).subquery()
        ualias = aliased(User)
        ua = aliased(User, sel)

        self.assert_compile(
            sess.query(User).join(sel, User.id > sel.c.id),
            "SELECT users.id AS users_id, users.name AS users_name FROM "
            "users JOIN (SELECT users.id AS id, users.name AS name FROM users "
            "WHERE users.id IN (__[POSTCOMPILE_id_1])) "
            "AS anon_1 ON users.id > anon_1.id",
        )

        self.assert_compile(
            sess.query(ualias).select_from(ua).filter(ualias.id > ua.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM ("
            "SELECT users.id AS id, users.name AS name FROM users "
            "WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1, "
            "users AS users_1 "
            "WHERE users_1.id > anon_1.id",
            check_post_param={"id_1": [7, 8]},
        )

        self.assert_compile(
            sess.query(ualias).select_from(ua).join(ualias, ualias.id > ua.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM (SELECT users.id AS id, users.name AS name "
            "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 "
            "JOIN users AS users_1 ON users_1.id > anon_1.id",
            check_post_param={"id_1": [7, 8]},
        )

        self.assert_compile(
            sess.query(ualias).select_from(ua).join(ualias, ualias.id > ua.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM (SELECT users.id AS id, users.name AS name FROM "
            "users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 "
            "JOIN users AS users_1 ON users_1.id > anon_1.id",
            check_post_param={"id_1": [7, 8]},
        )

        salias = aliased(User, sel)
        self.assert_compile(
            sess.query(salias).join(ualias, ualias.id > salias.id),
            "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM "
            "(SELECT users.id AS id, users.name AS name "
            "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 "
            "JOIN users AS users_1 ON users_1.id > anon_1.id",
            check_post_param={"id_1": [7, 8]},
        )

    def test_aliased_class_vs_nonaliased(self):
        """Joins mixing an aliased(User) and the plain User entity compile
        with the expected alias names on each side."""
        User, users = self.classes.User, self.tables.users
        self.mapper_registry.map_imperatively(User, users)

        ua = aliased(User)

        sess = fixture_session()
        self.assert_compile(
            sess.query(User).select_from(ua).join(User, ua.name > User.name),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users AS users_1 JOIN users ON users_1.name > users.name",
        )

        self.assert_compile(
            sess.query(User.name)
            .select_from(ua)
            .join(User, ua.name > User.name),
            "SELECT users.name AS users_name FROM users AS users_1 "
            "JOIN users ON users_1.name > users.name",
        )

        self.assert_compile(
            sess.query(ua.name)
            .select_from(ua)
            .join(User, ua.name > User.name),
            "SELECT users_1.name AS users_1_name FROM users AS users_1 "
            "JOIN users ON users_1.name > users.name",
        )

        self.assert_compile(
            sess.query(ua).select_from(User).join(ua, ua.name > User.name),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users JOIN users AS users_1 ON users_1.name > users.name",
        )

        self.assert_compile(
            sess.query(ua).select_from(User).join(ua, User.name > ua.name),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users JOIN users AS users_1 ON users.name > users_1.name",
        )

    def test_join_relname_from_selected_from(self):
        """join() along a backref relationship honors select_from()."""
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses

        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "addresses": relationship(
                    self.mapper_registry.map_imperatively(Address, addresses),
                    backref="user",
                )
            },
        )

        sess = fixture_session()

        self.assert_compile(
            sess.query(User).select_from(Address).join(Address.user),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM addresses JOIN users ON users.id = addresses.user_id",
        )

    def test_filter_by_selected_from(self):
        """filter_by() applies to the select_from() entity, not the query
        entity."""
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses

        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "addresses": relationship(
                    self.mapper_registry.map_imperatively(Address, addresses)
                )
            },
        )

        sess = fixture_session()

        self.assert_compile(
            sess.query(User)
            .select_from(Address)
            .filter_by(email_address="ed")
            .join(User),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM addresses JOIN users ON users.id = addresses.user_id "
            "WHERE addresses.email_address = :email_address_1",
        )

    def test_join_ent_selected_from(self):
        """join(Entity) from a select_from() table infers the relationship
        ON clause."""
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses

        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "addresses": relationship(
                    self.mapper_registry.map_imperatively(Address, addresses)
                )
            },
        )

        sess = fixture_session()

        self.assert_compile(
            sess.query(User).select_from(Address).join(User),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM addresses JOIN users ON users.id = addresses.user_id",
        )

    def test_join(self):
        """Joining from an aliased subquery entity to a relationship loads
        the expected (User, Address) pairs, with and without an aliased
        target."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )

        self.mapper_registry.map_imperatively(
            User, users, properties={"addresses": relationship(Address)}
        )
        self.mapper_registry.map_imperatively(Address, addresses)

        sel = users.select().where(users.c.id.in_([7, 8]))
        sess = fixture_session()

        ua = aliased(User, sel.subquery())
        eq_(
            sess.query(ua)
            .join(ua.addresses)
            .add_entity(Address)
            .order_by(ua.id)
            .order_by(Address.id)
            .all(),
            [
                (
                    User(name="jack", id=7),
                    Address(user_id=7, email_address="jack@bean.com", id=1),
                ),
                (
                    User(name="ed", id=8),
                    Address(user_id=8, email_address="ed@wood.com", id=2),
                ),
                (
                    User(name="ed", id=8),
                    Address(user_id=8, email_address="ed@bettyboop.com", id=3),
                ),
                (
                    User(name="ed", id=8),
                    Address(user_id=8, email_address="ed@lala.com", id=4),
                ),
            ],
        )

        adalias = aliased(Address)
        ua = aliased(User, sel.subquery())
        eq_(
            sess.query(ua)
            .join(ua.addresses.of_type(adalias))
            .add_entity(adalias)
            .order_by(ua.id)
            .order_by(adalias.id)
            .all(),
            [
                (
                    User(name="jack", id=7),
                    Address(user_id=7, email_address="jack@bean.com", id=1),
                ),
                (
                    User(name="ed", id=8),
                    Address(user_id=8, email_address="ed@wood.com", id=2),
                ),
                (
                    User(name="ed", id=8),
                    Address(user_id=8, email_address="ed@bettyboop.com", id=3),
                ),
                (
                    User(name="ed", id=8),
                    Address(user_id=8, email_address="ed@lala.com", id=4),
                ),
            ],
        )

    def test_more_joins(self):
        """Chained joins from an aliased subquery entity across m2m
        relationships filter correctly."""
        (
            users,
            Keyword,
            orders,
            items,
            order_items,
            Order,
            Item,
            User,
            keywords,
            item_keywords,
        ) = (
            self.tables.users,
            self.classes.Keyword,
            self.tables.orders,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.keywords,
            self.tables.item_keywords,
        )

        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={"orders": relationship(Order, backref="user")},
        )  # o2m, m2o
        self.mapper_registry.map_imperatively(
            Order,
            orders,
            properties={
                "items": relationship(
                    Item, secondary=order_items, order_by=items.c.id
                )
            },
        )  # m2m
        self.mapper_registry.map_imperatively(
            Item,
            items,
            properties={
                "keywords": relationship(
                    Keyword, secondary=item_keywords, order_by=keywords.c.id
                )
            },
        )  # m2m
        self.mapper_registry.map_imperatively(Keyword, keywords)

        sess = fixture_session()
        sel = users.select().where(users.c.id.in_([7, 8]))
        ua = aliased(User, sel.subquery())

        eq_(
            sess.query(ua)
            .join(ua.orders)
            .join(Order.items)
            .join(Item.keywords)
            .filter(Keyword.name.in_(["red", "big", "round"]))
            .all(),
            [User(name="jack", id=7)],
        )

    def test_very_nested_joins_with_joinedload(self):
        """joinedload chains combined with explicit joins emit a single
        query and populate the full nested collections."""
        (
            users,
            Keyword,
            orders,
            items,
            order_items,
            Order,
            Item,
            User,
            keywords,
            item_keywords,
        ) = (
            self.tables.users,
            self.classes.Keyword,
            self.tables.orders,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.keywords,
            self.tables.item_keywords,
        )

        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={"orders": relationship(Order, backref="user")},
        )  # o2m, m2o
        self.mapper_registry.map_imperatively(
            Order,
            orders,
            properties={
                "items": relationship(
                    Item, secondary=order_items, order_by=items.c.id
                )
            },
        )  # m2m
        self.mapper_registry.map_imperatively(
            Item,
            items,
            properties={
                "keywords": relationship(
                    Keyword, secondary=item_keywords, order_by=keywords.c.id
                )
            },
        )  # m2m
        self.mapper_registry.map_imperatively(Keyword, keywords)

        sess = fixture_session()
        sel = users.select().where(users.c.id.in_([7, 8]))
        ua = aliased(User, sel.subquery())

        def go():
            eq_(
                sess.query(ua)
                .options(
                    joinedload(ua.orders)
                    .joinedload(Order.items)
                    .joinedload(Item.keywords)
                )
                .join(ua.orders)
                .join(Order.items)
                .join(Item.keywords)
                .filter(Keyword.name.in_(["red", "big", "round"]))
                .all(),
                [
                    User(
                        name="jack",
                        orders=[
                            Order(
                                description="order 1",
                                items=[
                                    Item(
                                        description="item 1",
                                        keywords=[
                                            Keyword(name="red"),
                                            Keyword(name="big"),
                                            Keyword(name="round"),
                                        ],
                                    ),
                                    Item(
                                        description="item 2",
                                        keywords=[
                                            Keyword(name="red", id=2),
                                            Keyword(name="small", id=5),
                                            Keyword(name="square"),
                                        ],
                                    ),
                                    Item(
                                        description="item 3",
                                        keywords=[
                                            Keyword(name="green", id=3),
                                            Keyword(name="big", id=4),
                                            Keyword(name="round", id=6),
                                        ],
                                    ),
                                ],
                            ),
                            Order(
                                description="order 3",
                                items=[
                                    Item(
                                        description="item 3",
                                        keywords=[
                                            Keyword(name="green", id=3),
                                            Keyword(name="big", id=4),
                                            Keyword(name="round", id=6),
                                        ],
                                    ),
                                    Item(
                                        description="item 4", keywords=[], id=4
                                    ),
                                    Item(
                                        description="item 5", keywords=[], id=5
                                    ),
                                ],
                            ),
                            Order(
                                description="order 5",
                                items=[
                                    Item(description="item 5", keywords=[])
                                ],
                            ),
                        ],
                    )
                ],
            )

        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()
        sel2 = orders.select().where(orders.c.id.in_([1, 2, 3]))
        oa = aliased(Order, sel2.subquery())
        eq_(
            sess.query(oa)
            .join(oa.items)
            .join(Item.keywords)
            .filter(Keyword.name == "red")
            .order_by(oa.id)
            .all(),
            [
                Order(description="order 1", id=1),
                Order(description="order 2", id=2),
            ],
        )

    def test_replace_with_eager(self):
        """joinedload against an aliased subquery entity populates the
        collection in one query for all/filter/limit variants."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )

        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "addresses": relationship(Address, order_by=addresses.c.id)
            },
        )
        self.mapper_registry.map_imperatively(Address, addresses)

        sel = users.select().where(users.c.id.in_([7, 8]))
        sess = fixture_session()
        ua = aliased(User, sel.subquery())

        def go():
            eq_(
                sess.query(ua)
                .options(joinedload(ua.addresses))
                .order_by(ua.id)
                .all(),
                [
                    User(id=7, addresses=[Address(id=1)]),
                    User(
                        id=8,
                        addresses=[
                            Address(id=2),
                            Address(id=3),
                            Address(id=4),
                        ],
                    ),
                ],
            )

        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()

        def go():
            eq_(
                sess.query(ua)
                .options(joinedload(ua.addresses))
                .filter(ua.id == 8)
                .order_by(ua.id)
                .all(),
                [
                    User(
                        id=8,
                        addresses=[
                            Address(id=2),
                            Address(id=3),
                            Address(id=4),
                        ],
                    )
                ],
            )

        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()

        def go():
            eq_(
                sess.query(ua)
                .options(joinedload(ua.addresses))
                .order_by(ua.id)
                .offset(1)
                .limit(1)
                .scalar(),
                User(
                    id=8,
                    addresses=[Address(id=2), Address(id=3), Address(id=4)],
                ),
            )

        self.assert_sql_count(testing.db, go, 1)
| SelectFromTest |
python | redis__redis-py | tests/test_scenario/test_maint_notifications.py | {
"start": 1012,
"end": 41149
} | class ____:
"""
Test Redis Enterprise maintenance push notifications with real cluster
operations.
"""
@pytest.fixture(autouse=True)
def setup_and_cleanup(
self,
client_maint_notifications: Redis,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
endpoint_name: str,
):
# Initialize cleanup flags first to ensure they exist even if setup fails
self._failover_executed = False
self._migration_executed = False
self._bind_executed = False
self.target_node = None
self.empty_node = None
self.endpoint_id = None
try:
self.target_node, self.empty_node = (
ClusterOperations.find_target_node_and_empty_node(
fault_injector_client, endpoints_config
)
)
logging.info(
f"Using target_node: {self.target_node}, empty_node: {self.empty_node}"
)
except Exception as e:
pytest.fail(f"Failed to find target and empty nodes: {e}")
try:
self.endpoint_id = ClusterOperations.find_endpoint_for_bind(
fault_injector_client, endpoints_config, endpoint_name
)
logging.info(f"Using endpoint: {self.endpoint_id}")
except Exception as e:
pytest.fail(f"Failed to find endpoint for bind operation: {e}")
# Ensure setup completed successfully
if not self.target_node or not self.empty_node:
pytest.fail("Setup failed: target_node or empty_node not available")
if not self.endpoint_id:
pytest.fail("Setup failed: endpoint_id not available")
# Yield control to the test
yield
# Cleanup code - this will run even if the test fails
logging.info("Starting cleanup...")
try:
client_maint_notifications.close()
except Exception as e:
logging.error(f"Failed to close client: {e}")
# Only attempt cleanup if we have the necessary attributes and they were executed
if self._failover_executed:
try:
self._execute_failover(fault_injector_client, endpoints_config)
logging.info("Failover cleanup completed")
except Exception as e:
logging.error(f"Failed to revert failover: {e}")
logging.info("Cleanup finished")
def _execute_failover(
self,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
):
failover_result = ClusterOperations.execute_failover(
fault_injector_client, endpoints_config
)
self._failover_executed = True
logging.debug(f"Failover result: {failover_result}")
def _execute_migration(
self,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
target_node: str,
empty_node: str,
):
migrate_action_id = ClusterOperations.execute_rladmin_migrate(
fault_injector=fault_injector_client,
endpoint_config=endpoints_config,
target_node=target_node,
empty_node=empty_node,
)
self._migration_executed = True
migrate_result = fault_injector_client.get_operation_result(
migrate_action_id, timeout=MIGRATE_TIMEOUT
)
logging.debug(f"Migration result: {migrate_result}")
def _execute_bind(
self,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
endpoint_id: str,
):
bind_action_id = ClusterOperations.execute_rladmin_bind_endpoint(
fault_injector_client, endpoints_config, endpoint_id
)
self._bind_executed = True
bind_result = fault_injector_client.get_operation_result(
bind_action_id, timeout=BIND_TIMEOUT
)
logging.debug(f"Bind result: {bind_result}")
def _execute_migrate_bind_flow(
self,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
target_node: str,
empty_node: str,
endpoint_id: str,
):
self._execute_migration(
fault_injector_client=fault_injector_client,
endpoints_config=endpoints_config,
target_node=target_node,
empty_node=empty_node,
)
self._execute_bind(
fault_injector_client=fault_injector_client,
endpoints_config=endpoints_config,
endpoint_id=endpoint_id,
)
def _get_all_connections_in_pool(self, client: Redis) -> List[ConnectionInterface]:
connections = []
if hasattr(client.connection_pool, "_available_connections"):
for conn in client.connection_pool._available_connections:
connections.append(conn)
if hasattr(client.connection_pool, "_in_use_connections"):
for conn in client.connection_pool._in_use_connections:
connections.append(conn)
if hasattr(client.connection_pool, "_connections"):
# This is the case for BlockingConnectionPool
for conn in client.connection_pool._connections:
connections.append(conn)
return connections
def _validate_maintenance_state(
self, client: Redis, expected_matching_conns_count: int
):
"""Validate the client connections are in the expected state after migration."""
matching_conns_count = 0
connections = self._get_all_connections_in_pool(client)
for conn in connections:
if (
conn._sock is not None
and conn._sock.gettimeout() == RELAXED_TIMEOUT
and conn.maintenance_state == MaintenanceState.MAINTENANCE
):
matching_conns_count += 1
assert matching_conns_count == expected_matching_conns_count
def _validate_moving_state(
self,
client: Redis,
configured_endpoint_type: EndpointType,
expected_matching_connected_conns_count: int,
expected_matching_disconnected_conns_count: int,
):
"""Validate the client connections are in the expected state after migration."""
matching_connected_conns_count = 0
matching_disconnected_conns_count = 0
connections = self._get_all_connections_in_pool(client)
for conn in connections:
endpoint_configured_correctly = bool(
(
configured_endpoint_type == EndpointType.NONE
and conn.host == conn.orig_host_address
)
or (
configured_endpoint_type != EndpointType.NONE
and conn.host != conn.orig_host_address
and (
configured_endpoint_type
== MaintNotificationsConfig().get_endpoint_type(conn.host, conn)
)
)
)
if (
conn._sock is not None
and conn._sock.gettimeout() == RELAXED_TIMEOUT
and conn.maintenance_state == MaintenanceState.MOVING
and endpoint_configured_correctly
):
matching_connected_conns_count += 1
elif (
conn._sock is None
and conn.maintenance_state == MaintenanceState.MOVING
and conn.socket_timeout == RELAXED_TIMEOUT
and endpoint_configured_correctly
):
matching_disconnected_conns_count += 1
else:
pass
assert matching_connected_conns_count == expected_matching_connected_conns_count
assert (
matching_disconnected_conns_count
== expected_matching_disconnected_conns_count
)
def _validate_default_state(
self, client: Redis, expected_matching_conns_count: int
):
"""Validate the client connections are in the expected state after migration."""
matching_conns_count = 0
connections = self._get_all_connections_in_pool(client)
for conn in connections:
if conn._sock is None:
if (
conn.maintenance_state == MaintenanceState.NONE
and conn.socket_timeout == CLIENT_TIMEOUT
and conn.host == conn.orig_host_address
):
matching_conns_count += 1
elif (
conn._sock.gettimeout() == CLIENT_TIMEOUT
and conn.maintenance_state == MaintenanceState.NONE
and conn.host == conn.orig_host_address
):
matching_conns_count += 1
assert matching_conns_count == expected_matching_conns_count
def _validate_default_notif_disabled_state(
self, client: Redis, expected_matching_conns_count: int
):
"""Validate the client connections are in the expected state after migration."""
matching_conns_count = 0
connections = self._get_all_connections_in_pool(client)
for conn in connections:
if conn._sock is None:
if (
conn.maintenance_state == MaintenanceState.NONE
and conn.socket_timeout == CLIENT_TIMEOUT
and not hasattr(conn, "orig_host_address")
):
matching_conns_count += 1
elif (
conn._sock.gettimeout() == CLIENT_TIMEOUT
and conn.maintenance_state == MaintenanceState.NONE
and not hasattr(conn, "orig_host_address")
):
matching_conns_count += 1
assert matching_conns_count == expected_matching_conns_count
@pytest.mark.timeout(300) # 5 minutes timeout for this test
def test_receive_failing_over_and_failed_over_push_notification(
self,
client_maint_notifications: Redis,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
):
"""
Test the push notifications are received when executing cluster operations.
"""
logging.info("Creating one connection in the pool.")
conn = client_maint_notifications.connection_pool.get_connection()
logging.info("Executing failover command...")
failover_thread = Thread(
target=self._execute_failover,
name="failover_thread",
args=(fault_injector_client, endpoints_config),
)
failover_thread.start()
logging.info("Waiting for FAILING_OVER push notifications...")
ClientValidations.wait_push_notification(
client_maint_notifications, timeout=FAILOVER_TIMEOUT, connection=conn
)
logging.info("Validating connection maintenance state...")
assert conn.maintenance_state == MaintenanceState.MAINTENANCE
assert conn._sock.gettimeout() == RELAXED_TIMEOUT
logging.info("Waiting for FAILED_OVER push notifications...")
ClientValidations.wait_push_notification(
client_maint_notifications, timeout=FAILOVER_TIMEOUT, connection=conn
)
logging.info("Validating connection default states is restored...")
assert conn.maintenance_state == MaintenanceState.NONE
assert conn._sock.gettimeout() == CLIENT_TIMEOUT
logging.info("Releasing connection back to the pool...")
client_maint_notifications.connection_pool.release(conn)
failover_thread.join()
@pytest.mark.timeout(300) # 5 minutes timeout for this test
def test_receive_migrating_and_moving_push_notification(
self,
client_maint_notifications: Redis,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
):
"""
Test the push notifications are received when executing cluster operations.
"""
logging.info("Executing rladmin migrate command...")
migrate_thread = Thread(
target=self._execute_migration,
name="migrate_thread",
args=(
fault_injector_client,
endpoints_config,
self.target_node,
self.empty_node,
),
)
migrate_thread.start()
logging.info("Waiting for MIGRATING push notifications...")
ClientValidations.wait_push_notification(
client_maint_notifications, timeout=MIGRATE_TIMEOUT
)
logging.info("Validating connection migrating state...")
conn = client_maint_notifications.connection_pool.get_connection()
assert conn.maintenance_state == MaintenanceState.MAINTENANCE
assert conn._sock.gettimeout() == RELAXED_TIMEOUT
client_maint_notifications.connection_pool.release(conn)
logging.info("Waiting for MIGRATED push notifications...")
ClientValidations.wait_push_notification(
client_maint_notifications, timeout=MIGRATE_TIMEOUT
)
logging.info("Validating connection states...")
conn = client_maint_notifications.connection_pool.get_connection()
assert conn.maintenance_state == MaintenanceState.NONE
assert conn._sock.gettimeout() == CLIENT_TIMEOUT
client_maint_notifications.connection_pool.release(conn)
migrate_thread.join()
logging.info("Executing rladmin bind endpoint command...")
bind_thread = Thread(
target=self._execute_bind,
name="bind_thread",
args=(fault_injector_client, endpoints_config, self.endpoint_id),
)
bind_thread.start()
logging.info("Waiting for MOVING push notifications...")
ClientValidations.wait_push_notification(
client_maint_notifications, timeout=BIND_TIMEOUT
)
logging.info("Validating connection states...")
conn = client_maint_notifications.connection_pool.get_connection()
assert conn.maintenance_state == MaintenanceState.MOVING
assert conn._sock.gettimeout() == RELAXED_TIMEOUT
logging.info("Waiting for moving ttl to expire")
time.sleep(BIND_TIMEOUT)
logging.info("Validating connection states...")
assert conn.maintenance_state == MaintenanceState.NONE
assert conn.socket_timeout == CLIENT_TIMEOUT
assert conn._sock.gettimeout() == CLIENT_TIMEOUT
client_maint_notifications.connection_pool.release(conn)
bind_thread.join()
@pytest.mark.timeout(300) # 5 minutes timeout
@pytest.mark.parametrize(
"endpoint_type",
[
EndpointType.EXTERNAL_FQDN,
EndpointType.EXTERNAL_IP,
EndpointType.NONE,
],
)
def test_timeout_handling_during_migrating_and_moving(
self,
endpoint_type: EndpointType,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
):
"""
Test the push notifications are received when executing cluster operations.
"""
logging.info(f"Testing timeout handling for endpoint type: {endpoint_type}")
client = _get_client_maint_notifications(
endpoints_config=endpoints_config, endpoint_type=endpoint_type
)
# Create three connections in the pool
logging.info("Creating three connections in the pool.")
conns = []
for _ in range(3):
conns.append(client.connection_pool.get_connection())
# Release the connections
for conn in conns:
client.connection_pool.release(conn)
logging.info("Executing rladmin migrate command...")
migrate_thread = Thread(
target=self._execute_migration,
name="migrate_thread",
args=(
fault_injector_client,
endpoints_config,
self.target_node,
self.empty_node,
),
)
migrate_thread.start()
logging.info("Waiting for MIGRATING push notifications...")
# this will consume the notification in one of the connections
ClientValidations.wait_push_notification(client, timeout=MIGRATE_TIMEOUT)
self._validate_maintenance_state(client, expected_matching_conns_count=1)
self._validate_default_state(client, expected_matching_conns_count=2)
logging.info("Waiting for MIGRATED push notifications...")
ClientValidations.wait_push_notification(client, timeout=MIGRATE_TIMEOUT)
logging.info("Validating connection states after MIGRATED ...")
self._validate_default_state(client, expected_matching_conns_count=3)
migrate_thread.join()
logging.info("Executing rladmin bind endpoint command...")
bind_thread = Thread(
target=self._execute_bind,
name="bind_thread",
args=(fault_injector_client, endpoints_config, self.endpoint_id),
)
bind_thread.start()
logging.info("Waiting for MOVING push notifications...")
# this will consume the notification in one of the connections
# and will handle the states of the rest
# the consumed connection will be disconnected during
# releasing it back to the pool and as a result we will have
# 3 disconnected connections in the pool
ClientValidations.wait_push_notification(client, timeout=BIND_TIMEOUT)
if endpoint_type == EndpointType.NONE:
logging.info(
"Waiting for moving ttl/2 to expire to validate proactive reconnection"
)
time.sleep(8)
logging.info("Validating connections states...")
self._validate_moving_state(
client,
endpoint_type,
expected_matching_connected_conns_count=0,
expected_matching_disconnected_conns_count=3,
)
# during get_connection() the connection will be reconnected
# either to the address provided in the moving notification or to the original address
# depending on the configured endpoint type
# with this call we test if we are able to connect to the new address
conn = client.connection_pool.get_connection()
self._validate_moving_state(
client,
endpoint_type,
expected_matching_connected_conns_count=1,
expected_matching_disconnected_conns_count=2,
)
client.connection_pool.release(conn)
logging.info("Waiting for moving ttl to expire")
time.sleep(BIND_TIMEOUT)
logging.info("Validating connection states...")
self._validate_default_state(client, expected_matching_conns_count=3)
bind_thread.join()
@pytest.mark.timeout(300) # 5 minutes timeout
@pytest.mark.parametrize(
"endpoint_type",
[
EndpointType.EXTERNAL_FQDN,
EndpointType.EXTERNAL_IP,
EndpointType.NONE,
],
)
def test_connection_handling_during_moving(
self,
endpoint_type: EndpointType,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
):
logging.info(f"Testing timeout handling for endpoint type: {endpoint_type}")
client = _get_client_maint_notifications(
endpoints_config=endpoints_config, endpoint_type=endpoint_type
)
logging.info("Creating one connection in the pool.")
first_conn = client.connection_pool.get_connection()
logging.info("Executing rladmin migrate command...")
migrate_thread = Thread(
target=self._execute_migration,
name="migrate_thread",
args=(
fault_injector_client,
endpoints_config,
self.target_node,
self.empty_node,
),
)
migrate_thread.start()
logging.info("Waiting for MIGRATING push notifications...")
# this will consume the notification in the provided connection
ClientValidations.wait_push_notification(
client, timeout=MIGRATE_TIMEOUT, connection=first_conn
)
self._validate_maintenance_state(client, expected_matching_conns_count=1)
logging.info("Waiting for MIGRATED push notification ...")
ClientValidations.wait_push_notification(
client, timeout=MIGRATE_TIMEOUT, connection=first_conn
)
client.connection_pool.release(first_conn)
migrate_thread.join()
logging.info("Executing rladmin bind endpoint command...")
bind_thread = Thread(
target=self._execute_bind,
name="bind_thread",
args=(fault_injector_client, endpoints_config, self.endpoint_id),
)
bind_thread.start()
logging.info("Waiting for MOVING push notifications on random connection ...")
# this will consume the notification in one of the connections
# and will handle the states of the rest
# the consumed connection will be disconnected during
# releasing it back to the pool and as a result we will have
# 3 disconnected connections in the pool
ClientValidations.wait_push_notification(client, timeout=BIND_TIMEOUT)
if endpoint_type == EndpointType.NONE:
logging.info(
"Waiting for moving ttl/2 to expire to validate proactive reconnection"
)
time.sleep(8)
# validate that new connections will also receive the moving notification
connections = []
for _ in range(3):
connections.append(client.connection_pool.get_connection())
for conn in connections:
client.connection_pool.release(conn)
logging.info("Validating connections states during MOVING ...")
# during get_connection() the existing connection will be reconnected
# either to the address provided in the moving notification or to the original address
# depending on the configured endpoint type
# with this call we test if we are able to connect to the new address
# new connection should also be marked as moving
self._validate_moving_state(
client,
endpoint_type,
expected_matching_connected_conns_count=3,
expected_matching_disconnected_conns_count=0,
)
logging.info("Waiting for moving ttl to expire")
time.sleep(BIND_TIMEOUT)
logging.info("Validating connection states after MOVING has expired ...")
self._validate_default_state(client, expected_matching_conns_count=3)
bind_thread.join()
@pytest.mark.timeout(300) # 5 minutes timeout
def test_old_connection_shutdown_during_moving(
self,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
):
# it is better to use ip for this test - enables validation that
# the connection is disconnected from the original address
# and connected to the new address
endpoint_type = EndpointType.EXTERNAL_IP
logging.info("Testing old connection shutdown during MOVING")
client = _get_client_maint_notifications(
endpoints_config=endpoints_config, endpoint_type=endpoint_type
)
logging.info("Starting migration ...")
migrate_thread = Thread(
target=self._execute_migration,
name="migrate_thread",
args=(
fault_injector_client,
endpoints_config,
self.target_node,
self.empty_node,
),
)
migrate_thread.start()
logging.info("Waiting for MIGRATING push notifications...")
ClientValidations.wait_push_notification(client, timeout=MIGRATE_TIMEOUT)
self._validate_maintenance_state(client, expected_matching_conns_count=1)
logging.info("Waiting for MIGRATED push notification ...")
ClientValidations.wait_push_notification(client, timeout=MIGRATE_TIMEOUT)
self._validate_default_state(client, expected_matching_conns_count=1)
migrate_thread.join()
moving_event = threading.Event()
def execute_commands(moving_event: threading.Event, errors: Queue):
while not moving_event.is_set():
try:
client.set("key", "value")
client.get("key")
except Exception as e:
errors.put(
f"Command failed in thread {threading.current_thread().name}: {e}"
)
logging.info("Starting rebind...")
bind_thread = Thread(
target=self._execute_bind,
name="bind_thread",
args=(fault_injector_client, endpoints_config, self.endpoint_id),
)
bind_thread.start()
errors = Queue()
threads_count = 10
futures = []
logging.info(f"Starting {threads_count} command execution threads...")
# Start the worker pool and submit N identical worker tasks
with ThreadPoolExecutor(
max_workers=threads_count, thread_name_prefix="command_execution_thread"
) as executor:
futures = [
executor.submit(execute_commands, moving_event, errors)
for _ in range(threads_count)
]
logging.info("Waiting for MOVING push notification ...")
# this will consume the notification in one of the connections
# and will handle the states of the rest
ClientValidations.wait_push_notification(client, timeout=BIND_TIMEOUT)
# set the event to stop the command execution threads
moving_event.set()
# Wait for all workers to finish and propagate any exceptions
for f in futures:
f.result()
# validate that all connections are either disconnected
# or connected to the new address
connections = self._get_all_connections_in_pool(client)
for conn in connections:
if conn._sock is not None:
assert conn.get_resolved_ip() == conn.host
assert conn.maintenance_state == MaintenanceState.MOVING
assert conn._sock.gettimeout() == RELAXED_TIMEOUT
assert conn.host != conn.orig_host_address
assert not conn.should_reconnect()
else:
assert conn.maintenance_state == MaintenanceState.MOVING
assert conn.socket_timeout == RELAXED_TIMEOUT
assert conn.host != conn.orig_host_address
assert not conn.should_reconnect()
# validate no errors were raised in the command execution threads
assert errors.empty(), f"Errors occurred in threads: {errors.queue}"
logging.info("Waiting for moving ttl to expire")
time.sleep(DEFAULT_BIND_TTL)
bind_thread.join()
@pytest.mark.timeout(300) # 5 minutes timeout
def test_new_connections_receive_moving(
self,
client_maint_notifications: Redis,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
):
logging.info("Creating one connection in the pool.")
first_conn = client_maint_notifications.connection_pool.get_connection()
logging.info("Executing rladmin migrate command...")
migrate_thread = Thread(
target=self._execute_migration,
name="migrate_thread",
args=(
fault_injector_client,
endpoints_config,
self.target_node,
self.empty_node,
),
)
migrate_thread.start()
logging.info("Waiting for MIGRATING push notifications...")
# this will consume the notification in the provided connection
ClientValidations.wait_push_notification(
client_maint_notifications, timeout=MIGRATE_TIMEOUT, connection=first_conn
)
self._validate_maintenance_state(
client_maint_notifications, expected_matching_conns_count=1
)
logging.info("Waiting for MIGRATED push notifications on both connections ...")
ClientValidations.wait_push_notification(
client_maint_notifications, timeout=MIGRATE_TIMEOUT, connection=first_conn
)
migrate_thread.join()
logging.info("Executing rladmin bind endpoint command...")
bind_thread = Thread(
target=self._execute_bind,
name="bind_thread",
args=(fault_injector_client, endpoints_config, self.endpoint_id),
)
bind_thread.start()
logging.info("Waiting for MOVING push notifications on random connection ...")
ClientValidations.wait_push_notification(
client_maint_notifications, timeout=BIND_TIMEOUT, connection=first_conn
)
old_address = first_conn._sock.getpeername()[0]
logging.info(f"The node address before bind: {old_address}")
logging.info(
"Creating new client to connect to the same node - new connections to this node should receive the moving notification..."
)
endpoint_type = EndpointType.EXTERNAL_IP
# create new client with new pool that should also receive the moving notification
new_client = _get_client_maint_notifications(
endpoints_config=endpoints_config,
endpoint_type=endpoint_type,
host_config=old_address,
)
# the moving notification will be consumed as
# part of the client connection setup, so we don't need
# to wait for it explicitly with wait_push_notification
logging.info(
"Creating one connection in the new pool that should receive the moving notification."
)
new_client_conn = new_client.connection_pool.get_connection()
logging.info("Validating connections states during MOVING ...")
self._validate_moving_state(
new_client,
endpoint_type,
expected_matching_connected_conns_count=1,
expected_matching_disconnected_conns_count=0,
)
logging.info("Waiting for moving thread to be completed ...")
bind_thread.join()
new_client.connection_pool.release(new_client_conn)
new_client.close()
client_maint_notifications.connection_pool.release(first_conn)
@pytest.mark.timeout(300) # 5 minutes timeout
def test_new_connections_receive_migrating(
self,
client_maint_notifications: Redis,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
):
logging.info("Creating one connection in the pool.")
first_conn = client_maint_notifications.connection_pool.get_connection()
logging.info("Executing rladmin migrate command...")
migrate_thread = Thread(
target=self._execute_migration,
name="migrate_thread",
args=(
fault_injector_client,
endpoints_config,
self.target_node,
self.empty_node,
),
)
migrate_thread.start()
logging.info("Waiting for MIGRATING push notifications...")
# this will consume the notification in the provided connection
ClientValidations.wait_push_notification(
client_maint_notifications, timeout=MIGRATE_TIMEOUT, connection=first_conn
)
self._validate_maintenance_state(
client_maint_notifications, expected_matching_conns_count=1
)
# validate that new connections will also receive the migrating notification
# it should be received as part of the client connection setup flow
logging.info(
"Creating second connection that should receive the migrating notification as well."
)
second_connection = client_maint_notifications.connection_pool.get_connection()
self._validate_maintenance_state(
client_maint_notifications, expected_matching_conns_count=2
)
logging.info("Waiting for MIGRATED push notifications on both connections ...")
ClientValidations.wait_push_notification(
client_maint_notifications, timeout=MIGRATE_TIMEOUT, connection=first_conn
)
ClientValidations.wait_push_notification(
client_maint_notifications,
timeout=MIGRATE_TIMEOUT,
connection=second_connection,
)
migrate_thread.join()
logging.info("Executing rladmin bind endpoint command for cleanup...")
bind_thread = Thread(
target=self._execute_bind,
name="bind_thread",
args=(fault_injector_client, endpoints_config, self.endpoint_id),
)
bind_thread.start()
bind_thread.join()
client_maint_notifications.connection_pool.release(first_conn)
client_maint_notifications.connection_pool.release(second_connection)
@pytest.mark.timeout(300)
def test_disabled_handling_during_migrating_and_moving(
self,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
):
logging.info("Creating client with disabled notifications.")
client = _get_client_maint_notifications(
endpoints_config=endpoints_config,
enable_maintenance_notifications=False,
)
logging.info("Creating one connection in the pool.")
first_conn = client.connection_pool.get_connection()
logging.info("Executing rladmin migrate command...")
migrate_thread = Thread(
target=self._execute_migration,
name="migrate_thread",
args=(
fault_injector_client,
endpoints_config,
self.target_node,
self.empty_node,
),
)
migrate_thread.start()
logging.info("Waiting for MIGRATING push notifications...")
# this will consume the notification in the provided connection if it arrives
ClientValidations.wait_push_notification(
client, timeout=5, fail_on_timeout=False, connection=first_conn
)
self._validate_default_notif_disabled_state(
client, expected_matching_conns_count=1
)
# validate that new connections will also receive the moving notification
logging.info(
"Creating second connection in the pool"
" and expect it not to receive the migrating as well."
)
second_connection = client.connection_pool.get_connection()
ClientValidations.wait_push_notification(
client, timeout=5, fail_on_timeout=False, connection=second_connection
)
logging.info(
"Validating connection states after MIGRATING for both connections ..."
)
self._validate_default_notif_disabled_state(
client, expected_matching_conns_count=2
)
logging.info("Waiting for MIGRATED push notifications on both connections ...")
ClientValidations.wait_push_notification(
client, timeout=5, fail_on_timeout=False, connection=first_conn
)
ClientValidations.wait_push_notification(
client, timeout=5, fail_on_timeout=False, connection=second_connection
)
client.connection_pool.release(first_conn)
client.connection_pool.release(second_connection)
migrate_thread.join()
logging.info("Executing rladmin bind endpoint command...")
bind_thread = Thread(
target=self._execute_bind,
name="bind_thread",
args=(fault_injector_client, endpoints_config, self.endpoint_id),
)
bind_thread.start()
logging.info("Waiting for MOVING push notifications on random connection ...")
# this will consume the notification if it arrives in one of the connections
# and will handle the states of the rest
# the consumed connection will be disconnected during
# releasing it back to the pool and as a result we will have
# 3 disconnected connections in the pool
ClientValidations.wait_push_notification(
client,
timeout=10,
fail_on_timeout=False,
)
# validate that new connections will also receive the moving notification
connections = []
for _ in range(3):
connections.append(client.connection_pool.get_connection())
for conn in connections:
client.connection_pool.release(conn)
logging.info("Validating connections states during MOVING ...")
self._validate_default_notif_disabled_state(
client, expected_matching_conns_count=3
)
logging.info("Waiting for moving ttl to expire")
time.sleep(DEFAULT_BIND_TTL)
logging.info("Validating connection states after MOVING has expired ...")
self._validate_default_notif_disabled_state(
client, expected_matching_conns_count=3
)
bind_thread.join()
@pytest.mark.timeout(300)
@pytest.mark.parametrize(
"endpoint_type",
[
EndpointType.EXTERNAL_FQDN,
EndpointType.EXTERNAL_IP,
EndpointType.NONE,
],
)
def test_command_execution_during_migrating_and_moving(
self,
fault_injector_client: FaultInjectorClient,
endpoints_config: Dict[str, Any],
endpoint_type: EndpointType,
):
"""
Test command execution during migrating and moving notifications.
This test validates that:
1. Commands can be executed during MIGRATING and MOVING notifications
2. Commands are not blocked by the notifications
3. Commands are executed successfully
"""
errors = Queue()
execution_duration = 180
socket_timeout = 0.5
client = _get_client_maint_notifications(
endpoints_config=endpoints_config,
endpoint_type=endpoint_type,
disable_retries=True,
socket_timeout=socket_timeout,
enable_maintenance_notifications=True,
)
migrate_and_bind_thread = Thread(
target=self._execute_migrate_bind_flow,
name="migrate_and_bind_thread",
args=(
fault_injector_client,
endpoints_config,
self.target_node,
self.empty_node,
self.endpoint_id,
),
)
migrate_and_bind_thread.start()
def execute_commands(duration: int, errors: Queue):
start = time.time()
while time.time() - start < duration:
try:
client.set("key", "value")
client.get("key")
except Exception as e:
errors.put(
f"Command failed in thread {threading.current_thread().name}: {e}"
)
threads = []
for _ in range(10):
thread = Thread(
target=execute_commands,
name="command_execution_thread",
args=(
execution_duration,
errors,
),
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
migrate_and_bind_thread.join()
assert errors.empty(), f"Errors occurred in threads: {errors.queue}"
| TestPushNotifications |
python | aio-libs__aiohttp | tests/test_http_exceptions.py | {
"start": 2691,
"end": 3979
} | class ____:
def test_ctor(self) -> None:
err = http_exceptions.LineTooLong("spam", "10", "12")
assert err.code == 400
assert err.message == "Got more than 10 bytes (12) when reading spam."
assert err.headers is None
def test_pickle(self) -> None:
err = http_exceptions.LineTooLong(line="spam", limit="10", actual_size="12")
err.foo = "bar" # type: ignore[attr-defined]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(err, proto)
err2 = pickle.loads(pickled)
assert err2.code == 400
assert err2.message == ("Got more than 10 bytes (12) when reading spam.")
assert err2.headers is None
assert err2.foo == "bar"
def test_str(self) -> None:
err = http_exceptions.LineTooLong(line="spam", limit="10", actual_size="12")
expected = "400, message:\n Got more than 10 bytes (12) when reading spam."
assert str(err) == expected
def test_repr(self) -> None:
err = http_exceptions.LineTooLong(line="spam", limit="10", actual_size="12")
assert repr(err) == (
"<LineTooLong: 400, message='Got more than "
"10 bytes (12) when reading spam.'>"
)
| TestLineTooLong |
python | google__pytype | pytype/ast/visitor.py | {
"start": 22,
"end": 2310
} | class ____:
"""A base class for writing AST visitors.
Subclasses should define {visit,enter,leave}_X to process nodes of type X.
If a visit method returns a non-None value, the visited node is replaced
with that value.
Attributes:
_ast: Any module whose interface matches the standard ast library, such as
typed_ast. The same module must be used to generate the AST to visit.
"""
def __init__(self, ast, visit_decorators=True):
self._ast = ast
maybe_decorators = ["decorator_list"] if visit_decorators else []
self._node_children = {
self._ast.Module: ["body"],
self._ast.ClassDef: maybe_decorators + ["keywords", "bases", "body"],
self._ast.FunctionDef: maybe_decorators + ["body", "args", "returns"],
self._ast.Assign: ["targets", "value"],
}
def visit(self, node):
"""Does a post-order traversal of the AST."""
if isinstance(node, self._ast.AST):
self.enter(node)
for k, v in self._children(node):
ret = self.visit(v)
if ret is not None:
setattr(node, k, ret)
out = self._call_visitor(node)
self.leave(node)
if out is not None:
return out
elif isinstance(node, list):
for i, v in enumerate(node):
ret = self.visit(v)
if ret is not None:
node[i] = ret
def _children(self, node):
"""Children to recurse over."""
ks = self._node_children.get(node.__class__)
if ks:
return [(k, getattr(node, k)) for k in ks]
else:
return self._ast.iter_fields(node)
def _call_visitor(self, node):
method = "visit_" + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called when no visit function is found for a node type."""
del node # unused
def enter(self, node):
"""Does a pre-order traversal of the AST."""
method = "enter_" + node.__class__.__name__
visitor = getattr(self, method, None)
if visitor:
return visitor(node)
def leave(self, node):
"""Called after visit() to do any cleanup that enter() needs."""
method = "leave_" + node.__class__.__name__
visitor = getattr(self, method, None)
if visitor:
visitor(node)
| BaseVisitor |
python | getsentry__sentry | src/sentry/auth/access.py | {
"start": 23822,
"end": 25484
} | class ____(RpcBackedAccess):
"""Access to all an organization's teams and projects."""
def __init__(
self,
*,
rpc_user_organization_context: RpcUserOrganizationContext,
auth_state: RpcAuthState,
scopes: Iterable[str] | None,
):
super().__init__(
rpc_user_organization_context=rpc_user_organization_context,
auth_state=auth_state,
scopes_upper_bound=_wrap_scopes(scopes),
)
@cached_property
def scopes(self) -> frozenset[str]:
return frozenset(self.scopes_upper_bound or [])
@property
def has_global_access(self) -> bool:
return True
def has_team_access(self, team: Team) -> bool:
return bool(
team.organization_id == self.rpc_user_organization_context.organization.id
and team.status == TeamStatus.ACTIVE
)
def has_project_access(self, project: Project) -> bool:
return bool(
project.organization_id == self.rpc_user_organization_context.organization.id
and project.status == ObjectStatus.ACTIVE
)
@cached_property
def accessible_team_ids(self) -> frozenset[int]:
return frozenset(
t.id
for t in self.rpc_user_organization_context.organization.teams
if t.status == TeamStatus.ACTIVE
)
@cached_property
def accessible_project_ids(self) -> frozenset[int]:
return frozenset(
p.id
for p in self.rpc_user_organization_context.organization.projects
if p.status == ObjectStatus.ACTIVE
)
| ApiBackedOrganizationGlobalAccess |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_data_test.py | {
"start": 5649,
"end": 13148
} | class ____(test_util.TensorFlowTestCase):
def setUp(self):
self._dump_root = tempfile.mkdtemp()
def tearDown(self):
# Tear down temporary dump directory.
file_io.delete_recursively(self._dump_root)
def _makeDataDirWithMultipleDevicesAndDuplicateNodeNames(self):
cpu_0_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX
+ debug_data.DEVICE_TAG
+ ",job_localhost,replica_0,task_0,cpu_0",
)
gpu_0_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX
+ debug_data.DEVICE_TAG
+ ",job_localhost,replica_0,task_0,device_GPU_0",
)
gpu_1_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX
+ debug_data.DEVICE_TAG
+ ",job_localhost,replica_0,task_0,device_GPU_1",
)
os.makedirs(cpu_0_dir)
os.makedirs(gpu_0_dir)
os.makedirs(gpu_1_dir)
open(
os.path.join(cpu_0_dir, "node_foo_1_2_DebugIdentity_1472563253536386"),
"wb",
)
open(
os.path.join(gpu_0_dir, "node_foo_1_2_DebugIdentity_1472563253536385"),
"wb",
)
open(
os.path.join(gpu_1_dir, "node_foo_1_2_DebugIdentity_1472563253536387"),
"wb",
)
def testDebugDumpDir_nonexistentDumpRoot(self):
with self.assertRaisesRegex(IOError, "does not exist"):
debug_data.DebugDumpDir(tempfile.mkdtemp() + "_foo")
def testDebugDumpDir_invalidFileNamingPattern(self):
# File name with too few underscores should lead to an exception.
device_dir = os.path.join(
self._dump_root,
debug_data.METADATA_FILE_PREFIX
+ debug_data.DEVICE_TAG
+ ",job_localhost,replica_0,task_0,cpu_0",
)
os.makedirs(device_dir)
open(os.path.join(device_dir, "node1_DebugIdentity_1234"), "wb")
with self.assertRaisesRegex(
ValueError, "does not conform to the naming pattern"
):
debug_data.DebugDumpDir(self._dump_root)
def testDebugDumpDir_validDuplicateNodeNamesWithMultipleDevices(self):
self._makeDataDirWithMultipleDevicesAndDuplicateNodeNames()
graph_cpu_0 = graph_pb2.GraphDef()
node = graph_cpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/cpu:0"
graph_gpu_0 = graph_pb2.GraphDef()
node = graph_gpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:0"
graph_gpu_1 = graph_pb2.GraphDef()
node = graph_gpu_1.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:1"
dump_dir = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=[graph_cpu_0, graph_gpu_0, graph_gpu_1],
)
self.assertCountEqual(
[
"/job:localhost/replica:0/task:0/cpu:0",
"/job:localhost/replica:0/task:0/device:GPU:0",
"/job:localhost/replica:0/task:0/device:GPU:1",
],
dump_dir.devices(),
)
self.assertEqual(1472563253536385, dump_dir.t0)
self.assertEqual(3, dump_dir.size)
with self.assertRaisesRegex(ValueError, r"Invalid device name: "):
dump_dir.nodes("/job:localhost/replica:0/task:0/device:GPU:2")
self.assertCountEqual(
["node_foo_1", "node_foo_1", "node_foo_1"], dump_dir.nodes()
)
self.assertCountEqual(
["node_foo_1"],
dump_dir.nodes(device_name="/job:localhost/replica:0/task:0/cpu:0"),
)
def testDuplicateNodeNamesInGraphDefOfSingleDeviceRaisesException(self):
self._makeDataDirWithMultipleDevicesAndDuplicateNodeNames()
graph_cpu_0 = graph_pb2.GraphDef()
node = graph_cpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/cpu:0"
graph_gpu_0 = graph_pb2.GraphDef()
node = graph_gpu_0.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:0"
graph_gpu_1 = graph_pb2.GraphDef()
node = graph_gpu_1.node.add()
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:1"
node = graph_gpu_1.node.add() # Here is the duplicate.
node.name = "node_foo_1"
node.op = "FooOp"
node.device = "/job:localhost/replica:0/task:0/device:GPU:1"
with self.assertRaisesRegex(ValueError, r"Duplicate node name on device "):
debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=[graph_cpu_0, graph_gpu_0, graph_gpu_1],
)
def testDebugDumpDir_emptyDumpDir(self):
dump_dir = debug_data.DebugDumpDir(self._dump_root)
self.assertIsNone(dump_dir.t0)
self.assertEqual([], dump_dir.dumped_tensor_data)
def testDebugDumpDir_usesGfileGlob(self):
if platform.system() == "Windows":
self.skipTest("gfile.Glob is not used on Windows.")
self._makeDataDirWithMultipleDevicesAndDuplicateNodeNames()
def fake_gfile_glob(glob_pattern):
del glob_pattern
return []
with test.mock.patch.object(
gfile, "Glob", side_effect=fake_gfile_glob, autospec=True
) as fake:
debug_data.DebugDumpDir(self._dump_root)
expected_calls = [
test.mock.call(
os.path.join(
self._dump_root,
(
debug_data.METADATA_FILE_PREFIX
+ debug_data.CORE_METADATA_TAG
+ "*"
),
)
),
test.mock.call(
os.path.join(
self._dump_root,
(
debug_data.METADATA_FILE_PREFIX
+ debug_data.FETCHES_INFO_FILE_TAG
+ "*"
),
)
),
test.mock.call(
os.path.join(
self._dump_root,
(
debug_data.METADATA_FILE_PREFIX
+ debug_data.FEED_KEYS_INFO_FILE_TAG
+ "*"
),
)
),
test.mock.call(
os.path.join(
self._dump_root,
(
debug_data.METADATA_FILE_PREFIX
+ debug_data.DEVICE_TAG
+ "*"
),
)
),
]
fake.assert_has_calls(expected_calls, any_order=True)
def testValidationSucceedsOnDoubleSlashNodeName(self):
device_dir = os.path.join(
self._dump_root,
debug_data.device_name_to_device_path(
"/job:localhost/replica:0/task:0/cpu:0"
),
)
node_scope_dir = os.path.join(device_dir, "scope_A")
os.makedirs(node_scope_dir)
file_io.write_string_to_file(
os.path.join(node_scope_dir, "op_B_0_DebugIdentity_12345"), "dummy"
)
graph_def = graph_pb2.GraphDef()
node = graph_def.node.add()
# Previously double slash would have caused validation to fail. b/429335661
node.name = "scope_A//op_B"
node.op = "NoOp"
node.device = "/job:localhost/replica:0/task:0/cpu:0"
dump_dir = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=[graph_def]
)
self.assertEqual(1, dump_dir.size)
self.assertIn("scope_A/op_B", dump_dir.nodes()[0])
if __name__ == "__main__":
googletest.main()
| DebugDumpDirTest |
python | getsentry__sentry | src/sentry/analytics/events/search_saved.py | {
"start": 69,
"end": 270
} | class ____(analytics.Event):
user_id: int | None = None
default_user_id: int | str | None = None
project_id: int
organization_id: int
analytics.register(SearchSavedEvent)
| SearchSavedEvent |
python | jina-ai__jina | tests/integration/reduce/test_reduce.py | {
"start": 3834,
"end": 7651
} | class ____(Executor):
@requests
def endpoint(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'exec-status'
status = {
'shard_id': self.runtime_args.shard_id,
'happy_status': 'Hey there! Have a nice day :)',
}
return status
def test_reduce_needs(port_generator):
exposed_port = port_generator()
flow = (
Flow(port=exposed_port)
.add(uses=Executor1, name='pod0')
.add(uses=Executor2, needs='gateway', name='pod1')
.add(uses=Executor3, needs='gateway', name='pod2')
.add(needs=['pod0', 'pod1', 'pod2'], name='pod3')
)
with flow:
da = DocumentArray([Document() for _ in range(5)])
resp = Client(port=exposed_port).post('/', inputs=da, return_responses=True)
assert len(resp[0].docs) == 5
for doc in resp[0].docs:
assert doc.text == 'exec1'
assert doc.tags == {'a': 'b'}
assert doc.modality == 'image'
assert (doc.embedding == np.zeros(3)).all()
def test_uses_before_reduce(port_generator):
exposed_port = port_generator()
flow = (
Flow(port=exposed_port)
.add(uses=Executor1, name='pod0')
.add(uses=Executor2, needs='gateway', name='pod1')
.add(uses=Executor3, needs='gateway', name='pod2')
.add(needs=['pod0', 'pod1', 'pod2'], name='pod3', uses_before='BaseExecutor')
)
with flow:
da = DocumentArray([Document() for _ in range(5)])
resp = Client(port=exposed_port).post('/', inputs=da, return_responses=True)
# assert reduce happened because there is only BaseExecutor as uses_before
assert len(resp[0].docs) == 5
def test_uses_before_no_reduce_real_executor(port_generator):
exposed_port = port_generator()
flow = (
Flow(port=exposed_port)
.add(uses=Executor1, name='pod0')
.add(uses=Executor2, needs='gateway', name='pod1')
.add(uses=Executor3, needs='gateway', name='pod2')
.add(needs=['pod0', 'pod1', 'pod2'], name='pod3', uses=DummyExecutor)
)
with flow:
da = DocumentArray([Document() for _ in range(5)])
resp = Client(port=exposed_port).post('/', inputs=da, return_responses=True)
# assert no reduce happened
assert len(resp[0].docs) == 1
assert resp[0].docs[0].id == 'fake_document'
def test_uses_before_no_reduce_real_executor_uses(port_generator):
exposed_port = port_generator()
flow = (
Flow(port=exposed_port)
.add(uses=Executor1, name='pod0')
.add(uses=Executor2, needs='gateway', name='pod1')
.add(uses=Executor3, needs='gateway', name='pod2')
.add(needs=['pod0', 'pod1', 'pod2'], name='pod3', uses=DummyExecutor)
)
with flow:
da = DocumentArray([Document() for _ in range(5)])
resp = Client(port=exposed_port).post('/', inputs=da, return_responses=True)
# assert no reduce happened
assert len(resp[0].docs) == 1
assert resp[0].docs[0].id == 'fake_document'
def test_reduce_status(port_generator):
exposed_port = port_generator()
n_shards = 2
flow = Flow(port=exposed_port).add(
uses=ExecutorStatus, name='pod0', shards=n_shards, polling='all'
)
with flow as f:
da = DocumentArray([Document() for _ in range(5)])
resp = Client(port=exposed_port).post(
'/status', parameters={'foo': 'bar'}, inputs=da, return_responses=True
)
assert resp[0].parameters['foo'] == 'bar'
assert len(resp[0].parameters['__results__']) == n_shards
for _, param in resp[0].parameters['__results__'].items():
assert 'shard_id' in param.keys()
assert 'happy_status' in param.keys()
for doc in resp[0].docs:
assert doc.text == 'exec-status'
| ExecutorStatus |
python | ansible__ansible | test/units/_internal/_errors/test_error_utils.py | {
"start": 258,
"end": 449
} | class ____(Exception, _error_utils.ContributesToTaskResult):
@property
def result_contribution(self) -> c.Mapping[str, object]:
return dict(some_flag=True)
| _TestContributesError |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataplex.py | {
"start": 8013,
"end": 9151
} | class ____:
@mock.patch(HOOK_STR)
@mock.patch(TASK_STR)
def test_execute(self, task_mock, hook_mock):
op = DataplexGetTaskOperator(
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
dataplex_task_id=DATAPLEX_TASK_ID,
task_id="get_dataplex_task",
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.wait_for_operation.return_value = None
task_mock.return_value.to_dict.return_value = None
op.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.get_task.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
dataplex_task_id=DATAPLEX_TASK_ID,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataplexGetTaskOperator |
python | pydantic__pydantic | tests/mypy/modules/plugin_fail.py | {
"start": 2942,
"end": 3133
} | class ____(BaseModel, validate_by_name=True):
x: str = Field(..., alias=x_alias)
z: int
KwargsDynamicAliasModel(y='y', z=1)
KwargsDynamicAliasModel(x='y', z=1)
| KwargsDynamicAliasModel |
python | cherrypy__cherrypy | cherrypy/_cptools.py | {
"start": 14259,
"end": 15104
} | class ____(Tool):
"""Caching Tool for CherryPy."""
def _wrapper(self, **kwargs):
request = cherrypy.serving.request
if _caching.get(**kwargs):
request.handler = None
else:
if request.cacheable:
# Note the devious technique here of adding hooks on the fly
request.hooks.attach(
'before_finalize',
_caching.tee_output,
priority=100,
)
_wrapper.priority = 90
def _setup(self):
"""Wire caching into ``cherrypy.request``."""
conf = self._merged_args()
p = conf.pop('priority', None)
cherrypy.serving.request.hooks.attach(
'before_handler',
self._wrapper,
priority=p,
**conf,
)
| CachingTool |
python | numba__numba | numba/tests/test_operators.py | {
"start": 45637,
"end": 46953
} | class ____(TestCase):
"""
Test the ** operator with a static exponent, to exercise a
dedicated optimization.
"""
def _check_pow(self, exponents, values):
for exp in exponents:
# test against non-static version of the @jit-ed function
regular_func = LiteralOperatorImpl.pow_usecase
static_func = make_static_power(exp)
static_cfunc = jit(nopython=True)(static_func)
regular_cfunc = jit(nopython=True)(regular_func)
for v in values:
try:
expected = regular_cfunc(v, exp)
except ZeroDivisionError:
with self.assertRaises(ZeroDivisionError):
static_cfunc(v)
else:
got = static_cfunc(v)
self.assertPreciseEqual(expected, got, prec='double')
def test_int_values(self):
exponents = [1, 2, 3, 5, 17, 0, -1, -2, -3]
vals = [0, 1, 3, -1, -4, np.int8(-3), np.uint16(4)]
self._check_pow(exponents, vals)
def test_real_values(self):
exponents = [1, 2, 3, 5, 17, 0, -1, -2, -3, 0x111111, -0x111112]
vals = [1.5, 3.25, -1.25, np.float32(-2.0), float('inf'), float('nan')]
self._check_pow(exponents, vals)
| TestStaticPower |
python | PyCQA__pylint | tests/functional/a/arguments_differ_issue5371.py | {
"start": 79,
"end": 331
} | class ____(Enum):
"""
Enum._generate_next_value_() in the stdlib currently lacks a
@staticmethod decorator.
"""
@staticmethod
def _generate_next_value_(name: str, start: int, count: int, last_values: list):
return 42
| MyEnum |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_optimize07.py | {
"start": 315,
"end": 1220
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("optimize07.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
worksheet = workbook.add_worksheet()
strings = [
"_",
"_x",
"_x0",
"_x00",
"_x000",
"_x0000",
"_x0000_",
"_x005F_",
"_x000G_",
"_X0000_",
"_x000a_",
"_x000A_",
"_x0000__x0000_",
"__x0000__",
]
worksheet.write_column(0, 0, strings)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/asset.py | {
"start": 935,
"end": 1136
} | class ____(BaseModel):
"""Asset schema for responses with fields that are needed for Runtime."""
name: str
uri: str
group: str
extra: dict[str, JsonValue] | None = None
| AssetResponse |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_data_bar12.py | {
"start": 345,
"end": 5242
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.index = 0
worksheet.name = "Sheet1"
worksheet.conditional_format(
"A1",
{
"type": "data_bar",
"data_bar_2010": True,
},
)
worksheet.add_sparkline("A2", {"range": "Sheet1!B2"})
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData/>
<conditionalFormatting sqref="A1">
<cfRule type="dataBar" priority="1">
<dataBar>
<cfvo type="min"/>
<cfvo type="max"/>
<color rgb="FF638EC6"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000001}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{78C0D931-6437-407d-A8EE-F0AAD7539E65}">
<x14:conditionalFormattings>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000001}">
<x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="autoMin"/>
<x14:cfvo type="autoMax"/>
<x14:borderColor rgb="FF638EC6"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A1</xm:sqref>
</x14:conditionalFormatting>
</x14:conditionalFormattings>
</ext>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
<x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:sparklineGroup displayEmptyCellsAs="gap">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921"/>
<x14:colorLast theme="4" tint="0.39997558519241921"/>
<x14:colorHigh theme="4"/>
<x14:colorLow theme="4"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!B2</xm:f>
<xm:sqref>A2</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
</x14:sparklineGroups>
</ext>
</extLst>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.