language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 30041,
"end": 33222
} | class ____(state_types.Transform):
collective_axes: tuple[Hashable, ...]
def transform_shape(self, shape):
return shape
def transform_dtype(self, dtype):
return dtype
def untransform_index(
self, idxs: tuple[Index, ...]
) -> tuple[tuple[Index, ...], state_types.Transform]:
return idxs, self
def tree_flatten(self):
return (), self.collective_axes
@classmethod
def tree_unflatten(cls, metadata, arrays):
return cls(metadata[0])
def remote_ref(
ref: _Ref,
device_id: jax.typing.ArrayLike,
device_id_type: pallas_primitives.DeviceIdType = pallas_primitives.DeviceIdType.MESH,
) -> pallas_core.TransformedRef:
"""Translate memref to a symmetric memref on a peer device."""
if not isinstance(ref, pallas_core.TransformedRef):
if not isinstance(jax_core.get_aval(ref), state_types.AbstractRef):
raise TypeError("ref must be a reference")
ref = pallas_core.TransformedRef(ref, transforms=())
if any(isinstance(t, MulticastRef) for t in ref.transforms):
raise ValueError("Can't make a multicast reference into a peer reference.")
return pallas_core.TransformedRef(
ref.ref, (*ref.transforms, PeerMemRef(device_id, device_id_type)),
)
def multicast_ref(
ref: _Ref,
collective_axes: Hashable | tuple[Hashable, ...],
) -> pallas_core.TransformedRef:
"""Return a multicast reference for cross-device operations.
Args:
ref: The reference to transform.
collective_axes: The JAX mesh axes indicating the devices to operate on.
"""
if not isinstance(collective_axes, tuple):
collective_axes = (collective_axes,)
if not isinstance(ref, pallas_core.TransformedRef):
if not isinstance(jax_core.get_aval(ref), state_types.AbstractRef):
raise TypeError("ref must be a reference")
ref = pallas_core.TransformedRef(ref, transforms=())
if any(isinstance(t, PeerMemRef) for t in ref.transforms):
raise ValueError("Can't make a peer reference into a multicast reference.")
return pallas_core.TransformedRef(
ref.ref, (*ref.transforms, MulticastRef(collective_axes)),
)
def transform_ref(
ref: pallas_core.TransformedRef,
transform: state_types.Transform
) -> pallas_core.TransformedRef:
if not isinstance(ref, pallas_core.TransformedRef):
if not isinstance(jax_core.get_aval(ref), state_types.AbstractRef):
raise TypeError("ref must be a reference")
ref = pallas_core.TransformedRef(ref, transforms=())
return pallas_core.TransformedRef(
ref.ref, (*ref.transforms, transform),
)
def transpose_ref(
ref: pallas_core.TransformedRef | Any,
permutation: tuple[int, ...],
) -> pallas_core.TransformedRef:
assert hasattr(ref, "memory_space")
if ref.memory_space == MemorySpace.TMEM:
raise ValueError("Can't transpose a TMEM reference.")
return ref.transpose(permutation)
def untile_ref(ref, tiling: tuple[int, ...]) -> pallas_core.TransformedRef:
return transform_ref(ref, UntileRef(tiling))
def unswizzle_ref(ref, swizzle: int) -> pallas_core.TransformedRef:
return transform_ref(ref, UnswizzleRef(swizzle))
@tree_util.register_pytree_node_class
@dataclasses.dataclass(frozen=True)
| MulticastRef |
python | MongoEngine__mongoengine | tests/fields/test_email_field.py | {
"start": 119,
"end": 3994
} | class ____(MongoDBTestCase):
def test_generic_behavior(self):
class User(Document):
email = EmailField()
user = User(email="ross@example.com")
user.validate()
user = User(email="ross@example.co.uk")
user.validate()
user = User(
email=("Kofq@rhom0e4klgauOhpbpNdogawnyIKvQS0wk2mjqrgGQ5SaJIazqqWkm7.net")
)
user.validate()
user = User(email="new-tld@example.technology")
user.validate()
user = User(email="ross@example.com.")
with pytest.raises(ValidationError):
user.validate()
# unicode domain
user = User(email="user@пример.рф")
user.validate()
# invalid unicode domain
user = User(email="user@пример")
with pytest.raises(ValidationError):
user.validate()
# invalid data type
user = User(email=123)
with pytest.raises(ValidationError):
user.validate()
def test_email_field_unicode_user(self):
class User(Document):
email = EmailField()
# unicode user shouldn't validate by default...
user = User(email="Dörte@Sörensen.example.com")
with pytest.raises(ValidationError):
user.validate()
# ...but it should be fine with allow_utf8_user set to True
class User(Document):
email = EmailField(allow_utf8_user=True)
user = User(email="Dörte@Sörensen.example.com")
user.validate()
def test_email_field_domain_whitelist(self):
class User(Document):
email = EmailField()
# localhost domain shouldn't validate by default...
user = User(email="me@localhost")
with pytest.raises(ValidationError):
user.validate()
# ...but it should be fine if it's whitelisted
class User(Document):
email = EmailField(domain_whitelist=["localhost"])
user = User(email="me@localhost")
user.validate()
def test_email_domain_validation_fails_if_invalid_idn(self):
class User(Document):
email = EmailField()
invalid_idn = ".google.com"
user = User(email="me@%s" % invalid_idn)
with pytest.raises(ValidationError) as exc_info:
user.validate()
assert "domain failed IDN encoding" in str(exc_info.value)
def test_email_field_ip_domain(self):
class User(Document):
email = EmailField()
valid_ipv4 = "email@[127.0.0.1]"
valid_ipv6 = "email@[2001:dB8::1]"
invalid_ip = "email@[324.0.0.1]"
# IP address as a domain shouldn't validate by default...
user = User(email=valid_ipv4)
with pytest.raises(ValidationError):
user.validate()
user = User(email=valid_ipv6)
with pytest.raises(ValidationError):
user.validate()
user = User(email=invalid_ip)
with pytest.raises(ValidationError):
user.validate()
# ...but it should be fine with allow_ip_domain set to True
class User(Document):
email = EmailField(allow_ip_domain=True)
user = User(email=valid_ipv4)
user.validate()
user = User(email=valid_ipv6)
user.validate()
# invalid IP should still fail validation
user = User(email=invalid_ip)
with pytest.raises(ValidationError):
user.validate()
def test_email_field_honors_regex(self):
class User(Document):
email = EmailField(regex=r"\w+@example.com")
# Fails regex validation
user = User(email="me@foo.com")
with pytest.raises(ValidationError):
user.validate()
# Passes regex validation
user = User(email="me@example.com")
assert user.validate() is None
| TestEmailField |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/sort_children.py | {
"start": 164,
"end": 445
} | class ____(Label):
DEFAULT_CSS = """
Number {
width: 1fr;
}
"""
def __init__(self, number: int) -> None:
self.number = number
super().__init__(classes=f"number{number}")
def render(self) -> str:
return str(self.number)
| Number |
python | pytest-dev__pytest | src/_pytest/debugging.py | {
"start": 9686,
"end": 10336
} | class ____:
def pytest_exception_interact(
self, node: Node, call: CallInfo[Any], report: BaseReport
) -> None:
capman = node.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stdout.write(err)
assert call.excinfo is not None
_enter_pdb(node, call.excinfo, report)
def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None:
exc_or_tb = _postmortem_exc_or_tb(excinfo)
post_mortem(exc_or_tb)
| PdbInvoke |
python | pytorch__pytorch | test/export/test_tools.py | {
"start": 498,
"end": 1889
} | class ____(TestCase):
def test_report_exportability_basic(self):
class Module(torch.nn.Module):
def forward(self, x, y):
return x[0] + y
f = Module()
inp = ([torch.ones(1, 3)], torch.ones(1, 3))
report = report_exportability(f, inp)
self.assertTrue(len(report) == 1)
self.assertTrue(report[""] is None)
def test_report_exportability_with_issues(self):
class Unsupported(torch.nn.Module):
def forward(self, x):
return torch.ops.testlib.op_missing_meta(x, x.cos())
class Supported(torch.nn.Module):
def forward(self, x):
return x.sin()
class Module(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.unsupported = Unsupported()
self.supported = Supported()
def forward(self, x):
y = torch.nonzero(x)
return self.unsupported(y) + self.supported(y)
f = Module()
inp = (torch.ones(4, 4),)
report = report_exportability(f, inp, strict=False, pre_dispatch=True)
self.assertTrue(report[""] is not None)
self.assertTrue(report["unsupported"] is not None)
self.assertTrue(report["supported"] is None)
if __name__ == "__main__":
run_tests()
| TestExportTools |
python | OmkarPathak__pygorithm | pygorithm/geometry/line2.py | {
"start": 149,
"end": 23192
} | class ____(object):
"""
Define a two-dimensional directed line segment defined by two points.
This class is mostly used as a way to cache information that is
regularly required when working on geometrical problems.
.. caution::
Lines should be used as if they were completely immutable to ensure
correctness. All attributes of Line2 can be reconstructed from the two
points, and thus cannot be changed on their own and must be recalculated
if there were any changes to `start` or `end`.
.. tip::
To prevent unnecessary recalculations, many functions on lines accept an
'offset' argument, which is used to perform calculations on lines that
are simply shifts of other lines.
.. note::
The minimum x is guarranteed to be on either (or both) of
the start and end. However, minimum x and minimum y might not
come from the same point. The same is true for the maximum x
and maximum y.
:ivar start: the start of this line
:vartype start: :class:`pygorithm.geometry.vector2.Vector2`
:ivar end: the end of this line
:vartype end: :class:`pygorithm.geometry.vector2.Vector2`
"""
def __init__(self, start, end):
"""
Create a new line from start to end.
:param start: the start point
:type start: :class:`pygorithm.geometry.vector2.Vector2`
:param end: the end point
:type end: :class:`pygorithm.geometry.vector2.Vector2`
:raises ValueError: if start and end are at the same point
"""
if start.x == end.x and start.y == end.y:
raise ValueError('start and end are the same point')
self.start = start
self.end = end
self._delta = None
self._axis = None
self._normal = None
self._magnitude_squared = None
self._magnitude = None
self._min_x = None
self._min_y = None
self._max_x = None
self._max_y = None
self._slope = None
self._y_intercept = None
self._horizontal = None
self._vertical = None
@property
def delta(self):
"""
Get the vector from start to end, lazily initialized.
:returns: delta from start to end
:rtype: :class:`pygorithm.geometry.vector2.Vector2`
"""
if self._delta is None:
self._delta = self.end - self.start
return self._delta
@property
def axis(self):
"""
Get the normalized delta vector, lazily initialized
:returns: normalized delta
:rtype: :class:`pygorithm.geometry.vector2.Vector2`
"""
if self._axis is None:
self._axis = self.delta * (1 / self.magnitude)
return self._axis
@property
def normal(self):
"""
Get normalized normal vector to axis, lazily initialized.
Get the normalized normal vector such that the normal
vector is 90 degrees counter-clockwise from the axis.
:returns: normalized normal to axis
:rtype: :class:`pygorithm.geometry.vector2.Vector2`
"""
if self._normal is None:
self._normal = vector2.Vector2(-self.axis.y, self.axis.x)
return self._normal
@property
def magnitude_squared(self):
"""
Get the square of the magnitude of delta, lazily initialized.
:returns: square of magnitude of delta
:rtype: :class:`numbers.Number`
"""
if self._magnitude_squared is None:
self._magnitude_squared = self.delta.magnitude_squared()
return self._magnitude_squared
@property
def magnitude(self):
"""
Get the magnitude of delta, lazily initialized.
.. note::
It is substantially faster to operate on squared magnitude,
where possible.
:returns: magnitude of delta
:rtype: :class:`numbers.Number`
"""
if self._magnitude is None:
self._magnitude = math.sqrt(self.magnitude_squared)
return self._magnitude
@property
def min_x(self):
"""
Get the minimum x that this line contains, lazily initialized.
:returns: minimum x this line contains
:rtype: :class:`numbers.Number`
"""
if self._min_x is None:
self._min_x = min(self.start.x, self.end.x)
return self._min_x
@property
def min_y(self):
"""
Get the minimum y that this line contains, lazily initialized.
:returns: minimum x this line contains
:rtype: :class:`numbers.Number`
"""
if self._min_y is None:
self._min_y = min(self.start.y, self.end.y)
return self._min_y
@property
def max_x(self):
"""
Get the maximum x that this line contains, lazily initialized.
:returns: maximum x this line contains
:rtype: :class:`numbers.Number`
"""
if self._max_x is None:
self._max_x = max(self.start.x, self.end.x)
return self._max_x
@property
def max_y(self):
"""
Get the maximum y that this line contains, lazily initialized.
:returns: maximum x this line contains
:rtype: :class:`numbers.Number`
"""
if self._max_y is None:
self._max_y = max(self.start.y, self.end.y)
return self._max_y
@property
def slope(self):
"""
Get the slope of this line, lazily initialized.
.. caution::
The slope may be 0 (horizontal line) or positive or negative
infinity (vertical lines). It may be necessary to handle
these lines seperately, typically through checking the
:py:attr:`~pygorithm.geometry.line2.Line2.horizontal` and
:py:attr:`~pygorithm.geometry.line2.Line2.vertical` properties.
:returns: the slope of this line (rise over run).
:rtype: :class:`numbers.Number`
"""
if self._slope is None:
if self.delta.x == 0:
if self.delta.y > 0:
self._slope = float('+inf')
else:
self._slope = float('-inf')
else:
self._slope = self.delta.y / self.delta.x
return self._slope
@property
def y_intercept(self):
"""
Get the y-intercept of this line, lazily initialized.
This does not take into account any offset of the
line and may return None if this is a vertical line.
.. caution::
This function will return a y-intercept for non-vertical
line segments that do not reach ``x=0``.
.. caution::
The y-intercept will change based on the offset in a somewhat
complex manner.
:py:meth:`~pygorithm.geometry.line2.Line2.calculate_y_intercept`
accepts an offset parameter.
:returns: the y-intercept of this line when unshifted
:rtype: :class:`numbers.Number` or None
"""
if self.vertical:
return None
if self._y_intercept is None:
self._y_intercept = self.start.y - self.slope * self.start.x
return self._y_intercept
@property
def horizontal(self):
"""
Get if this line is horizontal, lazily initialized.
A line is horizontal if it has a slope of 0. This also
means that ``start.y == end.y``
:returns: if this line is horizontal
:rtype: bool
"""
if self._horizontal is None:
self._horizontal = self.delta.y == 0
return self._horizontal
@property
def vertical(self):
"""
Get if this line is vertical, lazily initialized.
A line is vertical if it has a slope of +inf or -inf. This
also means that ``start.x == end.x``.
:returns: if this line is vertical
:rtype: bool
"""
if self._vertical is None:
self._vertical = self.delta.x == 0
return self._vertical
def __repr__(self):
"""
Get an unambiguous representation of this line
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, line2)
vec1 = vector2.Vector2(1, 1)
vec2 = vector2.Vector2(3, 4)
line = line2.Line2(vec1, vec2)
# prints line2(start=vector2(x=1, y=1), end=vector2(x=3, y=4))
print(repr(line))
:returns: unambiguous representation of this line
:rtype: string
"""
return "line2(start={}, end={})".format(repr(self.start), repr(self.end))
def __str__(self):
"""
Get a human-readable representation of this line
Example:
.. code-block:: python
from pygorithm.geometry import (vector2, line2)
vec1 = vector2.Vector2(1, 1)
vec2 = vector2.Vector2(3, 4)
line = line2.Line2(vec1, vec2)
# prints <1, 1> -> <3, 4>
print(str(line))
# same as above
print(line)
:returns: human-readable representation of this line
:rtype: string
"""
return "{} -> {}".format(self.start, self.end)
def calculate_y_intercept(self, offset):
"""
Calculate the y-intercept of this line when it is at the
specified offset.
If the offset is None this is exactly equivalent to y_intercept
:param offset: the offset of this line for this calculations
:type offset: :class:`pygorithm.geometry.vector2.Vector2` or None
:returns: the y-intercept of this line when at offset
:rtype: :class:`numbers.Number`
"""
if offset is None:
return self.y_intercept
if self.vertical:
return None
# y = mx + b -> b = y - mx
return self.start.y + offset.y - self.slope * (self.start.x + offset.x)
@staticmethod
def are_parallel(line1, line2):
"""
Determine if the two lines are parallel.
Two lines are parallel if they have the same or opposite slopes.
:param line1: the first line
:type line1: :class:`pygorithm.geometry.line2.Line2`
:param line2: the second line
:type line2: :class:`pygorithm.geometry.line2.Line2`
:returns: if the lines are parallel
:rtype: bool
"""
if line1.vertical and line2.vertical:
return True
return math.isclose(line1.slope, line2.slope)
@staticmethod
def contains_point(line, point, offset = None):
"""
Determine if the line contains the specified point.
Optionally, specify an offset for the line. Being
on the line is determined using `math.isclose`.
:param line: the line
:type line: :class:`pygorithm.geometry.line2.Line2`
:param point: the point
:type point: :class:`pygorithm.geometry.vector2.Vector2`
:param offset: the offset of the line or None for the origin
:type offset: :class:`pygorithm.geometry.vector2.Vector2` or None
:returns: if the point is on the line
:rtype: bool
"""
if line.vertical:
x = line.start.x + offset.x if offset is not None else line.start.x
if not math.isclose(point.x, x, abs_tol=1e-07):
return False
ymin = line.min_y + offset.y if offset is not None else line.min_y
ymax = line.max_y + offset.y if offset is not None else line.max_y
if math.isclose(point.y, ymin, abs_tol=1e-07) or math.isclose(point.y, ymax, abs_tol=1e-07):
return True
return point.y > ymin and point.y < ymax
xmin = line.min_x + offset.x if offset is not None else line.min_x
xmax = line.max_x + offset.x if offset is not None else line.max_x
if not (math.isclose(point.x, xmin, abs_tol=1e-07) or point.x > xmin):
return False
if not (math.isclose(point.x, xmax, abs_tol=1e-07) or point.x < xmax):
return False
ystart = line.start.y + offset.y if offset is not None else line.start.y
if line.horizontal:
return math.isclose(ystart, point.y, abs_tol=1e-07)
yint = line.calculate_y_intercept(offset)
yatx = line.slope * point.x + yint
return math.isclose(point.y, yatx, abs_tol=1e-07)
@staticmethod
def find_intersection(line1, line2, offset1 = None, offset2 = None):
"""
Find the intersection between the two lines.
The lines may optionally be offset by a fixed amount. This
will incur a minor performance penalty which is less than
that of recreating new lines.
Two lines are considered touching if they only share exactly
one point and that point is an edge of one of the lines.
If two lines are parallel, their intersection could be a line.
.. tip::
This will never return True, True
:param line1: the first line
:type line1: :class:`pygorithm.geometry.line2.Line2`
:param line2: the second line
:type line2: :class:`pygorithm.geometry.line2.Line2`
:param offset1: the offset of line 1
:type offset1: :class:`pygorithm.geometry.vector2.Vector2` or None
:param offset2: the offset of line 2
:type offset2: :class:`pygorithm.geometry.vector2.Vector2` or None
:returns: (touching, overlapping, intersection_location)
:rtype: (bool, bool, :class:`pygorithm.geometry.line2.Line2` or :class:`pygorithm.geometry.vector2.Vector2` or None)
"""
# We will ensure that:
# - If one line is vertical and one horizontal, line1 is the vertical line
# - If only one line is vertical, line1 is the vertical line
# - If only one line is horizontal, line1 is the horizontal line
if line2.vertical and not line1.vertical:
return Line2.find_intersection(line2, line1, offset2, offset1)
if line2.horizontal and not line1.horizontal and not line1.vertical:
return Line2.find_intersection(line2, line1, offset2, offset1)
l1_st_x = line1.start.x + (offset1.x if offset1 is not None else 0)
l1_st_y = line1.start.y + (offset1.y if offset1 is not None else 0)
l1_en_x = line1.end.x + (offset1.x if offset1 is not None else 0)
l1_en_y = line1.end.y + (offset1.y if offset1 is not None else 0)
l2_st_x = line2.start.x + (offset2.x if offset2 is not None else 0)
l2_st_y = line2.start.y + (offset2.y if offset2 is not None else 0)
l2_en_x = line2.end.x + (offset2.x if offset2 is not None else 0)
l2_en_y = line2.end.y + (offset2.y if offset2 is not None else 0)
if line1.vertical and line2.vertical:
# Two vertical lines
if not math.isclose(l1_st_x, l2_st_x):
return False, False, None
aal1 = axisall.AxisAlignedLine(None, l1_st_y, l1_en_y)
aal2 = axisall.AxisAlignedLine(None, l2_st_y, l2_en_y)
touch, mtv = axisall.AxisAlignedLine.find_intersection(aal1, aal2)
if not touch:
return False, False, None
elif mtv[0] is None:
return True, False, vector2.Vector2(l1_st_x, mtv[1])
else:
return False, True, Line2(vector2.Vector2(l1_st_x, mtv[1]), vector2.Vector2(l1_st_x, mtv[2]))
if line1.horizontal and line2.horizontal:
# Two horizontal lines
if not math.isclose(l1_st_y, l2_st_y):
return False, False, None
aal1 = axisall.AxisAlignedLine(None, l1_st_x, l1_en_x)
aal2 = axisall.AxisAlignedLine(None, l2_st_x, l2_st_y)
touch, mtv = axisall.AxisAlignedLine.find_intersection(aal1, aal2)
if not touch:
return False, False, None
elif mtv[0] is None:
return True, False, vector2.Vector2(mtv[1], l1_st_y)
else:
return False, True, Line2(vector2.Vector2(mtv[1], l1_st_x), vector2.Vector2(mtv[2], l1_st_y))
if Line2.are_parallel(line1, line2):
# Two non-vertical, non-horizontal, parallel lines
yintr1 = line1.calculate_y_intercept(offset1)
yintr2 = line2.calculate_y_intercept(offset2)
if not math.isclose(yintr1, yintr2):
return False, False, None
axis = line1.axis
aal1 = axisall.AxisAlignedLine(axis, l1_st_x * axis.x + l1_st_y * axis.y, l1_en_x * axis.x + l1_en_y * axis.y)
aal2 = axisall.AxisAlignedLine(axis, l2_st_x * axis.x + l2_st_y * axis.y, l2_en_x * axis.x + l2_en_y * axis.y)
touch, mtv = axisall.AxisAlignedLine.find_intersection(aal1, aal2)
def unshift_vec(vec):
numerator = line1.slope * vec.x - yintr1 * axis.x * axis.x
denominator = axis.x * axis.y + line1.slope * axis.y * axis.y
new_x = numerator / denominator
new_y = line1.slope * new_x + yintr1
return vector2.Vector2(new_x, new_y)
if not touch:
return False, False, None
elif mtv[0] is None:
return True, False, unshift_vec(axis * mtv[1])
else:
return False, True, Line2(unshift_vec(axis * mtv[1]), unshift_vec(axis * mtv[2]))
if line1.vertical and line2.horizontal:
# A vertical and horizontal line
l1_min = min(l1_st_y, l1_en_y) if offset1 is not None else line1.min_y
l1_max = max(l1_st_y, l1_en_y) if offset1 is not None else line1.max_y
if l2_st_y < l1_min or l2_st_y > l2_max:
return False, False, None
l2_min = min(l2_st_x, l2_en_x) if offset2 is not None else line2.min_x
l2_max = max(l2_st_x, l2_en_x) if offset2 is not None else line2.max_x
if l1_st_x < l2_min or l1_st_x > l2_max:
return False, False, None
pt = vector2.Vector2(l1_st_x, l2_st_y)
if math.isclose(l2_st_y, l1_min) or math.isclose(l2_st_y, l2_max) or math.isclose(l1_st_x, l2_min) or math.isclose(l2_st_y, l2_max):
return True, False, pt
else:
return False, True, pt
if line1.vertical:
# A vertical and non-horizontal, non-vertical line
line2_y_at_line1_x = line2.slope * l1_st_x + line2.calculate_y_intercept(offset2)
l1_min = min(l1_st_y, l1_en_y) if offset1 is not None else line1.min_y
l1_max = max(l1_st_y, l1_en_y) if offset1 is not None else line1.max_y
if math.isclose(line2_y_at_line1_x, l1_min) or math.isclose(line2_y_at_line1_x, l1_max):
return True, False, vector2.Vector2(l1_st_x, line2_y_at_line1_x)
elif line2_y_at_line1_x < l1_min or line2_y_at_line1_x > l2_max:
return False, False, None
else:
return False, True, vector2.Vector2(l1_st_x, line2_y_at_line1_x)
if line1.horizontal:
# A horizontal and non-vertical, non-horizontal line
# y = mx + b -> x = (y - b) / m
line2_x_at_line1_y = (l1_st_y - line2.calculate_y_intercept(offset2)) / line2.slope
l1_min = min(l1_st_x, l1_en_x) if offset1 is not None else line1.min_x
l1_max = max(l1_st_x, l1_en_x) if offset1 is not None else line1.max_x
if math.isclose(line2_x_at_line1_y, l1_min) or math.isclose(line2_x_at_line1_y, l1_max):
return True, False, vector2.Vector2(line2_x_at_line1_y, l1_st_y)
elif line2_x_at_line1_y < l1_min or line2_x_at_line1_y > l1_max:
return False, False, None
else:
return False, True, vector2.Vector2(line2_x_at_line1_y, l1_st_y)
# Two non-vertical, non-horizontal, non-parallel lines
# y = m1 x + b1
# y = m2 x + b2
# m1 x + b1 = m2 x + b2
# m1 x - m2 x = b2 - b1
# x = (b2 - b1) / (m1 - m2)
yintr1 = line1.calculate_y_intercept(offset1)
yintr2 = line2.calculate_y_intercept(offset2)
intr_x = (yintr2 - yintr1) / (line1.slope - line2.slope)
# Some caution needs to be taken here to ensure we do approximately before range
# checks. It's possible for _approx(a, b) to be True and a < b to be True
on_edge1 = math.isclose(intr_x, l1_st_x) or math.isclose(intr_x, l1_en_x)
on_edge2 = math.isclose(intr_x, l2_st_x) or math.isclose(intr_x, l2_en_x)
if on_edge1 and on_edge2:
intr_y = line1.slope * intr_x + yintr1
return True, False, vector2.Vector2(intr_x, intr_y)
l1_min_x = min(l1_st_x, l1_en_x) if offset1 is not None else line1.min_x
l1_max_x = max(l1_st_x, l1_en_x) if offset1 is not None else line1.max_x
l2_min_x = min(l2_st_x, l2_en_x) if offset2 is not None else line2.min_x
l2_max_x = max(l2_st_x, l2_en_x) if offset2 is not None else line2.max_x
on_line1 = on_edge1 or (intr_x > l1_min_x and intr_x < l1_max_x)
on_line2 = on_edge2 or (intr_x > l2_min_x and intr_x < l2_max_x)
if on_line1 and on_line2:
intr_y = line1.slope * intr_x + yintr1
is_edge = on_edge1 or on_edge2
return is_edge, not is_edge, vector2.Vector2(intr_x, intr_y)
return False, False, None
| Line2 |
python | ray-project__ray | release/long_running_tests/workloads/many_actor_tasks.py | {
"start": 988,
"end": 2021
} | class ____(object):
def __init__(self):
self.value = 0
def method(self):
self.value += 1
return np.zeros(1024, dtype=np.uint8)
actors = [
Actor._remote([], {}, num_cpus=0.1, resources={str(i % num_nodes): 0.1})
for i in range(num_nodes * 5)
]
iteration = 0
start_time = time.time()
previous_time = start_time
while True:
for _ in range(100):
previous_ids = [a.method.remote() for a in actors]
ray.get(previous_ids)
new_time = time.time()
print(
"Iteration {}:\n"
" - Iteration time: {}.\n"
" - Absolute time: {}.\n"
" - Total elapsed time: {}.".format(
iteration, new_time - previous_time, new_time, new_time - start_time
)
)
update_progress(
{
"iteration": iteration,
"iteration_time": new_time - previous_time,
"absolute_time": new_time,
"elapsed_time": new_time - start_time,
}
)
previous_time = new_time
iteration += 1
| Actor |
python | pytorch__pytorch | test/dynamo/test_autograd_function.py | {
"start": 42390,
"end": 52544
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[]", L_y_: "f32[]"):
l_x_ = L_x_
l_y_ = L_y_
fwd_body_0 = self.fwd_body_0
bwd_body_0 = self.bwd_body_0
autograd_function_apply = torch.ops.higher_order.autograd_function_apply(fwd_body_0, bwd_body_0, l_x_, l_y_, args_tensor_mask = [True, True], non_differentiable_idx = [1]); fwd_body_0 = bwd_body_0 = l_x_ = l_y_ = None
getitem: "f32[]" = autograd_function_apply[0]
getitem_1: "f32[]" = autograd_function_apply[1]; autograd_function_apply = None
return (getitem, getitem_1)
class fwd_body_0(torch.nn.Module):
def forward(self, ctx : torch.autograd.function.Function, x: "f32[]", y: "f32[]"):
_set_grad_enabled = torch._C._set_grad_enabled(False); _set_grad_enabled = None
out1: "f32[]" = x.sin(); x = None
out2: "f32[]" = y * 2; y = None
_set_grad_enabled_1 = torch._C._set_grad_enabled(True); _set_grad_enabled_1 = None
return ((out1, out2), [])
class bwd_body_0(torch.nn.Module):
def forward(self, ctx : torch.autograd.function.Function, grad1: "f32[]", grad2: "f32[]"):
_set_grad_enabled = torch._C._set_grad_enabled(False); _set_grad_enabled = None
cos: "f32[]" = grad1.cos(); grad1 = None
mul: "f32[]" = grad2 * 0.0; grad2 = None
_set_grad_enabled_1 = torch._C._set_grad_enabled(True); _set_grad_enabled_1 = None
return (cos, mul)
""",
)
def test_mark_multi_output_non_differentiable(self):
from torch.autograd import Function
class MyFunction(Function):
@staticmethod
def forward(ctx, x, y, z):
out1 = x.sin()
out2 = y * 2
out3 = z + 3
ctx.mark_non_differentiable(out2, out3)
return out1, out2, out3
@staticmethod
def backward(ctx, grad1, grad2, grad3):
return grad1.cos(), grad2, grad3
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x, y, z):
return MyFunction.apply(x, y, z)
x = torch.tensor(10.0, requires_grad=True)
y = torch.tensor(20.0, requires_grad=True)
z = torch.tensor(30.0, requires_grad=True)
ref1, ref2, ref3 = MyFunction.apply(x, y, z)
res1, res2, res3 = fn(x, y, z)
self.assertEqual(ref1, res1)
self.assertEqual(ref2, res2)
self.assertEqual(ref3, res3)
# Ensure out1 requires gradients, out2 does not.
self.assertTrue(ref1.requires_grad)
self.assertTrue(res1.requires_grad)
self.assertFalse(ref2.requires_grad)
self.assertFalse(res2.requires_grad)
self.assertFalse(ref3.requires_grad)
self.assertFalse(res3.requires_grad)
res1.sum().backward()
def test_default_values(self):
from torch.autograd import Function
class Foo(Function):
@staticmethod
def forward(ctx, x, alpha=0.99):
return x
@staticmethod
def backward(ctx, grad_out):
return grad_out
@torch.compile
def foo(x):
return Foo.apply(x)
# Make sure guards for default values do not crash
foo(torch.randn(2))
foo(torch.randn(2, requires_grad=True))
def test_fwd_no_grad(self):
# autograd.Function.forward should be traced and called under no_grad mode.
# torch.exp with out=... arguments don't support automatic differentiation,
# so can't be traced/called under grad mode (throwing RuntimeError),
# therefore this unit test ensures fwd is under no_grad mode.
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, inputs):
torch.exp(inputs, out=inputs)
return inputs
@staticmethod
def backward(ctx, grad_output):
return None
@torch.compile(backend="eager", fullgraph=True)
def f(x):
return Foo.apply(x)
x1 = torch.randn(2, 3, requires_grad=True)
x2 = x1.clone()
self.assertEqual(f(x1), Foo.apply(x2))
# https://github.com/pytorch/pytorch/issues/129963
def test_fwd_propogation_correctness(self):
class MyCube(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
result = x**3
dx = 3 * x**2
ctx.save_for_backward(x, dx)
return result, dx
@staticmethod
def backward(ctx, grad_output, grad_dx):
x, dx = ctx.saved_tensors
result = grad_output * dx + grad_dx * 6 * x
# Intentionally return a wrong value to test if the backward is triggered twice.
# Since if the first MyCube.apply returns values w/o requires_grad=True,
# this backward would be only triggered once (the first MyCube.apply call),
# as the second MyCube.apply is inlined by Dynamo and the corresponding backward
# would be generated by autograd engine.
return result * 0.5
@torch.compile(backend="eager", fullgraph=True)
def fn(x):
x, _ = MyCube.apply(x)
x, _ = MyCube.apply(x)
return x
inp = torch.ones(2, requires_grad=True)
out = fn(inp)
out.sum().backward()
self.assertEqual(out, inp**3)
self.assertEqual(inp.grad, torch.tensor([2.25, 2.25]))
def test_tuple_arg(self):
cnt = torch._dynamo.testing.CompileCounter()
class TupleArgFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x, shape):
ctx.save_for_backward(torch.randn(shape))
return x + 1
@staticmethod
def backward(ctx, grad_output):
(result,) = ctx.saved_tensors
return result, None
@torch.compile(backend=cnt, fullgraph=True)
def fn():
return TupleArgFunc.apply(x, shape)
shape = (10, 10)
x = torch.randn(shape, requires_grad=True)
out = fn()
out.sum().backward()
self.assertEqual(out, x + 1)
self.assertEqual(x.grad.shape, shape)
self.assertEqual(cnt.frame_count, 1)
self.assertEqual(cnt.op_count, 1)
@requires_gpu
def test_triton_kernel_basic(self):
class Add(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: ( # noqa: E731
triton.cdiv(n_elements, meta["BLOCK_SIZE"]),
)
add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=16)
return output
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return x * grad_output, y * grad_output
@torch.compile(fullgraph=True, backend="inductor")
def f(x, y):
z = Add.apply(x, y)
return z
x = torch.randn(10, device=device_type, requires_grad=True)
y = torch.randn(10, device=device_type, requires_grad=True)
z = f(x, y)
loss = z.sum()
loss.backward()
self.assertEqual(x + y, z)
@requires_gpu
def test_triton_kernel_multiple_out(self):
class Add(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
ctx.t1 = x
ctx.t2 = y
output = torch.zeros_like(x)
n_elements = output.numel()
grid = lambda meta: ( # noqa: E731
triton.cdiv(n_elements, meta["BLOCK_SIZE"]),
)
add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=16)
return output, x
@staticmethod
def backward(ctx, grad_output, old_x):
x, y = ctx.saved_tensors
x1 = ctx.t1
y1 = ctx.t2
return old_x * x * x1 * grad_output, y * y1 * grad_output
@torch.compile(fullgraph=True, backend="inductor")
def f(x, y):
z = Add.apply(x, y)
return z
x = torch.randn(10, device=device_type, requires_grad=True)
y = torch.randn(10, device=device_type, requires_grad=True)
z, _ = f(x, y)
loss = z.sum()
loss.backward()
self.assertEqual(x + y, z)
@unittest.expectedFailure
def test_nonlocal_list_mutation_in_autograd_function(self):
"""Test that nonlocal list mutation in autograd.Function forward is handled correctly."""
class SimpleAutogradFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x, z):
# Simple computation
o = torch.matmul(x, x) @ x
out = x.sin()
# Mutate the nonlocal list
z.append(out)
return torch.cos(torch.sin(o)), torch.sin(x)
@staticmethod
def backward(ctx, grad_output1, grad_output2):
# Simple backward
return grad_output1 + grad_output2, None
def fn(x):
z = []
outs = SimpleAutogradFunc.apply(x, z)
out1 = outs[0]
# Check that the extra output pytree handling is done properly
out2 = outs[-1]
return out1 + out2, z[0]
x = torch.randn(4, 4, requires_grad=True)
ref = fn(x)
opt_fn = torch.compile(fn, backend="aot_eager", fullgraph=True)
res = opt_fn(x)
self.assertEqual(ref[0], res[0])
self.assertEqual(ref[1], res[1])
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| GraphModule |
python | tensorflow__tensorflow | tensorflow/python/data/ops/readers.py | {
"start": 18690,
"end": 19464
} | class ____(dataset_ops.DatasetV1Adapter):
"""A `Dataset` comprising records from one or more TFRecord files."""
def __init__(self,
filenames,
compression_type=None,
buffer_size=None,
num_parallel_reads=None,
name=None):
wrapped = TFRecordDatasetV2(
filenames, compression_type, buffer_size, num_parallel_reads, name=name)
super(TFRecordDatasetV1, self).__init__(wrapped)
__init__.__doc__ = TFRecordDatasetV2.__init__.__doc__
@property
def _filenames(self):
return self._dataset._filenames # pylint: disable=protected-access
@_filenames.setter
def _filenames(self, value):
self._dataset._filenames = value # pylint: disable=protected-access
| TFRecordDatasetV1 |
python | doocs__leetcode | solution/3400-3499/3439.Reschedule Meetings for Maximum Free Time I/Solution.py | {
"start": 0,
"end": 486
} | class ____:
def maxFreeTime(
self, eventTime: int, k: int, startTime: List[int], endTime: List[int]
) -> int:
nums = [startTime[0]]
for i in range(1, len(endTime)):
nums.append(startTime[i] - endTime[i - 1])
nums.append(eventTime - endTime[-1])
ans = s = 0
for i, x in enumerate(nums):
s += x
if i >= k:
ans = max(ans, s)
s -= nums[i - k]
return ans
| Solution |
python | pytorch__pytorch | test/test_ops_jit.py | {
"start": 1411,
"end": 14790
} | class ____(JitCommonTestCase):
exact_dtype = True
# Tests that the forward and backward passes of operations produce the
# same values for the cross-product of op variants (function, method, inplace)
# and runtimes (eager, traced, scripted).
# TODO WARNING: inplace x {traced, scripted} not currently tested
@_variant_ops(op_db)
def test_variant_consistency_jit(self, device, dtype, op):
_requires_grad = dtype in op.supported_backward_dtypes(
torch.device(device).type
)
include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex
samples = op.sample_inputs(
device,
dtype,
requires_grad=_requires_grad,
include_conjugated_inputs=include_conjugated_inputs,
)
# Acquires variants to test
func = op.get_op()
method = op.get_method()
variants = {
# TODO: inplace tests currently fail, fix and add inplace variant
"function": func,
"method": method,
}
# scripting strips the torch.ops prefix from these operators
# incorrectly; don't bother testing this case. Count this
# as "testing"
if isinstance(func, torch._ops.OpOverload):
self.skipTest("variant consistency doesn't work on torch.ops")
# TODO: find better way to standardize on op registration itself..
has_fake_function = op.name in ["resize_", "resize_as_"]
if has_fake_function:
variants = {"method": getattr(torch.Tensor, op.name)}
samples = op.sample_inputs(device, dtype, requires_grad=False)
tested = False
for sample in samples:
# Test traced and scripted consistency
for func_type, variant in variants.items():
if variant is None:
continue
# scripting and check_alias_analysis do not work with lambdas
# lambdas are typically used as a way to simulate methods without
# functional variants, so rely on the other variant for testing
# for now
if is_lambda(variant):
continue
tested = True
try:
self.indiv_variant_test_jit(
device, dtype, op, sample, func_type, variant, has_fake_function
)
except Exception as e:
variant_error_info = dedent(
f"""
Error testing {op.name} {func_type} variant
with dtype: {dtype}
with inputs {sample}:
"""
)
raise Exception(variant_error_info) from e # noqa: TRY002
assert tested, "JIT Test does not execute any logic"
def indiv_variant_test_jit(
self, device, dtype, op, sample, func_type, variant, has_fake_function
):
_requires_grad = dtype in op.supported_backward_dtypes(
torch.device(device).type
)
support_script = op.supports_scripting
# Create accessor for script function variant
name = op.name + "_" if func_type == "inplace" else op.name
# run with disable_autodiff_subgraph_inlining(True) to test
# autodiff support. Context manager forces the graph to contain
# DifferentiableGraph nodes if they are present
with disable_autodiff_subgraph_inlining():
# Check scripted forward, grad, and grad grad
if support_script:
script_fn = create_script_fn(self, name, func_type)
def out_fn(output):
# Processes the output for autograd
if sample.output_process_fn_grad is not None:
return sample.output_process_fn_grad(output)
return output
def get_sample():
return (
clone_input_helper(sample.input)
if op.name[-1] == "_"
else sample.input
)
if support_script:
check_against_reference(
self,
script_fn,
op.get_op(),
out_fn,
(get_sample(),) + sample.args,
sample.kwargs,
no_grad=not _requires_grad,
no_gradgrad=not op.supports_gradgrad,
)
# Check traced forward, grad, and grad grad
# TODO: fix tracing here
supports_tracing = op.supports_tracing and not has_fake_function
if op.assert_jit_shape_analysis:
self.assertTrue(supports_tracing)
if supports_tracing:
traced_fn = create_traced_fn(self, variant)
check_against_reference(
self,
traced_fn,
op.get_op(),
out_fn,
(get_sample(),) + sample.args,
sample.kwargs,
no_grad=not _requires_grad,
no_gradgrad=not op.supports_gradgrad,
)
# Check alias annotation schema for correctness (make
# sure inputs that aren't supposed to be modified aren't)
# Note: only runs in float32 because schema isn't affected by dtype,
# so running it on all dtypes is would be excessive
if dtype == torch.float32:
# TODO: no reason why we can't run this with tracing graph
if support_script and op.name != "rsub":
check_alias_annotation(
name,
(get_sample(),) + sample.args,
sample.kwargs,
func_type=func_type,
aten_name=op.aten_name,
)
# TODO: use script graph as well
checked_shape_analysis = False
if supports_tracing:
out = variant(get_sample(), *sample.args, **sample.kwargs)
# right now, tuple of outputs and tensor output supported
# TODO: list of tensor outputs
tuple_of_tensors = isinstance(out, tuple) and all(
isinstance(elem, torch.Tensor) for elem in out
)
if isinstance(out, torch.Tensor) or tuple_of_tensors:
if tuple_of_tensors:
sizes = [elem.size() for elem in out]
else:
sizes = out.size()
self.checkShapeAnalysis(
sizes, traced_fn.graph, op.assert_jit_shape_analysis
)
checked_shape_analysis = True
if op.assert_jit_shape_analysis:
self.assertTrue(checked_shape_analysis)
# Check autodifferentiation of nodes for traced and scripted graphs, only need to check once per sample
if dtype is torch.float32:
# Sandcastle doesn't fuse nodes
if IS_SANDCASTLE:
# fusible nodes are expected to be found in FusionGroups in the DifferentiableGraphs
nonfusible_nodes = (
op.autodiff_nonfusible_nodes + op.autodiff_fusible_nodes
)
fusible_nodes = []
else:
nonfusible_nodes = op.autodiff_nonfusible_nodes
fusible_nodes = op.autodiff_fusible_nodes
if supports_tracing:
self.assertAutodiffNode(
traced_fn.last_graph,
op.assert_autodiffed,
nonfusible_nodes,
fusible_nodes,
)
if support_script:
self.assertAutodiffNode(
script_fn.last_graph,
op.assert_autodiffed,
nonfusible_nodes,
fusible_nodes,
)
# alias testing is only done with torch.float for the same reason
_alias_ops = partial(ops, dtypes=OpDTypes.supported, allowed_dtypes=(torch.float,))
@_alias_ops(op for op in op_db if op.aliases)
def test_jit_alias_remapping(self, device, dtype, op):
# NOTE: only tests on first sample
samples = op.sample_inputs(device, dtype, requires_grad=True)
sample = first_sample(self, samples)
# [Scripting Data Preparation]
# Prepare data for test scripting
# Below we prepare strings of args/kwargs with and without type annotations.
# These strings are inserted into function template strings which is then torch scripted.
# - args string is ["t0"] corresponding to the "input" tensor required by the op
# - args_kw is the value of args and strings of kwargs used to call the op (without type annotations), for example,
# ["to", "1.0", "(1,)", "True", "tensor(1.0)"] -> def fn(t0): return variant(t0, 1.0, (1,), True, tensor(1.0))
args = ["t0"]
def quote_strs(v):
if isinstance(v, str):
return f"'{v}'"
return str(v)
args_kw = (
args
+ [f"{v}" for v in sample.args]
+ [f"{k}={quote_strs(v)}" for k, v in sample.kwargs.items()]
)
# Prepare data for test tracing
sample_args_kwargs = ()
if len(sample.args) > 0:
sample_args_kwargs += (sample.args,)
if len(sample.kwargs) > 0:
sample_args_kwargs += (sample.kwargs,)
original_name = op.aten_name
original_name_inplace = original_name + "_"
expected_dtype = op(sample.input, *sample.args, **sample.kwargs).dtype
for a_op in op.aliases:
inplace = a_op.inplace_variant
method_or_inplace = [a_op.inplace_variant, a_op.method_variant]
variants = (
v
for v in (a_op.op, a_op.method_variant, a_op.inplace_variant)
if v is not None
)
# Test scripting:
for variant in variants:
variant_name = variant.__name__
op_name = original_name_inplace if variant is inplace else original_name
if variant in method_or_inplace:
fn_template = """
def _fn(t0{c}):
return t0.{alias_name}({args_kw})
"""
# remove the first input tensor
script = fn_template.format(
c=", " if len(args_kw[1:]) > 1 else "",
args_kw=", ".join(args_kw[1:]),
alias_name=variant_name,
)
else:
fn_template = """
def _fn({args}):
return variant({args_kw})
"""
script = fn_template.format(
args=", ".join(args),
args_kw=", ".join(args_kw),
)
# Required to avoid undefined value: tensor error in JIT
# compilation of the function template
script = script.replace("tensor(", "torch.tensor(")
scripted = torch.jit.CompilationUnit(script)._fn
if variant is inplace and not torch.can_cast(expected_dtype, dtype):
try:
inp = clone_input_helper(sample.input)
scripted(inp)
except Exception:
continue
self.fail(
"Inplace operation on integer tensor that should be promoted to float didn't fail!"
)
inp = clone_input_helper(sample.input)
scripted(inp)
inp = clone_input_helper(sample.input)
graph = scripted.graph_for(inp)
FileCheck().check(op.aten_name).check_not(variant_name).run(graph)
# Test tracing:
for variant in variants:
variant_name = variant.__name__
op_name = original_name_inplace if variant is inplace else original_name
def _fn(*sample_args, **sample_kwargs):
return variant(*sample_args, **sample_kwargs)
inp = (clone_input_helper(sample.input),) + sample_args_kwargs
traced = torch.jit.trace(_fn, *inp)
inp = (clone_input_helper(sample.input),) + sample_args_kwargs
traced(*inp)
inp = (clone_input_helper(sample.input),) + sample_args_kwargs
graph = traced.graph_for(*inp)
FileCheck().check(op_name).check_not(variant_name).run(graph)
instantiate_device_type_tests(TestJit, globals())
if __name__ == "__main__":
TestCase._default_dtype_check_enabled = True
run_tests()
| TestJit |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/etcd_rendezvous.py | {
"start": 1635,
"end": 2598
} | class ____(Exception):
pass
# Default timeout for the rendezvous.
_DEFAULT_TIMEOUT: int = 600 # 10 minutes
# Additional waiting time after reaching the minimum number of nodes
# in case the rendezvous is elastic (min != max).
_DEFAULT_LAST_CALL_TIMEOUT: int = 30 # 30 seconds
# Various constants used internally in EtcdRendezvous
CONST_ETCD_SETUP_TTL = 5
CONST_ETCD_FROZEN_TTL = 10
CONST_ETCD_JOINABLE_EPHEMERAL_TTL = 10
# Ephemeral node TTL for worker's keep-alive key:
CONST_WORKER_KEEPALIVE_TTL = 10
# TTL for the ephemeral run_id-specific directory. All rendezvous state data
# for a specific run_id (job instance) is contained within directory.
# Its only role is to clean-up rendezvous data from old runs (for the case when
# etcd server is persistent), and has no affect on correctness, but should be
# larger than any timeouts that a worker process is expected to survive:
CONST_RUNID_SUBROOT_TTL = 7200 # 2 hours
| EtcdRendezvousRetryImmediately |
python | PrefectHQ__prefect | src/prefect/events/schemas/automations.py | {
"start": 12235,
"end": 14167
} | class ____(CompositeTrigger):
"""A composite trigger that requires some number of triggers to have fired
within the given time period in a specific order"""
type: Literal["sequence"] = "sequence"
def describe_for_cli(self, indent: int = 0) -> str:
"""Return a human-readable description of this trigger for the CLI"""
return textwrap.indent(
"\n".join(
[
"In this order:",
"\n".join(
[
trigger.describe_for_cli(indent=indent + 1)
for trigger in self.triggers
]
),
]
),
prefix=" " * indent,
)
def trigger_discriminator(value: Any) -> str:
"""Discriminator for triggers that defaults to 'event' if no type is specified."""
if isinstance(value, dict):
# Check for explicit type first
if "type" in value:
return value["type"]
# Check for compound/sequence specific fields
if "triggers" in value and "require" in value:
return "compound"
if "triggers" in value and "require" not in value:
return "sequence"
# Check for metric-specific posture
if value.get("posture") == "Metric":
return "metric"
# Default to event
return "event"
return getattr(value, "type", "event")
TriggerTypes: TypeAlias = Annotated[
Union[
Annotated[EventTrigger, Tag("event")],
Annotated[MetricTrigger, Tag("metric")],
Annotated[CompoundTrigger, Tag("compound")],
Annotated[SequenceTrigger, Tag("sequence")],
],
Discriminator(trigger_discriminator),
]
"""The union of all concrete trigger types that a user may actually create"""
CompoundTrigger.model_rebuild()
SequenceTrigger.model_rebuild()
| SequenceTrigger |
python | encode__django-rest-framework | rest_framework/utils/serializer_helpers.py | {
"start": 2863,
"end": 3479
} | class ____(BoundField):
def as_form_field(self):
value = self.value
# When HTML form input is used and the input is not valid
# value will be a JSONString, rather than a JSON primitive.
if not getattr(value, 'is_json_string', False):
with contextlib.suppress(TypeError, ValueError):
value = json.dumps(
self.value,
sort_keys=True,
indent=4,
separators=(',', ': '),
)
return self.__class__(self._field, value, self.errors, self._prefix)
| JSONBoundField |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/metadata.py | {
"start": 644,
"end": 733
} | class ____(DistlibException):
"""A required metadata is missing"""
| MetadataMissingError |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_emr_serverless.py | {
"start": 2623,
"end": 14085
} | class ____:
@mock.patch.object(EmrServerlessHook, "get_waiter")
@mock.patch.object(EmrServerlessHook, "conn")
def test_execute_successfully_with_wait_for_completion(self, mock_conn, mock_waiter):
mock_waiter().wait.return_value = True
mock_conn.create_application.return_value = {
"applicationId": application_id,
"ResponseMetadata": {"HTTPStatusCode": 200},
}
mock_conn.get_application.side_effect = [
{"application": {"state": "CREATED"}},
{"application": {"state": "STARTED"}},
]
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
client_request_token=client_request_token,
config=config,
waiter_max_attempts=3,
waiter_delay=0,
)
id = operator.execute(None)
mock_conn.create_application.assert_called_once_with(
clientToken=client_request_token,
releaseLabel=release_label,
type=job_type,
**config,
)
mock_waiter().wait.assert_called_with(
applicationId=application_id,
WaiterConfig={
"MaxAttempts": 1,
},
)
assert mock_waiter().wait.call_count == 2
mock_conn.start_application.assert_called_once_with(applicationId=application_id)
assert id == application_id
mock_conn.get_application.call_count == 2
@mock.patch.object(EmrServerlessHook, "get_waiter")
@mock.patch.object(EmrServerlessHook, "conn")
def test_execute_successfully_no_wait_for_completion(self, mock_conn, mock_waiter):
mock_waiter().wait.return_value = True
mock_conn.create_application.return_value = {
"applicationId": application_id,
"ResponseMetadata": {"HTTPStatusCode": 200},
}
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
client_request_token=client_request_token,
wait_for_completion=False,
config=config,
)
id = operator.execute(None)
mock_conn.create_application.assert_called_once_with(
clientToken=client_request_token,
releaseLabel=release_label,
type=job_type,
**config,
)
mock_conn.start_application.assert_called_once_with(applicationId=application_id)
mock_waiter().wait.assert_called_once()
assert id == application_id
@mock.patch.object(EmrServerlessHook, "conn")
def test_failed_create_application_request(self, mock_conn):
mock_conn.create_application.return_value = {
"applicationId": application_id,
"ResponseMetadata": {"HTTPStatusCode": 404},
}
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
client_request_token=client_request_token,
config=config,
)
with pytest.raises(AirflowException) as ex_message:
operator.execute(None)
assert "Application Creation failed:" in str(ex_message.value)
mock_conn.create_application.assert_called_once_with(
clientToken=client_request_token,
releaseLabel=release_label,
type=job_type,
**config,
)
@mock.patch.object(EmrServerlessHook, "get_waiter")
@mock.patch.object(EmrServerlessHook, "conn")
def test_failed_create_application(self, mock_conn, mock_get_waiter):
error = WaiterError(
name="test_name",
reason="Waiter encountered a terminal failure state:",
last_response={"application": {"state": "FAILED"}},
)
mock_get_waiter().wait.side_effect = error
mock_conn.create_application.return_value = {
"applicationId": application_id,
"ResponseMetadata": {"HTTPStatusCode": 200},
}
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
client_request_token=client_request_token,
config=config,
)
with pytest.raises(AirflowException) as ex_message:
operator.execute(None)
assert "Serverless Application creation failed:" in str(ex_message.value)
mock_conn.create_application.assert_called_once_with(
clientToken=client_request_token,
releaseLabel=release_label,
type=job_type,
**config,
)
mock_conn.create_application.return_value = {
"applicationId": application_id,
"ResponseMetadata": {"HTTPStatusCode": 200},
}
error = WaiterError(
name="test_name",
reason="Waiter encountered a terminal failure state:",
last_response={"application": {"state": "TERMINATED"}},
)
mock_get_waiter().wait.side_effect = error
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
client_request_token=client_request_token,
config=config,
)
with pytest.raises(AirflowException) as ex_message:
operator.execute(None)
assert "Serverless Application creation failed:" in str(ex_message.value)
mock_conn.create_application.assert_called_with(
clientToken=client_request_token,
releaseLabel=release_label,
type=job_type,
**config,
)
mock_conn.create_application.call_count == 2
@mock.patch.object(EmrServerlessHook, "get_waiter")
@mock.patch.object(EmrServerlessHook, "conn")
def test_failed_start_application(self, mock_conn, mock_get_waiter):
error = WaiterError(
name="test_name",
reason="Waiter encountered a terminal failure state:",
last_response={"application": {"state": "TERMINATED"}},
)
mock_get_waiter().wait.side_effect = [True, error]
mock_conn.create_application.return_value = {
"applicationId": application_id,
"ResponseMetadata": {"HTTPStatusCode": 200},
}
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
client_request_token=client_request_token,
config=config,
)
with pytest.raises(AirflowException) as ex_message:
operator.execute(None)
assert "Serverless Application failed to start:" in str(ex_message.value)
mock_conn.create_application.assert_called_once_with(
clientToken=client_request_token,
releaseLabel=release_label,
type=job_type,
**config,
)
@mock.patch.object(EmrServerlessHook, "get_waiter")
@mock.patch.object(EmrServerlessHook, "conn")
def test_no_client_request_token(self, mock_conn, mock_waiter):
mock_waiter().wait.return_value = True
mock_conn.create_application.return_value = {
"applicationId": application_id,
"ResponseMetadata": {"HTTPStatusCode": 200},
}
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
wait_for_completion=False,
config=config,
)
operator.execute(None)
generated_client_token = operator.client_request_token
assert str(UUID(generated_client_token, version=4)) == generated_client_token
@mock.patch.object(EmrServerlessHook, "get_waiter")
@mock.patch.object(EmrServerlessHook, "conn")
def test_application_in_failure_state(self, mock_conn, mock_get_waiter):
fail_state = "STOPPED"
error = WaiterError(
name="test_name",
reason="Waiter encountered a terminal failure state:",
last_response={"application": {"state": fail_state}},
)
mock_get_waiter().wait.side_effect = [error]
mock_conn.create_application.return_value = {
"applicationId": application_id,
"ResponseMetadata": {"HTTPStatusCode": 200},
}
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
client_request_token=client_request_token,
config=config,
)
with pytest.raises(AirflowException) as ex_message:
operator.execute(None)
assert str(ex_message.value) == f"Serverless Application creation failed: {error}"
mock_conn.create_application.assert_called_once_with(
clientToken=client_request_token,
releaseLabel=release_label,
type=job_type,
**config,
)
@pytest.mark.parametrize(
("waiter_delay", "waiter_max_attempts", "expected"),
[
(NOTSET, NOTSET, [60, 25]),
(30, 10, [30, 10]),
],
)
def test_create_application_waiter_params(
self,
waiter_delay,
waiter_max_attempts,
expected,
):
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
client_request_token=client_request_token,
config=config,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
)
assert operator.wait_for_completion is True
assert operator.waiter_delay == expected[0]
assert operator.waiter_max_attempts == expected[1]
@mock.patch.object(EmrServerlessHook, "conn")
def test_create_application_deferrable(self, mock_conn):
mock_conn.create_application.return_value = {
"applicationId": application_id,
"ResponseMetadata": {"HTTPStatusCode": 200},
}
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
client_request_token=client_request_token,
config=config,
deferrable=True,
)
with pytest.raises(TaskDeferred):
operator.execute(None)
def test_template_fields(self):
operator = EmrServerlessCreateApplicationOperator(
task_id=task_id,
release_label=release_label,
job_type=job_type,
client_request_token=client_request_token,
config=config,
waiter_max_attempts=3,
waiter_delay=0,
)
template_fields = list(operator.template_fields) + list(operator.template_fields_renderers.keys())
class_fields = operator.__dict__
missing_fields = [field for field in template_fields if field not in class_fields]
assert not missing_fields, f"Templated fields are not available {missing_fields}"
| TestEmrServerlessCreateApplicationOperator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/initsubclass2.py | {
"start": 228,
"end": 326
} | class ____(A, param_a=123):
pass
# This should generate two errors because param_a is missing.
| B |
python | kamyu104__LeetCode-Solutions | Python/implement-trie-prefix-tree.py | {
"start": 193,
"end": 1245
} | class ____(object):
def __init__(self):
self.root = TrieNode()
# @param {string} word
# @return {void}
# Inserts a word into the trie.
def insert(self, word):
cur = self.root
for c in word:
if not c in cur.leaves:
cur.leaves[c] = TrieNode()
cur = cur.leaves[c]
cur.is_string = True
# @param {string} word
# @return {boolean}
# Returns if the word is in the trie.
def search(self, word):
node = self.childSearch(word)
if node:
return node.is_string
return False
# @param {string} prefix
# @return {boolean}
# Returns if there is any word in the trie
# that starts with the given prefix.
def startsWith(self, prefix):
return self.childSearch(prefix) is not None
def childSearch(self, word):
cur = self.root
for c in word:
if c in cur.leaves:
cur = cur.leaves[c]
else:
return None
return cur
| Trie |
python | getsentry__sentry-python | sentry_sdk/profiler/continuous_profiler.py | {
"start": 15731,
"end": 18276
} | class ____(ContinuousScheduler):
"""
This scheduler is based on the thread scheduler but adapted to work with
gevent. When using gevent, it may monkey patch the threading modules
(`threading` and `_thread`). This results in the use of greenlets instead
of native threads.
This is an issue because the sampler CANNOT run in a greenlet because
1. Other greenlets doing sync work will prevent the sampler from running
2. The greenlet runs in the same thread as other greenlets so when taking
a sample, other greenlets will have been evicted from the thread. This
results in a sample containing only the sampler's code.
"""
mode = "gevent" # type: ContinuousProfilerMode
def __init__(self, frequency, options, sdk_info, capture_func):
# type: (int, Dict[str, Any], SDKInfo, Callable[[Envelope], None]) -> None
if ThreadPool is None:
raise ValueError("Profiler mode: {} is not available".format(self.mode))
super().__init__(frequency, options, sdk_info, capture_func)
self.thread = None # type: Optional[_ThreadPool]
self.lock = threading.Lock()
def ensure_running(self):
# type: () -> None
self.soft_shutdown = False
pid = os.getpid()
# is running on the right process
if self.running and self.pid == pid:
return
with self.lock:
# another thread may have tried to acquire the lock
# at the same time so it may start another thread
# make sure to check again before proceeding
if self.running and self.pid == pid:
return
self.pid = pid
self.running = True
# if the profiler thread is changing,
# we should create a new buffer along with it
self.reset_buffer()
self.thread = ThreadPool(1) # type: ignore[misc]
try:
self.thread.spawn(self.run)
except RuntimeError:
# Unfortunately at this point the interpreter is in a state that no
# longer allows us to spawn a thread and we have to bail.
self.running = False
self.thread = None
def teardown(self):
# type: () -> None
if self.running:
self.running = False
if self.thread is not None:
self.thread.join()
self.thread = None
self.buffer = None
PROFILE_BUFFER_SECONDS = 60
| GeventContinuousScheduler |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 5929,
"end": 6258
} | class ____(StatementRole, ReturnsRowsRole):
__slots__ = ()
_role_name = "SELECT construct or equivalent text() construct"
def subquery(self) -> Subquery:
raise NotImplementedError(
"All SelectStatementRole objects should implement a "
".subquery() method."
)
| SelectStatementRole |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataplex.py | {
"start": 10041,
"end": 11217
} | class ____:
@mock.patch(HOOK_STR)
@mock.patch(LAKE_STR)
def test_execute(self, lake_mock, hook_mock):
op = DataplexCreateLakeOperator(
task_id="create_dataplex_lake",
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
body=BODY_LAKE,
validate_only=None,
api_version=API_VERSION,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.wait_for_operation.return_value = None
lake_mock.return_value.to_dict.return_value = None
op.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
api_version=API_VERSION,
impersonation_chain=IMPERSONATION_CHAIN,
)
hook_mock.return_value.create_lake.assert_called_once_with(
project_id=PROJECT_ID,
region=REGION,
lake_id=LAKE_ID,
body=BODY_LAKE,
validate_only=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestDataplexCreateLakeOperator |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-bedrock/tests/test_bedrock_async.py | {
"start": 341,
"end": 447
} | class ____:
async def read(self):
return json.dumps(EXP_RESPONSE).encode()
| AsyncMockStreamReader |
python | sphinx-doc__sphinx | sphinx/builders/dummy.py | {
"start": 324,
"end": 1007
} | class ____(Builder):
name = 'dummy'
epilog = __('The dummy builder generates no files.')
allow_parallel = True
def init(self) -> None:
pass
def get_outdated_docs(self) -> set[str]:
return self.env.found_docs
def get_target_uri(self, docname: str, typ: str | None = None) -> str:
return ''
def write_doc(self, docname: str, doctree: nodes.document) -> None:
pass
def finish(self) -> None:
pass
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_builder(DummyBuilder)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| DummyBuilder |
python | ansible__ansible | test/units/module_utils/common/test_dict_transformations.py | {
"start": 3544,
"end": 3948
} | class ____:
def test_dict_merge_invalid_dict(self):
""" if b is not a dict, return b """
res = dict_merge({}, None)
assert res is None
def test_merge_sub_dicts(self):
"""merge sub dicts """
a = {'a': {'a1': 1}}
b = {'a': {'b1': 2}}
c = {'a': {'a1': 1, 'b1': 2}}
res = dict_merge(a, b)
assert res == c
| TestCaseAzureIncidental |
python | numba__numba | numba/parfors/parfor.py | {
"start": 81710,
"end": 90878
} | class ____:
"""
Convert supported Numpy functions, as well as arrayexpr nodes, to
parfor nodes.
"""
def __init__(self, pass_states):
self.pass_states = pass_states
self.rewritten = []
def run(self, blocks):
pass_states = self.pass_states
topo_order = find_topo_order(blocks)
# variables available in the program so far (used for finding map
# functions in array_expr lowering)
avail_vars = []
for label in topo_order:
block = blocks[label]
new_body = []
equiv_set = pass_states.array_analysis.get_equiv_set(label)
for instr in block.body:
if isinstance(instr, ir.Assign):
expr = instr.value
lhs = instr.target
lhs_typ = self.pass_states.typemap[lhs.name]
if self._is_C_or_F_order(lhs_typ):
if guard(self._is_supported_npycall, expr):
new_instr = self._numpy_to_parfor(equiv_set, lhs, expr)
if new_instr is not None:
self.rewritten.append(dict(
old=instr,
new=new_instr,
reason='numpy_allocator',
))
instr = new_instr
elif isinstance(expr, ir.Expr) and expr.op == 'arrayexpr':
new_instr = self._arrayexpr_to_parfor(
equiv_set, lhs, expr, avail_vars)
self.rewritten.append(dict(
old=instr,
new=new_instr,
reason='arrayexpr',
))
instr = new_instr
avail_vars.append(lhs.name)
new_body.append(instr)
block.body = new_body
def _is_C_order(self, arr_name):
if isinstance(arr_name, types.npytypes.Array):
return arr_name.layout == 'C' and arr_name.ndim > 0
elif arr_name is str:
typ = self.pass_states.typemap[arr_name]
return (isinstance(typ, types.npytypes.Array) and
typ.layout == 'C' and
typ.ndim > 0)
else:
return False
def _is_C_or_F_order(self, arr_name):
if isinstance(arr_name, types.npytypes.Array):
return (arr_name.layout == 'C' or arr_name.layout == 'F') and arr_name.ndim > 0
elif arr_name is str:
typ = self.pass_states.typemap[arr_name]
return (isinstance(typ, types.npytypes.Array) and
(typ.layout == 'C' or typ.layout == 'F') and
typ.ndim > 0)
else:
return False
def _arrayexpr_to_parfor(self, equiv_set, lhs, arrayexpr, avail_vars):
"""generate parfor from arrayexpr node, which is essentially a
map with recursive tree.
"""
pass_states = self.pass_states
scope = lhs.scope
loc = lhs.loc
expr = arrayexpr.expr
arr_typ = pass_states.typemap[lhs.name]
el_typ = arr_typ.dtype
# generate loopnests and size variables from lhs correlations
size_vars = equiv_set.get_shape(lhs)
index_vars, loopnests = _mk_parfor_loops(pass_states.typemap, size_vars, scope, loc)
# generate init block and body
init_block = ir.Block(scope, loc)
init_block.body = mk_alloc(
pass_states.typingctx,
pass_states.typemap, pass_states.calltypes, lhs,
tuple(size_vars), el_typ, scope, loc,
pass_states.typemap[lhs.name])
body_label = next_label()
body_block = ir.Block(scope, loc)
expr_out_var = ir.Var(scope, mk_unique_var("$expr_out_var"), loc)
pass_states.typemap[expr_out_var.name] = el_typ
index_var, index_var_typ = _make_index_var(
pass_states.typemap, scope, index_vars, body_block)
body_block.body.extend(
_arrayexpr_tree_to_ir(
pass_states.func_ir,
pass_states.typingctx,
pass_states.typemap,
pass_states.calltypes,
equiv_set,
init_block,
expr_out_var,
expr,
index_var,
index_vars,
avail_vars))
pat = ('array expression {}'.format(repr_arrayexpr(arrayexpr.expr)),)
parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set, pat[0], pass_states.flags)
setitem_node = ir.SetItem(lhs, index_var, expr_out_var, loc)
pass_states.calltypes[setitem_node] = signature(
types.none, pass_states.typemap[lhs.name], index_var_typ, el_typ)
body_block.body.append(setitem_node)
parfor.loop_body = {body_label: body_block}
if config.DEBUG_ARRAY_OPT >= 1:
print("parfor from arrayexpr")
parfor.dump()
return parfor
def _is_supported_npycall(self, expr):
"""check if we support parfor translation for
this Numpy call.
"""
call_name, mod_name = find_callname(self.pass_states.func_ir, expr)
if not (isinstance(mod_name, str) and mod_name.startswith('numpy')):
return False
if call_name in ['zeros', 'ones']:
return True
if mod_name == 'numpy.random' and call_name in random_calls:
return True
# TODO: add more calls
return False
def _numpy_to_parfor(self, equiv_set, lhs, expr):
call_name, mod_name = find_callname(self.pass_states.func_ir, expr)
args = expr.args
kws = dict(expr.kws)
if call_name in ['zeros', 'ones'] or mod_name == 'numpy.random':
return self._numpy_map_to_parfor(equiv_set, call_name, lhs, args, kws, expr)
# return error if we couldn't handle it (avoid rewrite infinite loop)
raise errors.UnsupportedRewriteError(
f"parfor translation failed for {expr}", loc=expr.loc,
)
def _numpy_map_to_parfor(self, equiv_set, call_name, lhs, args, kws, expr):
"""generate parfor from Numpy calls that are maps.
"""
pass_states = self.pass_states
scope = lhs.scope
loc = lhs.loc
arr_typ = pass_states.typemap[lhs.name]
el_typ = arr_typ.dtype
# generate loopnests and size variables from lhs correlations
size_vars = equiv_set.get_shape(lhs)
if size_vars is None:
if config.DEBUG_ARRAY_OPT >= 1:
print("Could not convert numpy map to parfor, unknown size")
return None
index_vars, loopnests = _mk_parfor_loops(pass_states.typemap, size_vars, scope, loc)
# generate init block and body
init_block = ir.Block(scope, loc)
init_block.body = mk_alloc(
pass_states.typingctx,
pass_states.typemap, pass_states.calltypes, lhs,
tuple(size_vars), el_typ, scope, loc,
pass_states.typemap[lhs.name])
body_label = next_label()
body_block = ir.Block(scope, loc)
expr_out_var = ir.Var(scope, mk_unique_var("$expr_out_var"), loc)
pass_states.typemap[expr_out_var.name] = el_typ
index_var, index_var_typ = _make_index_var(
pass_states.typemap, scope, index_vars, body_block)
if call_name == 'zeros':
value = ir.Const(el_typ(0), loc)
elif call_name == 'ones':
value = ir.Const(el_typ(1), loc)
elif call_name in random_calls:
# remove size arg to reuse the call expr for single value
_remove_size_arg(call_name, expr)
# update expr type
new_arg_typs, new_kw_types = _get_call_arg_types(
expr, pass_states.typemap)
pass_states.calltypes.pop(expr)
pass_states.calltypes[expr] = pass_states.typemap[expr.func.name].get_call_type(
typing.Context(), new_arg_typs, new_kw_types)
value = expr
else:
raise NotImplementedError(
"Map of numpy.{} to parfor is not implemented".format(call_name))
value_assign = ir.Assign(value, expr_out_var, loc)
body_block.body.append(value_assign)
setitem_node = ir.SetItem(lhs, index_var, expr_out_var, loc)
pass_states.calltypes[setitem_node] = signature(
types.none, pass_states.typemap[lhs.name], index_var_typ, el_typ)
body_block.body.append(setitem_node)
parfor = Parfor(loopnests, init_block, {}, loc, index_var, equiv_set,
('{} function'.format(call_name,), 'NumPy mapping'),
pass_states.flags)
parfor.loop_body = {body_label: body_block}
if config.DEBUG_ARRAY_OPT >= 1:
print("generated parfor for numpy map:")
parfor.dump()
return parfor
| ConvertNumpyPass |
python | tornadoweb__tornado | tornado/test/httpserver_test.py | {
"start": 4229,
"end": 5462
} | class ____(unittest.TestCase):
def test_missing_arguments(self):
application = Application()
self.assertRaises(
KeyError,
HTTPServer,
application,
ssl_options={"keyfile": "/__missing__.crt"},
)
def test_missing_key(self):
"""A missing SSL key should cause an immediate exception."""
application = Application()
module_dir = os.path.dirname(__file__)
existing_certificate = os.path.join(module_dir, "test.crt")
existing_key = os.path.join(module_dir, "test.key")
self.assertRaises(
(ValueError, IOError),
HTTPServer,
application,
ssl_options={"certfile": "/__mising__.crt"},
)
self.assertRaises(
(ValueError, IOError),
HTTPServer,
application,
ssl_options={
"certfile": existing_certificate,
"keyfile": "/__missing__.key",
},
)
# This actually works because both files exist
HTTPServer(
application,
ssl_options={"certfile": existing_certificate, "keyfile": existing_key},
)
| BadSSLOptionsTest |
python | ray-project__ray | python/ray/train/v2/_internal/callbacks/backend_setup.py | {
"start": 285,
"end": 1056
} | class ____(WorkerGroupCallback):
def __init__(self, backend_config: BackendConfig):
self._backend_config = backend_config
self._backend = backend_config.backend_cls()
def after_worker_group_start(self, worker_group: WorkerGroup):
self._backend.on_start(worker_group, self._backend_config)
self._backend.on_training_start(worker_group, self._backend_config)
def before_worker_group_shutdown(self, worker_group: WorkerGroup):
try:
self._backend.on_shutdown(worker_group, self._backend_config)
except RayActorError:
logger.warning(
"Graceful shutdown of backend failed. This is "
"expected if one of the workers has crashed."
)
| BackendSetupCallback |
python | kamyu104__LeetCode-Solutions | Python/number-of-subarrays-that-match-a-pattern-ii.py | {
"start": 35,
"end": 1071
} | class ____(object):
def countMatchingSubarrays(self, nums, pattern):
"""
:type nums: List[int]
:type pattern: List[int]
:rtype: int
"""
def getPrefix(pattern):
prefix = [-1]*len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j+1 > 0 and pattern[j+1] != pattern[i]:
j = prefix[j]
if pattern[j+1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
def KMP(text, pattern):
prefix = getPrefix(pattern)
j = -1
for i, x in enumerate(text):
while j+1 > 0 and pattern[j+1] != x:
j = prefix[j]
if pattern[j+1] == x:
j += 1
if j+1 == len(pattern):
yield i-j
j = prefix[j]
return sum(1 for _ in KMP((cmp(nums[i+1], nums[i]) for i in xrange(len(nums)-1)), pattern))
| Solution |
python | walkccc__LeetCode | solutions/1073. Adding Two Negabinary Numbers/1073.py | {
"start": 0,
"end": 378
} | class ____:
def addNegabinary(self, arr1: list[int], arr2: list[int]) -> list[int]:
ans = []
carry = 0
while carry != 0 or arr1 or arr2:
if arr1:
carry += arr1.pop()
if arr2:
carry += arr2.pop()
ans.append(carry & 1)
carry = -(carry >> 1)
while len(ans) > 1 and ans[-1] == 0:
ans.pop()
return ans[::-1]
| Solution |
python | PrefectHQ__prefect | tests/server/orchestration/test_core_policy.py | {
"start": 7365,
"end": 9285
} | class ____:
async def test_scheduled_time_copied_from_scheduled_to_pending(
self,
session,
run_type,
initialize_orchestration,
):
initial_state_type = states.StateType.SCHEDULED
proposed_state_type = states.StateType.PENDING
intended_transition = (initial_state_type, proposed_state_type)
scheduled_time = now("UTC") - timedelta(minutes=5)
ctx = await initialize_orchestration(
session,
run_type,
*intended_transition,
initial_details={"scheduled_time": scheduled_time},
)
async with CopyScheduledTime(ctx, *intended_transition) as ctx:
await ctx.validate_proposed_state()
assert ctx.validated_state_type == proposed_state_type
assert ctx.validated_state.state_details.scheduled_time == scheduled_time
@pytest.mark.parametrize(
"proposed_state_type",
[
states.StateType.COMPLETED,
states.StateType.FAILED,
states.StateType.CANCELLED,
states.StateType.CRASHED,
states.StateType.RUNNING,
],
)
async def test_scheduled_time_not_copied_for_other_transitions(
self,
session,
run_type,
initialize_orchestration,
proposed_state_type,
):
initial_state_type = states.StateType.SCHEDULED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
run_type,
*intended_transition,
initial_details={"scheduled_time": now("UTC") + timedelta(minutes=5)},
)
scheduling_rule = CopyScheduledTime(ctx, *intended_transition)
async with scheduling_rule as ctx:
await ctx.validate_proposed_state()
assert await scheduling_rule.invalid()
| TestCopyScheduledTime |
python | huggingface__transformers | tests/models/codegen/test_modeling_codegen.py | {
"start": 17813,
"end": 20406
} | class ____(unittest.TestCase):
@cached_property
def cached_tokenizer(self):
return AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
@cached_property
def cached_model(self):
return CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")
@slow
def test_lm_generate_codegen(self):
tokenizer = self.cached_tokenizer
for checkpointing in [True, False]:
model = self.cached_model
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(torch_device)
inputs = tokenizer("def hello_world():", return_tensors="pt").to(torch_device)
expected_output = 'def hello_world():\n print("Hello World")\n\nhello_world()\n\n'
output_ids = model.generate(**inputs, do_sample=False)
output_str = tokenizer.batch_decode(output_ids)[0]
self.assertEqual(output_str, expected_output)
@slow
def test_codegen_sample(self):
tokenizer = self.cached_tokenizer
model = self.cached_model
model.to(torch_device)
torch.manual_seed(0)
backend_manual_seed(torch_device, 0)
tokenized = tokenizer("def hello_world():", return_tensors="pt", return_token_type_ids=True)
input_ids = tokenized.input_ids.to(torch_device)
output_ids = model.generate(input_ids, do_sample=True)
output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
token_type_ids = tokenized.token_type_ids.to(torch_device)
output_seq = model.generate(input_ids=input_ids, do_sample=True, num_return_sequences=5)
output_seq_tt = model.generate(
input_ids=input_ids, token_type_ids=token_type_ids, do_sample=True, num_return_sequences=5
)
output_seq_strs = tokenizer.batch_decode(output_seq, skip_special_tokens=True)
output_seq_tt_strs = tokenizer.batch_decode(output_seq_tt, skip_special_tokens=True)
if torch_device == "cuda":
EXPECTED_OUTPUT_STR = 'def hello_world():\n print("Hello World")\n return True\n\nresult ='
else:
EXPECTED_OUTPUT_STR = "def hello_world():\r\n print('Hello, World.')\r\n\r\n\r"
self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
self.assertTrue(
all(output_seq_strs[idx] != output_seq_tt_strs[idx] for idx in range(len(output_seq_tt_strs)))
) # token_type_ids should change output
| CodeGenModelLanguageGenerationTest |
python | zarr-developers__zarr-python | src/zarr/core/metadata/v3.py | {
"start": 5950,
"end": 6717
} | class ____(TypedDict):
"""
A typed dictionary model for zarr v3 metadata.
"""
zarr_format: Literal[3]
node_type: Literal["array"]
data_type: str | NamedConfig[str, Mapping[str, object]]
shape: tuple[int, ...]
chunk_grid: NamedConfig[str, Mapping[str, object]]
chunk_key_encoding: NamedConfig[str, Mapping[str, object]]
fill_value: object
codecs: tuple[str | NamedConfig[str, Mapping[str, object]], ...]
attributes: NotRequired[Mapping[str, JSON]]
storage_transformers: NotRequired[tuple[NamedConfig[str, Mapping[str, object]], ...]]
dimension_names: NotRequired[tuple[str | None]]
ARRAY_METADATA_KEYS = set(ArrayMetadataJSON_V3.__annotations__.keys())
@dataclass(frozen=True, kw_only=True)
| ArrayMetadataJSON_V3 |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/uninitialized_attributes.py | {
"start": 0,
"end": 58
} | class ____:
attr1: int #: docstring
attr2: str
| Base |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py | {
"start": 1268,
"end": 10303
} | class ____(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def tearDown(self):
config.enable_tensor_float_32_execution(self.tf32_keep_)
def setUp(self):
self.tf32_keep_ = config.tensor_float_32_execution_enabled()
config.enable_tensor_float_32_execution(False)
@staticmethod
def optional_tests():
"""List of optional test names to run."""
return [
"operator_matmul_with_same_type",
"operator_solve_with_same_type",
]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
diag = linear_operator_test_util.random_sign_uniform(
shape[:-1], minval=1., maxval=2., dtype=dtype)
if ensure_self_adjoint_and_pd:
# Abs on complex64 will result in a float32, so we cast back up.
diag = math_ops.cast(math_ops.abs(diag), dtype=dtype)
lin_op_diag = diag
if use_placeholder:
lin_op_diag = array_ops.placeholder_with_default(diag, shape=None)
operator = linalg.LinearOperatorDiag(
lin_op_diag,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
matrix = array_ops.matrix_diag(diag)
return operator, matrix
def test_assert_positive_definite_raises_for_zero_eigenvalue(self):
# Matrix with one positive eigenvalue and one zero eigenvalue.
with self.cached_session():
diag = [1.0, 0.0]
operator = linalg.LinearOperatorDiag(diag)
# is_self_adjoint should be auto-set for real diag.
self.assertTrue(operator.is_self_adjoint)
with self.assertRaisesOpError("non-positive.*not positive definite"):
operator.assert_positive_definite().run()
def test_assert_positive_definite_raises_for_negative_real_eigvalues(self):
with self.cached_session():
diag_x = [1.0, -2.0]
diag_y = [0., 0.] # Imaginary eigenvalues should not matter.
diag = math_ops.complex(diag_x, diag_y)
operator = linalg.LinearOperatorDiag(diag)
# is_self_adjoint should not be auto-set for complex diag.
self.assertTrue(operator.is_self_adjoint is None)
with self.assertRaisesOpError("non-positive real.*not positive definite"):
operator.assert_positive_definite().run()
def test_assert_positive_definite_does_not_raise_if_pd_and_complex(self):
with self.cached_session():
x = [1., 2.]
y = [1., 0.]
diag = math_ops.complex(x, y) # Re[diag] > 0.
# Should not fail
self.evaluate(linalg.LinearOperatorDiag(diag).assert_positive_definite())
def test_assert_non_singular_raises_if_zero_eigenvalue(self):
# Singular matrix with one positive eigenvalue and one zero eigenvalue.
with self.cached_session():
diag = [1.0, 0.0]
operator = linalg.LinearOperatorDiag(diag, is_self_adjoint=True)
with self.assertRaisesOpError("Singular operator"):
operator.assert_non_singular().run()
def test_assert_non_singular_does_not_raise_for_complex_nonsingular(self):
with self.cached_session():
x = [1., 0.]
y = [0., 1.]
diag = math_ops.complex(x, y)
# Should not raise.
self.evaluate(linalg.LinearOperatorDiag(diag).assert_non_singular())
def test_assert_self_adjoint_raises_if_diag_has_complex_part(self):
with self.cached_session():
x = [1., 0.]
y = [0., 1.]
diag = math_ops.complex(x, y)
operator = linalg.LinearOperatorDiag(diag)
with self.assertRaisesOpError("imaginary.*not self-adjoint"):
operator.assert_self_adjoint().run()
def test_assert_self_adjoint_does_not_raise_for_diag_with_zero_imag(self):
with self.cached_session():
x = [1., 0.]
y = [0., 0.]
diag = math_ops.complex(x, y)
operator = linalg.LinearOperatorDiag(diag)
# Should not raise
self.evaluate(operator.assert_self_adjoint())
def test_scalar_diag_raises(self):
with self.assertRaisesRegex(ValueError, "must have at least 1 dimension"):
linalg.LinearOperatorDiag(1.)
def test_broadcast_matmul_and_solve(self):
# These cannot be done in the automated (base test class) tests since they
# test shapes that tf.matmul cannot handle.
# In particular, tf.matmul does not broadcast.
with self.cached_session() as sess:
x = random_ops.random_normal(shape=(2, 2, 3, 4))
# This LinearOperatorDiag will be broadcast to (2, 2, 3, 3) during solve
# and matmul with 'x' as the argument.
diag = random_ops.random_uniform(shape=(2, 1, 3))
operator = linalg.LinearOperatorDiag(diag, is_self_adjoint=True)
self.assertAllEqual((2, 1, 3, 3), operator.shape)
# Create a batch matrix with the broadcast shape of operator.
diag_broadcast = array_ops.concat((diag, diag), 1)
mat = array_ops.matrix_diag(diag_broadcast)
self.assertAllEqual((2, 2, 3, 3), mat.shape) # being pedantic.
operator_matmul = operator.matmul(x)
mat_matmul = math_ops.matmul(mat, x)
self.assertAllEqual(operator_matmul.shape, mat_matmul.shape)
self.assertAllClose(*self.evaluate([operator_matmul, mat_matmul]))
operator_solve = operator.solve(x)
mat_solve = linalg_ops.matrix_solve(mat, x)
self.assertAllEqual(operator_solve.shape, mat_solve.shape)
self.assertAllClose(*self.evaluate([operator_solve, mat_solve]))
def test_diag_matmul(self):
operator1 = linalg_lib.LinearOperatorDiag([2., 3.])
operator2 = linalg_lib.LinearOperatorDiag([1., 2.])
operator3 = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=3.)
operator_matmul = operator1.matmul(operator2)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([2., 6.], self.evaluate(operator_matmul.diag))
operator_matmul = operator2.matmul(operator1)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([2., 6.], self.evaluate(operator_matmul.diag))
operator_matmul = operator1.matmul(operator3)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([6., 9.], self.evaluate(operator_matmul.diag))
operator_matmul = operator3.matmul(operator1)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([6., 9.], self.evaluate(operator_matmul.diag))
def test_diag_solve(self):
operator1 = linalg_lib.LinearOperatorDiag([2., 3.], is_non_singular=True)
operator2 = linalg_lib.LinearOperatorDiag([1., 2.], is_non_singular=True)
operator3 = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=3., is_non_singular=True)
operator_solve = operator1.solve(operator2)
self.assertTrue(isinstance(
operator_solve,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([0.5, 2 / 3.], self.evaluate(operator_solve.diag))
operator_solve = operator2.solve(operator1)
self.assertTrue(isinstance(
operator_solve,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([2., 3 / 2.], self.evaluate(operator_solve.diag))
operator_solve = operator1.solve(operator3)
self.assertTrue(isinstance(
operator_solve,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([3 / 2., 1.], self.evaluate(operator_solve.diag))
operator_solve = operator3.solve(operator1)
self.assertTrue(isinstance(
operator_solve,
linalg_lib.LinearOperatorDiag))
self.assertAllClose([2 / 3., 1.], self.evaluate(operator_solve.diag))
def test_diag_adjoint_type(self):
diag = [1., 3., 5., 8.]
operator = linalg.LinearOperatorDiag(diag, is_non_singular=True)
self.assertIsInstance(operator.adjoint(), linalg.LinearOperatorDiag)
def test_diag_cholesky_type(self):
diag = [1., 3., 5., 8.]
operator = linalg.LinearOperatorDiag(
diag,
is_positive_definite=True,
is_self_adjoint=True,
)
self.assertIsInstance(operator.cholesky(), linalg.LinearOperatorDiag)
def test_diag_inverse_type(self):
diag = [1., 3., 5., 8.]
operator = linalg.LinearOperatorDiag(diag, is_non_singular=True)
self.assertIsInstance(operator.inverse(), linalg.LinearOperatorDiag)
def test_tape_safe(self):
diag = variables_module.Variable([[2.]])
operator = linalg.LinearOperatorDiag(diag)
self.check_tape_safe(operator)
def test_convert_variables_to_tensors(self):
diag = variables_module.Variable([[2.]])
operator = linalg.LinearOperatorDiag(diag)
with self.cached_session() as sess:
sess.run([diag.initializer])
self.check_convert_variables_to_tensors(operator)
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorDiagTest)
test.main()
| LinearOperatorDiagTest |
python | ray-project__ray | release/nightly_tests/multimodal_inference_benchmarks/audio_transcription/ray_data_main.py | {
"start": 1635,
"end": 3316
} | class ____:
def __init__(self):
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.dtype = torch.float16
self.model_id = TRANSCRIPTION_MODEL
self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
self.model_id,
torch_dtype=self.dtype,
low_cpu_mem_usage=True,
use_safetensors=True,
)
self.model.to(self.device)
def __call__(self, batch):
input_features = batch.pop("input_features")
spectrograms = np.array(input_features)
spectrograms = torch.tensor(spectrograms).to(self.device, dtype=self.dtype)
with torch.no_grad():
token_ids = self.model.generate(spectrograms)
batch["token_ids"] = token_ids.cpu().numpy()
return batch
def decoder(batch):
# NOTE: Remove the `token_ids` column since we don't need it anymore. This is done by
# the system automatically on Ray Data 2.51+ with the `with_column` API.
token_ids = batch.pop("token_ids")
transcription = processor.batch_decode(token_ids, skip_special_tokens=True)
batch["transcription"] = transcription
batch["transcription_length"] = np.array([len(t) for t in transcription])
return batch
start_time = time.time()
ds = ray.data.read_parquet(INPUT_PATH)
ds = ds.repartition(target_num_rows_per_block=BATCH_SIZE)
ds = ds.map(resample)
ds = ds.map_batches(whisper_preprocess, batch_size=BATCH_SIZE)
ds = ds.map_batches(
Transcriber,
batch_size=BATCH_SIZE,
concurrency=NUM_GPUS,
num_gpus=1,
)
ds = ds.map_batches(decoder)
ds.write_parquet(OUTPUT_PATH)
print("Runtime:", time.time() - start_time)
| Transcriber |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 17798,
"end": 18455
} | class ____(Node):
# Part of a C declaration.
#
# Processing during analyse_declarations phase:
#
# analyse
# Returns (name, type) pair where name is the
# CNameDeclaratorNode of the name being declared
# and type is the type it is being declared as.
#
# calling_convention string Calling convention of CFuncDeclaratorNode
# for which this is a base
child_attrs = []
calling_convention = ""
def declared_name(self):
return None
def analyse_templates(self):
# Only C++ functions have templates.
return None
| CDeclaratorNode |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 5509,
"end": 5848
} | class ____(Iterator[int]):
# Note: *Iterable*, not *Iterator*, returned!
def __iter__(self) -> Iterable[int]:
... # Y034 "__iter__" methods in classes like "BadIterator4" usually return "self" at runtime. Consider using "typing_extensions.Self" in "BadIterator4.__iter__", e.g. "def __iter__(self) -> Self: ..."
| BadIterator4 |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 218408,
"end": 225729
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersV1(self):
with ops.Graph().as_default():
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices_padded, num_valid_padded = \
image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(num_valid_padded.eval(), 3)
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(num_valid.eval(), 3)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
@test_util.disable_xla(
"b/141236442: "
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromThreeClustersV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def func(boxes, scores, max_output_size, iou_threshold):
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
yp, nvp = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=True)
y, n = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
pad_to_max_output_size=False)
# The output shape of the padded operation must be fully defined.
self.assertEqual(yp.shape.is_fully_defined(), True)
self.assertEqual(y.shape.is_fully_defined(), False)
return yp, nvp, y, n
boxes_np = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
scores_np = [0.9, 0.75, 0.6, 0.95, 0.5, 0.3]
max_output_size_np = 5
iou_threshold_np = 0.5
selected_indices_padded, num_valid_padded, selected_indices, num_valid = \
func(boxes_np, scores_np, max_output_size_np, iou_threshold_np)
with self.cached_session():
with test_util.run_functions_eagerly(run_func_eagerly):
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(self.evaluate(num_valid_padded), 3)
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(self.evaluate(num_valid), 3)
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLapV1(self):
with ops.Graph().as_default():
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
boxes = constant_op.constant(boxes_np)
scores = constant_op.constant(scores_np)
max_output_size = constant_op.constant(max_output_size_np)
iou_threshold = constant_op.constant(iou_threshold_np)
score_threshold = constant_op.constant(score_threshold_np)
selected_indices, num_valid = image_ops.non_max_suppression_padded(
boxes,
scores,
max_output_size,
iou_threshold,
score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(num_valid.eval(), 3)
@parameterized.named_parameters([("_RunEagerly", True), ("_RunGraph", False)])
@test_util.xla_allow_fallback(
"non_max_suppression with dynamic output shape unsupported.")
def testSelectFromContinuousOverLapV2(self, run_func_eagerly):
if not context.executing_eagerly() and run_func_eagerly:
# Skip running tf.function eagerly in V1 mode.
self.skipTest("Skip test that runs tf.function eagerly in V1 mode.")
else:
@def_function.function
def func(boxes, scores, max_output_size, iou_threshold, score_threshold):
boxes = constant_op.constant(boxes)
scores = constant_op.constant(scores)
max_output_size = constant_op.constant(max_output_size)
iou_threshold = constant_op.constant(iou_threshold)
score_threshold = constant_op.constant(score_threshold)
y, nv = image_ops.non_max_suppression_padded(
boxes, scores, max_output_size, iou_threshold, score_threshold)
# The output shape of the padded operation must be fully defined.
self.assertEqual(y.shape.is_fully_defined(), False)
return y, nv
boxes_np = [[0, 0, 1, 1], [0, 0.2, 1, 1.2], [0, 0.4, 1, 1.4],
[0, 0.6, 1, 1.6], [0, 0.8, 1, 1.8], [0, 2, 1, 2]]
scores_np = [0.9, 0.75, 0.6, 0.5, 0.4, 0.3]
max_output_size_np = 3
iou_threshold_np = 0.5
score_threshold_np = 0.1
selected_indices, num_valid = func(boxes_np, scores_np,
max_output_size_np, iou_threshold_np,
score_threshold_np)
with self.cached_session():
with test_util.run_functions_eagerly(run_func_eagerly):
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(self.evaluate(num_valid), 3)
def testInvalidDtype(self):
boxes_np = [[4.0, 6.0, 3.0, 6.0],
[2.0, 1.0, 5.0, 4.0],
[9.0, 0.0, 9.0, 9.0]]
scores = [5.0, 6.0, 5.0]
max_output_size = 2**31
with self.assertRaisesRegex(
(TypeError, ValueError), "type int64 that does not match type int32"):
boxes = constant_op.constant(boxes_np)
image_ops.non_max_suppression_padded(boxes, scores, max_output_size)
| NonMaxSuppressionPaddedTest |
python | jazzband__django-formtools | tests/wizard/test_forms.py | {
"start": 2561,
"end": 11406
} | class ____(TestCase):
def test_form_init(self):
testform = TestWizard.get_initkwargs([Step1, Step2])
self.assertEqual(testform['form_list'], {'0': Step1, '1': Step2})
testform = TestWizard.get_initkwargs([('start', Step1), ('step2', Step2)])
self.assertEqual(testform['form_list'], {'start': Step1, 'step2': Step2})
testform = TestWizard.get_initkwargs([Step1, Step2, ('finish', Step3)])
self.assertEqual(testform['form_list'], {'0': Step1, '1': Step2, 'finish': Step3})
testform = TestWizardWithInitAttrs.get_initkwargs()
self.assertEqual(testform['form_list'], {'0': Step1, '1': Step2})
def test_first_step(self):
request = get_request()
testform = TestWizard.as_view([Step1, Step2])
response, instance = testform(request)
self.assertEqual(instance.steps.current, '0')
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(instance.steps.current, 'start')
def test_persistence(self):
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
request = get_request({'test_wizard-current_step': 'start', 'name': 'data1'})
response, instance = testform(request)
self.assertEqual(instance.steps.current, 'start')
instance.storage.current_step = 'step2'
testform2 = TestWizard.as_view([('start', Step1), ('step2', Step2)])
request.POST = {'test_wizard-current_step': 'step2'}
response, instance = testform2(request)
self.assertEqual(instance.steps.current, 'step2')
def test_form_condition(self):
request = get_request()
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)],
condition_dict={'step2': True}
)
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step2')
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)],
condition_dict={'step2': False}
)
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step3')
testform = TestWizardWithInitAttrs.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)]
)
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step2')
def test_form_condition_avoid_recursion(self):
def subsequent_step_check(wizard):
data = wizard.get_cleaned_data_for_step('step3') or {}
return data.get('foo')
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)],
condition_dict={'step3': subsequent_step_check}
)
request = get_request()
old_limit = sys.getrecursionlimit()
sys.setrecursionlimit(80)
try:
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step2')
except RecursionError:
self.fail("RecursionError happened during wizard test.")
finally:
sys.setrecursionlimit(old_limit)
def test_form_condition_unstable(self):
request = get_request()
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)],
condition_dict={'step2': True}
)
response, instance = testform(request)
self.assertEqual(instance.get_step_index('step2'), 1)
self.assertEqual(instance.get_next_step('step2'), 'step3')
instance.condition_dict['step2'] = False
self.assertEqual(instance.get_step_index('step2'), None)
self.assertEqual(instance.get_next_step('step2'), 'start')
def test_form_kwargs(self):
request = get_request()
testform = TestWizard.as_view([
('start', Step1),
('kwargs_test', CustomKwargsStep1),
])
response, instance = testform(request)
self.assertEqual(instance.get_form_kwargs('start'), {})
self.assertEqual(instance.get_form_kwargs('kwargs_test'), {'test': True})
self.assertEqual(instance.get_form('kwargs_test').test, True)
def test_form_prefix(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(instance.get_form_prefix(), 'start')
self.assertEqual(instance.get_form_prefix('another'), 'another')
def test_form_initial(self):
request = get_request()
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2)],
initial_dict={'start': {'name': 'value1'}}
)
response, instance = testform(request)
self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'})
self.assertEqual(instance.get_form_initial('step2'), {})
testform = TestWizardWithInitAttrs.as_view(
[('start', Step1), ('step2', Step2)]
)
response, instance = testform(request)
self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'})
self.assertEqual(instance.get_form_initial('step2'), {})
def test_form_instance(self):
request = get_request()
the_instance = TestModel()
testform = TestWizard.as_view(
[('start', TestModelForm), ('step2', Step2)],
instance_dict={'start': the_instance}
)
response, instance = testform(request)
self.assertEqual(instance.get_form_instance('start'), the_instance)
self.assertIsNone(instance.get_form_instance('non_exist_instance'))
testform = TestWizardWithInitAttrs.as_view([('start', TestModelForm), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(
instance.get_form_instance('start'),
TestWizardWithInitAttrs.instance_dict['start']
)
def test_formset_instance(self):
request = get_request()
the_instance1, created = TestModel.objects.get_or_create(name='test object 1')
the_instance2, created = TestModel.objects.get_or_create(name='test object 2')
testform = TestWizard.as_view(
[('start', TestModelFormSet), ('step2', Step2)],
instance_dict={'start': TestModel.objects.filter(name='test object 1')}
)
response, instance = testform(request)
self.assertEqual(list(instance.get_form_instance('start')), [the_instance1])
self.assertEqual(instance.get_form_instance('non_exist_instance'), None)
self.assertEqual(instance.get_form().initial_form_count(), 1)
def test_done(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertRaises(NotImplementedError, instance.done, None)
def test_goto_step_kwargs(self):
"""Any extra kwarg given to render_goto_step is added to response context."""
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
_, instance = testform(request)
response = instance.render_goto_step('start', context_var='Foo')
self.assertIn('context_var', response.context_data.keys())
def test_revalidation(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
instance.render_done(None)
self.assertEqual(instance.storage.current_step, 'start')
def test_form_list_type(self):
request = get_request({'test_wizard_with_type_check-current_step': 'start', 'start-name': 'data1'})
testform = TestWizardWithTypeCheck.as_view([('start', Step1)])
response, instance = testform(request)
self.assertEqual(response.status_code, 200)
def test_get_form_list_default(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1)])
response, instance = testform(request)
form_list = instance.get_form_list()
self.assertEqual(form_list, {'start': Step1})
with self.assertRaises(KeyError):
instance.get_form('step2')
def test_get_form_list_custom(self):
request = get_request()
testform = TestWizardWithCustomGetFormList.as_view([('start', Step1)])
response, instance = testform(request)
form_list = instance.get_form_list()
self.assertEqual(form_list, {'start': Step1, 'step2': Step2})
self.assertIsInstance(instance.get_form('step2'), Step2)
| FormTests |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 63990,
"end": 64153
} | class ____(_ConfigBase):
generative: Union[GenerativeSearches, str]
model: Dict[str, Any]
GenerativeConfig = _GenerativeConfig
@dataclass
| _GenerativeConfig |
python | encode__django-rest-framework | tests/test_permissions.py | {
"start": 18941,
"end": 19072
} | class ____(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
return False
| BasicObjectPerm |
python | getsentry__sentry | src/sentry/models/releases/release_project.py | {
"start": 471,
"end": 1464
} | class ____(BaseManager["ReleaseProject"]):
@staticmethod
def _on_post(project, trigger):
from sentry.dynamic_sampling import ProjectBoostedReleases
project_boosted_releases = ProjectBoostedReleases(project.id)
# We want to invalidate the project config only if dynamic sampling is enabled and there exists boosted releases
# in the project.
if (
features.has("organizations:dynamic-sampling", project.organization)
and project_boosted_releases.has_boosted_releases
):
schedule_invalidate_project_config(project_id=project.id, trigger=trigger)
def post_save(self, *, instance: ReleaseProject, created: bool, **kwargs: object) -> None:
self._on_post(project=instance.project, trigger="releaseproject.post_save")
def post_delete(self, instance, **kwargs):
self._on_post(project=instance.project, trigger="releaseproject.post_delete")
@region_silo_model
| ReleaseProjectModelManager |
python | jpadilla__pyjwt | tests/test_api_jwk.py | {
"start": 7421,
"end": 11352
} | class ____:
@crypto_required
def test_should_load_keys_from_jwk_data_dict(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
with open(key_path("jwk_rsa_pub.json")) as keyfile:
pub_key = algo.from_jwk(keyfile.read())
key_data_str = algo.to_jwk(pub_key)
key_data = json.loads(key_data_str)
# TODO Should `to_jwk` set these?
key_data["alg"] = "RS256"
key_data["use"] = "sig"
key_data["kid"] = "keyid-abc123"
jwk_set = PyJWKSet.from_dict({"keys": [key_data]})
jwk = jwk_set.keys[0]
assert jwk.key_type == "RSA"
assert jwk.key_id == "keyid-abc123"
assert jwk.public_key_use == "sig"
@crypto_required
def test_should_load_keys_from_jwk_data_json_string(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
with open(key_path("jwk_rsa_pub.json")) as keyfile:
pub_key = algo.from_jwk(keyfile.read())
key_data_str = algo.to_jwk(pub_key)
key_data = json.loads(key_data_str)
# TODO Should `to_jwk` set these?
key_data["alg"] = "RS256"
key_data["use"] = "sig"
key_data["kid"] = "keyid-abc123"
jwk_set = PyJWKSet.from_json(json.dumps({"keys": [key_data]}))
jwk = jwk_set.keys[0]
assert jwk.key_type == "RSA"
assert jwk.key_id == "keyid-abc123"
assert jwk.public_key_use == "sig"
@crypto_required
def test_keyset_should_index_by_kid(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
with open(key_path("jwk_rsa_pub.json")) as keyfile:
pub_key = algo.from_jwk(keyfile.read())
key_data_str = algo.to_jwk(pub_key)
key_data = json.loads(key_data_str)
# TODO Should `to_jwk` set these?
key_data["alg"] = "RS256"
key_data["use"] = "sig"
key_data["kid"] = "keyid-abc123"
jwk_set = PyJWKSet.from_dict({"keys": [key_data]})
jwk = jwk_set.keys[0]
assert jwk == jwk_set["keyid-abc123"]
with pytest.raises(KeyError):
_ = jwk_set["this-kid-does-not-exist"]
@crypto_required
def test_keyset_iterator(self):
algo = RSAAlgorithm(RSAAlgorithm.SHA256)
with open(key_path("jwk_rsa_pub.json")) as keyfile:
pub_key = algo.from_jwk(keyfile.read())
key_data_str = algo.to_jwk(pub_key)
key_data = json.loads(key_data_str)
jwk_set = PyJWKSet.from_dict({"keys": [key_data]})
assert jwk_set.keys == [jwk for jwk in jwk_set]
@crypto_required
def test_keyset_with_unknown_alg(self):
# first keyset with unusable key and usable key
with open(key_path("jwk_keyset_with_unknown_alg.json")) as keyfile:
jwks_text = keyfile.read()
jwks = json.loads(jwks_text)
assert len(jwks.get("keys")) == 2
keyset = PyJWKSet.from_json(jwks_text)
assert len(keyset.keys) == 1
# second keyset with only unusable key -> catch exception
with open(key_path("jwk_keyset_only_unknown_alg.json")) as keyfile:
jwks_text = keyfile.read()
jwks = json.loads(jwks_text)
assert len(jwks.get("keys")) == 1
with pytest.raises(PyJWKSetError):
_ = PyJWKSet.from_json(jwks_text)
@crypto_required
def test_invalid_keys_list(self):
with pytest.raises(PyJWKSetError) as err:
PyJWKSet(keys="string") # type: ignore
assert str(err.value) == "Invalid JWK Set value"
@crypto_required
def test_empty_keys_list(self):
with pytest.raises(PyJWKSetError) as err:
PyJWKSet(keys=[])
assert str(err.value) == "The JWK Set did not contain any keys"
@no_crypto_required
def test_missing_crypto_library_raises_when_required(self):
with pytest.raises(MissingCryptographyError):
PyJWKSet(keys=[{"kty": "RSA"}])
| TestPyJWKSet |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 17661,
"end": 18028
} | class ____(TypedDict, total=False):
"""A single row from search results.
Only includes fields that were actually returned in the search.
The 'id' field is always present.
"""
id: str # Always present
document: Optional[str]
embedding: Optional[List[float]]
metadata: Optional[Dict[str, Any]]
score: Optional[float]
| SearchResultRow |
python | astropy__astropy | astropy/io/ascii/latex.py | {
"start": 15923,
"end": 17353
} | class ____(LatexHeader):
r"""In a `deluxetable
<http://fits.gsfc.nasa.gov/standard30/deluxetable.sty>`_ some header
keywords differ from standard LaTeX.
This header is modified to take that into account.
"""
header_start = r"\tablehead"
splitter_class = AASTexHeaderSplitter
def start_line(self, lines):
return find_latex_line(lines, r"\tablehead")
def write(self, lines):
if "col_align" not in self.latex:
self.latex["col_align"] = len(self.cols) * "c"
if "tablealign" in self.latex:
align = "[" + self.latex["tablealign"] + "]"
else:
align = ""
lines.append(
r"\begin{"
+ self.latex["tabletype"]
+ r"}{"
+ self.latex["col_align"]
+ r"}"
+ align
)
add_dictval_to_list(self.latex, "preamble", lines)
if "caption" in self.latex:
lines.append(r"\tablecaption{" + self.latex["caption"] + "}")
tablehead = " & ".join([r"\colhead{" + name + "}" for name in self.colnames])
units = self._get_units()
if "units" in self.latex:
units.update(self.latex["units"])
if units:
tablehead += r"\\ " + self.splitter.join(
[units.get(name, " ") for name in self.colnames]
)
lines.append(r"\tablehead{" + tablehead + "}")
| AASTexHeader |
python | doocs__leetcode | solution/0600-0699/0605.Can Place Flowers/Solution.py | {
"start": 0,
"end": 303
} | class ____:
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
flowerbed = [0] + flowerbed + [0]
for i in range(1, len(flowerbed) - 1):
if sum(flowerbed[i - 1 : i + 2]) == 0:
flowerbed[i] = 1
n -= 1
return n <= 0
| Solution |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 132707,
"end": 133873
} | class ____:
def test_plurals(self):
assert self.locale._format_timeframe("now", 0) == "nu"
assert self.locale._format_timeframe("second", 1) == "een seconde"
assert self.locale._format_timeframe("seconds", 30) == "30 seconden"
assert self.locale._format_timeframe("minute", 1) == "een minuut"
assert self.locale._format_timeframe("minutes", 40) == "40 minuten"
assert self.locale._format_timeframe("hour", 1) == "een uur"
assert self.locale._format_timeframe("hours", 23) == "23 uur"
assert self.locale._format_timeframe("day", 1) == "een dag"
assert self.locale._format_timeframe("days", 12) == "12 dagen"
assert self.locale._format_timeframe("week", 1) == "een week"
assert self.locale._format_timeframe("weeks", 38) == "38 weken"
assert self.locale._format_timeframe("month", 1) == "een maand"
assert self.locale._format_timeframe("months", 11) == "11 maanden"
assert self.locale._format_timeframe("year", 1) == "een jaar"
assert self.locale._format_timeframe("years", 12) == "12 jaar"
@pytest.mark.usefixtures("lang_locale")
| TestDutchLocale |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 226103,
"end": 227563
} | class ____(TestCase):
class Foo:
def __init__(self, value):
self.value = value
self.iface = {"typestr": "f8"}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
@parametrize(
"val, iface, expected",
[
(f, {}, 0.5),
([f], {}, [0.5]),
([f, f], {}, [0.5, 0.5]),
(f, {"shape": ()}, 0.5),
(f, {"shape": None}, TypeError),
(f, {"shape": (1, 1)}, [[0.5]]),
(f, {"shape": (2,)}, ValueError),
(f, {"strides": ()}, 0.5),
(f, {"strides": (2,)}, ValueError),
(f, {"strides": 16}, TypeError),
],
)
def test_scalar_interface(self, val, iface, expected):
# Test scalar coercion within the array interface
self.f.iface = {"typestr": "f8"}
self.f.iface.update(iface)
if HAS_REFCOUNT:
pre_cnt = sys.getrefcount(np.dtype("f8"))
if isinstance(expected, type):
assert_raises(expected, np.array, val)
else:
result = np.array(val)
assert_equal(np.array(val), expected)
assert result.dtype == "f8"
del result
if HAS_REFCOUNT:
post_cnt = sys.getrefcount(np.dtype("f8"))
assert_equal(pre_cnt, post_cnt)
| TestArrayInterface |
python | Netflix__metaflow | metaflow/plugins/kubernetes/kube_utils.py | {
"start": 246,
"end": 4140
} | class ____(MetaflowException):
headline = "Kubernetes error"
def parse_cli_options(flow_name, run_id, user, my_runs, echo):
if user and my_runs:
raise CommandException("--user and --my-runs are mutually exclusive.")
if run_id and my_runs:
raise CommandException("--run_id and --my-runs are mutually exclusive.")
if my_runs:
user = get_username()
latest_run = True
if user and not run_id:
latest_run = False
if not run_id and latest_run:
run_id = get_latest_run_id(echo, flow_name)
if run_id is None:
raise CommandException("A previous run id was not found. Specify --run-id.")
return flow_name, run_id, user
def qos_requests_and_limits(qos: str, cpu: int, memory: int, storage: int):
"return resource requests and limits for the kubernetes pod based on the given QoS Class"
# case insensitive matching for QoS class
qos = qos.lower()
# Determine the requests and limits to define chosen QoS class
qos_limits = {}
qos_requests = {}
if qos == "guaranteed":
# Guaranteed - has both cpu/memory limits. requests not required, as these will be inferred.
qos_limits = {
"cpu": str(cpu),
"memory": "%sM" % str(memory),
"ephemeral-storage": "%sM" % str(storage),
}
# NOTE: Even though Kubernetes will produce matching requests for the specified limits, this happens late in the lifecycle.
# We specify them explicitly here to make some K8S tooling happy, in case they rely on .resources.requests being present at time of submitting the job.
qos_requests = qos_limits
else:
# Burstable - not Guaranteed, and has a memory/cpu limit or request
qos_requests = {
"cpu": str(cpu),
"memory": "%sM" % str(memory),
"ephemeral-storage": "%sM" % str(storage),
}
# TODO: Add support for BestEffort once there is a use case for it.
# BestEffort - no limit or requests for cpu/memory
return qos_requests, qos_limits
def validate_kube_labels(
labels: Optional[Dict[str, Optional[str]]],
) -> bool:
"""Validate label values.
This validates the kubernetes label values. It does not validate the keys.
Ideally, keys should be static and also the validation rules for keys are
more complex than those for values. For full validation rules, see:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set
"""
def validate_label(s: Optional[str]):
regex_match = r"^(([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9])?$"
if not s:
# allow empty label
return True
if not re.search(regex_match, s):
raise KubernetesException(
'Invalid value: "%s"\n'
"A valid label must be an empty string or one that\n"
" - Consist of alphanumeric, '-', '_' or '.' characters\n"
" - Begins and ends with an alphanumeric character\n"
" - Is at most 63 characters" % s
)
return True
return all([validate_label(v) for v in labels.values()]) if labels else True
def parse_kube_keyvalue_list(items: List[str], requires_both: bool = True):
try:
ret = {}
for item_str in items:
item = item_str.split("=", 1)
if requires_both:
item[1] # raise IndexError
if str(item[0]) in ret:
raise KubernetesException("Duplicate key found: %s" % str(item[0]))
ret[str(item[0])] = str(item[1]) if len(item) > 1 else None
return ret
except KubernetesException as e:
raise e
except (AttributeError, IndexError):
raise KubernetesException("Unable to parse kubernetes list: %s" % items)
| KubernetesException |
python | tensorflow__tensorflow | tensorflow/python/ops/gradients_test.py | {
"start": 26996,
"end": 27335
} | class ____(test_util.TensorFlowTestCase):
def testPreventGradient(self):
with ops.Graph().as_default():
inp = constant(1.0, shape=[100, 32], name="in")
out = array_ops.prevent_gradient(inp)
with self.assertRaisesRegex(LookupError, "explicitly disabled"):
_ = gradients.gradients(out, inp)
| PreventGradientTest |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py | {
"start": 7380,
"end": 8395
} | class ____(KeyValueParser):
"""Composite argument parser for network remote key/value pairs."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
return dict(
provider=ChoicesParser(REMOTE_PROVIDERS),
arch=ChoicesParser(REMOTE_ARCHITECTURES),
collection=AnyParser(),
connection=AnyParser(),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
section_name = 'remote options'
state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}',
f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}',
' collection={collection}',
' connection={connection}',
])
return f'{{{section_name}}}'
| NetworkRemoteKeyValueParser |
python | keras-team__keras | keras/src/ops/math.py | {
"start": 4901,
"end": 6379
} | class ____(Operation):
def __init__(self, k, sorted=True, *, name=None):
super().__init__(name=name)
self.k = k
self.sorted = sorted
def compute_output_spec(self, x):
output_shape = list(x.shape)
output_shape[-1] = self.k
# Return a tuple (values, indices).
return (
KerasTensor(shape=output_shape, dtype=x.dtype),
KerasTensor(shape=output_shape, dtype="int32"),
)
def call(self, x):
return backend.math.top_k(x, self.k, self.sorted)
@keras_export("keras.ops.top_k")
def top_k(x, k, sorted=True):
"""Finds the top-k values and their indices in a tensor.
Args:
x: Input tensor.
k: An integer representing the number of top elements to retrieve.
sorted: A boolean indicating whether to sort the output in
descending order. Defaults to `True`.
Returns:
A tuple containing two tensors. The first tensor contains the
top-k values, and the second tensor contains the indices of the
top-k values in the input tensor.
Example:
>>> x = keras.ops.convert_to_tensor([5, 2, 7, 1, 9, 3])
>>> values, indices = top_k(x, k=3)
>>> print(values)
array([9 7 5], shape=(3,), dtype=int32)
>>> print(indices)
array([4 2 0], shape=(3,), dtype=int32)
"""
if any_symbolic_tensors((x,)):
return TopK(k, sorted).symbolic_call(x)
return backend.math.top_k(x, k, sorted)
| TopK |
python | coleifer__peewee | tests/sqliteq.py | {
"start": 577,
"end": 698
} | class ____(TestModel):
name = TextField(unique=True)
class Meta:
table_name = 'threaded_db_test_user'
| User |
python | pallets__werkzeug | src/werkzeug/exceptions.py | {
"start": 18025,
"end": 18229
} | class ____(HTTPException):
"""*423* `Locked`
Used if the resource that is being accessed is locked.
"""
code = 423
description = "The resource that is being accessed is locked."
| Locked |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_return_annotation_extends.py | {
"start": 279,
"end": 316
} | class ____(Test1_C1):
pass
| Test1_C2 |
python | astropy__astropy | astropy/table/column.py | {
"start": 2785,
"end": 12670
} | class ____(np.ndarray):
"""
Boolean mask array that is always False.
This is used to create a stub ``mask`` property which is a boolean array of
``False`` used by default for mixin columns and corresponding to the mixin
column data shape. The ``mask`` looks like a normal numpy array but an
exception will be raised if ``True`` is assigned to any element. The
consequences of the limitation are most obvious in the high-level table
operations.
Parameters
----------
shape : tuple
Data shape
"""
def __new__(cls, shape):
obj = np.zeros(shape, dtype=bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError(
f"Cannot set any element of {type(self).__name__} class to True"
)
def _expand_string_array_for_values(arr, values):
"""
For string-dtype return a version of ``arr`` that is wide enough for ``values``.
If ``arr`` is not string-dtype or does not need expansion then return ``arr``.
Parameters
----------
arr : np.ndarray
Input array
values : scalar or array-like
Values for width comparison for string arrays
Returns
-------
arr_expanded : np.ndarray
"""
if arr.dtype.kind in ("U", "S") and values is not np.ma.masked:
# Starting with numpy 2.0, np.char.str_len() propagates the mask for
# masked data. We want masked values to be preserved so unmask
# `values` prior to counting string lengths.
values = np.asarray(values)
# Find the length of the longest string in the new values.
values_str_len = np.char.str_len(values).max()
# Determine character repeat count of arr.dtype. Returns a positive
# int or None (something like 'U0' is not possible in numpy). If new values
# are longer than current then make a new (wider) version of arr.
arr_str_len = dtype_bytes_or_chars(arr.dtype)
if arr_str_len and values_str_len > arr_str_len:
arr_dtype = arr.dtype.byteorder + arr.dtype.kind + str(values_str_len)
arr = arr.astype(arr_dtype)
return arr
def _convert_sequence_data_to_array(data, dtype=None):
"""Convert N-d sequence-like data to ndarray or MaskedArray.
This is the core function for converting Python lists or list of lists to a
numpy array. This handles embedded np.ma.masked constants in ``data`` along
with the special case of an homogeneous list of MaskedArray elements.
Considerations:
- np.ma.array is about 50 times slower than np.array for list input. This
function avoids using np.ma.array on list input.
- np.array emits a UserWarning for embedded np.ma.masked, but only for int
or float inputs. For those it converts to np.nan and forces float dtype.
For other types np.array is inconsistent, for instance converting
np.ma.masked to "0.0" for str types.
- Searching in pure Python for np.ma.masked in ``data`` is comparable in
speed to calling ``np.array(data)``.
- This function may end up making two additional copies of input ``data``.
Parameters
----------
data : N-d sequence
Input data, typically list or list of lists
dtype : None or dtype-like
Output datatype (None lets np.array choose)
Returns
-------
np_data : np.ndarray or np.ma.MaskedArray
"""
np_ma_masked = np.ma.masked # Avoid repeated lookups of this object
has_len_gt0 = hasattr(data, "__len__") and len(data) > 0
# Special case of an homogeneous list of MaskedArray elements (see #8977).
# np.ma.masked is an instance of MaskedArray, so exclude those values.
if has_len_gt0 and all(
isinstance(val, np.ma.MaskedArray) and val is not np_ma_masked for val in data
):
np_data = np.ma.array(data, dtype=dtype)
return np_data
# First convert data to a plain ndarray. If there are instances of np.ma.masked
# in the data this will issue a warning for int and float.
with warnings.catch_warnings(record=True) as warns:
# Ensure this warning from numpy is always enabled and that it is not
# converted to an error (which can happen during pytest).
warnings.filterwarnings(
"always", category=UserWarning, message=".*converting a masked element.*"
)
# FutureWarning in numpy 1.21. See https://github.com/astropy/astropy/issues/11291
# and https://github.com/numpy/numpy/issues/18425.
warnings.filterwarnings(
"always",
category=FutureWarning,
message=".*Promotion of numbers and bools to strings.*",
)
has_unit = has_len_gt0 and any(hasattr(v, "unit") for v in data)
try:
cls = Quantity if has_unit else np.array
np_data = cls(data, dtype=dtype)
except np.ma.MaskError:
# Catches case of dtype=int with masked values, instead let it
# convert to float
np_data = np.array(data)
except Exception:
dtype = object
np_data = np.array(data, dtype=dtype)
if np_data.ndim == 0 or (np_data.ndim > 0 and len(np_data) == 0):
# Implies input was a scalar or an empty list (e.g. initializing an
# empty table with pre-declared names and dtypes but no data). Here we
# need to fall through to initializing with the original data=[].
return data
# If there were no warnings and the data are int or float, then we are done.
# Other dtypes like string or complex can have masked values and the
# np.array() conversion gives the wrong answer (e.g. converting np.ma.masked
# to the string "0.0").
if len(warns) == 0 and np_data.dtype.kind in ("i", "f"):
return np_data
# Now we need to determine if there is an np.ma.masked anywhere in input data.
# Make a statement like below to look for np.ma.masked in a nested sequence.
# Because np.array(data) succeeded we know that `data` has a regular N-d
# structure. Find ma_masked:
# any(any(any(d2 is ma_masked for d2 in d1) for d1 in d0) for d0 in data)
# Using this eval avoids creating a copy of `data` in the more-usual case of
# no masked elements.
any_statement = "d0 is ma_masked"
for ii in reversed(range(np_data.ndim)):
if ii == 0:
any_statement = f"any({any_statement} for d0 in data)"
elif ii == np_data.ndim - 1:
any_statement = f"any(d{ii} is ma_masked for d{ii} in d{ii - 1})"
else:
any_statement = f"any({any_statement} for d{ii} in d{ii - 1})"
context = {"ma_masked": np.ma.masked, "data": data}
has_masked = eval(any_statement, context)
# If there are any masks then explicitly change each one to a fill value and
# set a mask boolean array. If not has_masked then we're done.
if has_masked:
mask = np.zeros(np_data.shape, dtype=bool)
data_filled = np.array(data, dtype=object)
# Make type-appropriate fill value based on initial conversion.
if np_data.dtype.kind == "U":
fill = ""
elif np_data.dtype.kind == "S":
fill = b""
else:
# Zero works for every numeric type.
fill = 0
ranges = [range(dim) for dim in np_data.shape]
for idxs in itertools.product(*ranges):
val = data_filled[idxs]
if val is np_ma_masked:
data_filled[idxs] = fill
mask[idxs] = True
elif isinstance(val, bool) and dtype is None:
# If we see a bool and dtype not specified then assume bool for
# the entire array. Not perfect but in most practical cases OK.
# Unfortunately numpy types [False, 0] as int, not bool (and
# [False, np.ma.masked] => array([0.0, np.nan])).
dtype = bool
# If no dtype is provided then need to convert back to list so np.array
# does type autodetection.
if dtype is None:
data_filled = data_filled.tolist()
# Use np.array first to convert `data` to ndarray (fast) and then make
# masked array from an ndarray with mask (fast) instead of from `data`.
np_data = np.ma.array(np.array(data_filled, dtype=dtype), mask=mask)
return np_data
def _make_compare(oper):
"""
Make Column comparison methods which encode the ``other`` object to utf-8
in the case of a bytestring dtype for Py3+.
Parameters
----------
oper : str
Operator name
"""
def _compare(self, other):
op = oper # copy enclosed ref to allow swap below
# If other is a Quantity, we should let it do the work, since
# it can deal with our possible unit (which, for MaskedColumn,
# would get dropped below, as '.data' is accessed in super()).
if isinstance(other, Quantity):
return NotImplemented
# If we are unicode and other is a column with bytes, defer to it for
# doing the unicode sandwich. This avoids problems like those
# discussed in #6838 and #6899.
if (
self.dtype.kind == "U"
and isinstance(other, Column)
and other.dtype.kind == "S"
):
return NotImplemented
# If we are bytes, encode other as needed.
if self.dtype.char == "S":
other = self._encode_str(other)
# Now just let the regular ndarray.__eq__, etc., take over.
result = getattr(super(Column, self), op)(other)
# But we should not return Column instances for this case.
return result.data if isinstance(result, Column) else result
return _compare
| FalseArray |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 911083,
"end": 912052
} | class ____(sgqlc.types.relay.Connection):
"""A list of reactions that have been left on the subject."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count", "viewer_has_reacted")
edges = sgqlc.types.Field(sgqlc.types.list_of("ReactionEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Reaction"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
viewer_has_reacted = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerHasReacted")
"""Whether or not the authenticated user has left a reaction on the
subject.
"""
| ReactionConnection |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 296293,
"end": 297857
} | class ____(TypedDict, total=False):
"""
:class:`altair.VariableParameter` ``TypedDict`` wrapper.
Parameters
----------
name
A unique name for the variable parameter. Parameter names should be valid JavaScript
identifiers: they should contain only alphanumeric characters (or "$", or "_") and
may not start with a digit. Reserved keywords that may not be used as parameter
names are "datum", "event", "item", and "parent".
bind
Binds the parameter to an external input element such as a slider, selection list or
radio button group.
expr
An expression for the value of the parameter. This expression may include other
parameters, in which case the parameter will automatically update in response to
upstream parameter changes.
react
A boolean flag (default ``true``) indicating if the update expression should be
automatically re-evaluated when any upstream signal dependencies update. If
``false``, the update expression will not register any dependencies on other
signals, even for initialization.
**Default value:** ``true``
value
The `initial value <http://vega.github.io/vega-lite/docs/value.html>`__ of the
parameter.
**Default value:** ``undefined``
"""
name: str
bind: (
BindInputKwds
| BindRangeKwds
| BindDirectKwds
| BindCheckboxKwds
| BindRadioSelectKwds
)
expr: str
react: bool
value: Any
| VariableParameterKwds |
python | pypa__hatch | tests/backend/builders/test_wheel.py | {
"start": 28520,
"end": 141078
} | class ____:
def test_default_auto_detection(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"]}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_license_single", project_name, metadata_directory=metadata_directory
)
helpers.assert_files(extraction_directory, expected_files)
# Inspect the archive rather than the extracted files because on Windows they lose their metadata
# https://stackoverflow.com/q/9813243
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
def test_default_reproducible_timestamp(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"]}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd(env_vars={"SOURCE_DATE_EPOCH": "1580601700"}):
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_license_single", project_name, metadata_directory=metadata_directory
)
helpers.assert_files(extraction_directory, expected_files)
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
assert zip_info.date_time == (2020, 2, 2, 0, 1, 40)
def test_default_no_reproducible(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"], "reproducible": False}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd(env_vars={"SOURCE_DATE_EPOCH": "1580601700"}):
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_license_single", project_name, metadata_directory=metadata_directory
)
helpers.assert_files(extraction_directory, expected_files)
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
def test_default_multiple_licenses(self, hatch, helpers, config_file, temp_dir):
project_name = "My.App"
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.model.template.licenses.default = ["MIT", "Apache-2.0"]
config_file.save()
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
# Ensure that we trigger the non-file case for code coverage
(project_path / "LICENSES" / "test").mkdir()
config = {
"project": {"name": project_name, "dynamic": ["version"], "license-files": ["LICENSES/*"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"]}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_license_multiple", project_name, metadata_directory=metadata_directory
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_include(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"], "include": ["my_app", "tests"]}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_tests", project_name, metadata_directory=metadata_directory
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_only_packages(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
tests_path = project_path / "tests"
(tests_path / "__init__.py").replace(tests_path / "foo.py")
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {
"wheel": {"versions": ["standard"], "include": ["my_app", "tests"], "only-packages": True}
},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_license_single", project_name, metadata_directory=metadata_directory
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_only_packages_artifact_override(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
tests_path = project_path / "tests"
(tests_path / "__init__.py").replace(tests_path / "foo.py")
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"artifacts": ["foo.py"],
"targets": {
"wheel": {"versions": ["standard"], "include": ["my_app", "tests"], "only-packages": True}
},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_only_packages_artifact_override", project_name, metadata_directory=metadata_directory
)
helpers.assert_files(extraction_directory, expected_files)
@pytest.mark.parametrize(
("python_constraint", "expected_template_file"),
[
pytest.param(">3", "wheel.standard_default_python_constraint", id=">3"),
pytest.param("==3.11.4", "wheel.standard_default_python_constraint_three_components", id="==3.11.4"),
],
)
def test_default_python_constraint(
self, hatch, helpers, temp_dir, config_file, python_constraint, expected_template_file
):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "requires-python": python_constraint, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"]}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-py3-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
expected_template_file, project_name, metadata_directory=metadata_directory
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_build_script_default_tag(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
pass
"""
)
)
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"wheel": {"versions": ["standard"]}},
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
tag = "py3-none-any"
assert expected_artifact == str(build_path / f"{builder.project_id}-{tag}.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_build_script", project_name, metadata_directory=metadata_directory, tag=tag
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_build_script_set_tag(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
build_data['tag'] = 'foo-bar-baz'
"""
)
)
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"wheel": {"versions": ["standard"]}},
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
tag = "foo-bar-baz"
assert expected_artifact == str(build_path / f"{builder.project_id}-{tag}.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_build_script", project_name, metadata_directory=metadata_directory, tag=tag
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_build_script_known_artifacts(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = project_path / ".gitignore"
vcs_ignore_file.write_text("*.pyc\n*.so\n*.h")
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
build_data['pure_python'] = False
build_data['infer_tag'] = True
pathlib.Path('my_app', 'lib.so').touch()
pathlib.Path('my_app', 'lib.h').touch()
"""
)
)
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"wheel": {"versions": ["standard"]}},
"artifacts": ["my_app/lib.so"],
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
best_matching_tag = next(sys_tags())
tag = f"{best_matching_tag.interpreter}-{best_matching_tag.abi}-{best_matching_tag.platform}"
assert expected_artifact == str(build_path / f"{builder.project_id}-{tag}.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_build_script_artifacts",
project_name,
metadata_directory=metadata_directory,
tag=tag,
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_build_script_configured_build_hooks(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = project_path / ".gitignore"
vcs_ignore_file.write_text("*.pyc\n*.so\n*.h")
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
build_data['pure_python'] = False
build_data['infer_tag'] = True
pathlib.Path('my_app', 'lib.so').write_text(','.join(build_data['build_hooks']))
pathlib.Path('my_app', 'lib.h').write_text(','.join(build_data['build_hooks']))
"""
)
)
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"wheel": {"versions": ["standard"]}},
"artifacts": ["my_app/lib.so"],
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
best_matching_tag = next(sys_tags())
tag = f"{best_matching_tag.interpreter}-{best_matching_tag.abi}-{best_matching_tag.platform}"
assert expected_artifact == str(build_path / f"{builder.project_id}-{tag}.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_build_script_configured_build_hooks",
project_name,
metadata_directory=metadata_directory,
tag=tag,
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_build_script_extra_dependencies(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = project_path / ".gitignore"
vcs_ignore_file.write_text("*.pyc\n*.so\n*.h")
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
build_data['pure_python'] = False
build_data['infer_tag'] = True
build_data['dependencies'].append('binary')
pathlib.Path('my_app', 'lib.so').touch()
pathlib.Path('my_app', 'lib.h').touch()
"""
)
)
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"wheel": {"versions": ["standard"]}},
"artifacts": ["my_app/lib.so"],
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
best_matching_tag = next(sys_tags())
tag = f"{best_matching_tag.interpreter}-{best_matching_tag.abi}-{best_matching_tag.platform}"
assert expected_artifact == str(build_path / f"{builder.project_id}-{tag}.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_build_script_extra_dependencies",
project_name,
metadata_directory=metadata_directory,
tag=tag,
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_build_script_dynamic_artifacts(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = project_path / ".gitignore"
vcs_ignore_file.write_text("*.pyc\n*.so\n*.h")
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
build_data['pure_python'] = False
build_data['infer_tag'] = True
build_data['artifacts'] = ['my_app/lib.so']
pathlib.Path('my_app', 'lib.so').touch()
pathlib.Path('my_app', 'lib.h').touch()
"""
)
)
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"wheel": {"versions": ["standard"]}},
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
best_matching_tag = next(sys_tags())
tag = f"{best_matching_tag.interpreter}-{best_matching_tag.abi}-{best_matching_tag.platform}"
assert expected_artifact == str(build_path / f"{builder.project_id}-{tag}.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_build_script_artifacts",
project_name,
metadata_directory=metadata_directory,
tag=tag,
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_build_script_dynamic_force_include(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = project_path / ".gitignore"
vcs_ignore_file.write_text("*.pyc\n*.so\n*.h")
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
build_data['pure_python'] = False
build_data['infer_tag'] = True
build_data['artifacts'].extend(('lib.so', 'lib.h'))
build_data['force_include']['../artifacts'] = 'my_app'
artifact_path = pathlib.Path('..', 'artifacts')
artifact_path.mkdir()
(artifact_path / 'lib.so').touch()
(artifact_path / 'lib.h').touch()
"""
)
)
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"wheel": {"versions": ["standard"]}},
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
best_matching_tag = next(sys_tags())
tag = f"{best_matching_tag.interpreter}-{best_matching_tag.abi}-{best_matching_tag.platform}"
assert expected_artifact == str(build_path / f"{builder.project_id}-{tag}.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_build_script_force_include",
project_name,
metadata_directory=metadata_directory,
tag=tag,
)
helpers.assert_files(extraction_directory, expected_files)
    def test_default_build_script_dynamic_force_include_duplicate(self, hatch, helpers, temp_dir, config_file):
        """A hook ``force_include`` target that collides with a file already in
        the wheel (my_app/z.py) must replace it, not produce a duplicate entry
        (expected files come from the *_no_duplication template)."""
        config_file.model.template.plugins["default"]["src-layout"] = False
        config_file.save()
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        vcs_ignore_file = project_path / ".gitignore"
        vcs_ignore_file.write_text("*.pyc\n*.so\n*.h")
        # The file the hook's force_include mapping will shadow in the archive.
        target_file = project_path / "my_app" / "z.py"
        target_file.write_text('print("hello world")')
        build_script = project_path / DEFAULT_BUILD_SCRIPT
        build_script.write_text(
            helpers.dedent(
                """
                import pathlib
                from hatchling.builders.hooks.plugin.interface import BuildHookInterface
                class CustomHook(BuildHookInterface):
                    def initialize(self, version, build_data):
                        build_data['pure_python'] = False
                        build_data['infer_tag'] = True
                        build_data['force_include']['../tmp/new_z.py'] = 'my_app/z.py'
                        tmp_path = pathlib.Path('..', 'tmp')
                        tmp_path.mkdir()
                        (tmp_path / 'new_z.py').write_bytes(pathlib.Path('my_app/z.py').read_bytes())
                """
            )
        )
        config = {
            "project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "my_app/__about__.py"},
                    "build": {
                        "targets": {"wheel": {"versions": ["standard"]}},
                        "hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
                    },
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        # The hook set infer_tag, so the wheel name carries the platform tag.
        best_matching_tag = next(sys_tags())
        tag = f"{best_matching_tag.interpreter}-{best_matching_tag.abi}-{best_matching_tag.platform}"
        assert expected_artifact == str(build_path / f"{builder.project_id}-{tag}.whl")
        extraction_directory = temp_dir / "_archive"
        extraction_directory.mkdir()
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_archive.extractall(str(extraction_directory))
        metadata_directory = f"{builder.project_id}.dist-info"
        expected_files = helpers.get_template_files(
            "wheel.standard_default_build_script_force_include_no_duplication",
            project_name,
            metadata_directory=metadata_directory,
            tag=tag,
        )
        helpers.assert_files(extraction_directory, expected_files)
    def test_default_build_script_dynamic_artifacts_with_src_layout(self, hatch, helpers, temp_dir):
        """Hook-declared ``artifacts`` and ``force_include`` entries rooted
        under src/ are honored for an src-layout project."""
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        vcs_ignore_file = project_path / ".gitignore"
        vcs_ignore_file.write_text("*.pyc\n*.so\n*.pyd\n*.h")
        # Hook declares lib.so as an artifact and force-includes zlib.pyd;
        # lib.h is created but not declared (it stays VCS-ignored).
        build_script = project_path / DEFAULT_BUILD_SCRIPT
        build_script.write_text(
            helpers.dedent(
                """
                import pathlib
                from hatchling.builders.hooks.plugin.interface import BuildHookInterface
                class CustomHook(BuildHookInterface):
                    def initialize(self, version, build_data):
                        build_data['pure_python'] = False
                        build_data['infer_tag'] = True
                        build_data['artifacts'] = ['src/my_app/lib.so']
                        build_data['force_include']['src/zlib.pyd'] = 'src/zlib.pyd'
                        pathlib.Path('src', 'my_app', 'lib.so').touch()
                        pathlib.Path('src', 'lib.h').touch()
                        pathlib.Path('src', 'zlib.pyd').touch()
                """
            )
        )
        config = {
            "project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "src/my_app/__about__.py"},
                    "build": {
                        "targets": {"wheel": {"versions": ["standard"]}},
                        "hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
                    },
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        # The hook set infer_tag, so the wheel name carries the platform tag.
        best_matching_tag = next(sys_tags())
        tag = f"{best_matching_tag.interpreter}-{best_matching_tag.abi}-{best_matching_tag.platform}"
        assert expected_artifact == str(build_path / f"{builder.project_id}-{tag}.whl")
        extraction_directory = temp_dir / "_archive"
        extraction_directory.mkdir()
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_archive.extractall(str(extraction_directory))
        metadata_directory = f"{builder.project_id}.dist-info"
        expected_files = helpers.get_template_files(
            "wheel.standard_default_build_script_artifacts_with_src_layout",
            project_name,
            metadata_directory=metadata_directory,
            tag=tag,
        )
        helpers.assert_files(extraction_directory, expected_files)
def test_default_shared_data(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
shared_data_path = temp_dir / "data"
shared_data_path.ensure_dir_exists()
(shared_data_path / "foo.txt").touch()
nested_data_path = shared_data_path / "nested"
nested_data_path.ensure_dir_exists()
(nested_data_path / "bar.txt").touch()
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"], "shared-data": {"../data": "/"}}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
shared_data_directory = f"{builder.project_id}.data"
expected_files = helpers.get_template_files(
"wheel.standard_default_shared_data",
project_name,
metadata_directory=metadata_directory,
shared_data_directory=shared_data_directory,
)
helpers.assert_files(extraction_directory, expected_files)
    def test_default_shared_data_from_build_data(self, hatch, helpers, temp_dir, config_file):
        """``shared_data`` contributed by a build hook via ``build_data`` is
        honored just like the static shared-data option (same template)."""
        config_file.model.template.plugins["default"]["src-layout"] = False
        config_file.save()
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        shared_data_path = temp_dir / "data"
        shared_data_path.ensure_dir_exists()
        (shared_data_path / "foo.txt").touch()
        nested_data_path = shared_data_path / "nested"
        nested_data_path.ensure_dir_exists()
        (nested_data_path / "bar.txt").touch()
        # The hook, not the static config, registers the shared-data mapping.
        build_script = project_path / DEFAULT_BUILD_SCRIPT
        build_script.write_text(
            helpers.dedent(
                """
                import pathlib
                from hatchling.builders.hooks.plugin.interface import BuildHookInterface
                class CustomHook(BuildHookInterface):
                    def initialize(self, version, build_data):
                        build_data['shared_data']['../data'] = '/'
                """
            )
        )
        config = {
            "project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "my_app/__about__.py"},
                    "build": {"targets": {"wheel": {"versions": ["standard"], "hooks": {"custom": {}}}}},
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        extraction_directory = temp_dir / "_archive"
        extraction_directory.mkdir()
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_archive.extractall(str(extraction_directory))
        metadata_directory = f"{builder.project_id}.dist-info"
        shared_data_directory = f"{builder.project_id}.data"
        expected_files = helpers.get_template_files(
            "wheel.standard_default_shared_data",
            project_name,
            metadata_directory=metadata_directory,
            shared_data_directory=shared_data_directory,
        )
        helpers.assert_files(extraction_directory, expected_files)
    def test_default_shared_scripts(self, hatch, platform, helpers, temp_dir, config_file):
        """Files mapped via the wheel ``shared-scripts`` option end up in the
        archive's scripts directory; a binary's contents and (on non-Windows)
        its executable mode survive the round trip."""
        config_file.model.template.plugins["default"]["src-layout"] = False
        config_file.save()
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        shared_data_path = temp_dir / "data"
        shared_data_path.ensure_dir_exists()
        # A non-script binary: contents must round-trip unchanged.
        binary_contents = os.urandom(1024)
        binary_file = shared_data_path / "binary"
        binary_file.write_bytes(binary_contents)
        if not platform.windows:
            expected_mode = 0o755
            binary_file.chmod(expected_mode)
        # Scripts with assorted shebang lines (sh, python, pythonw, pypy, pypyw);
        # expected results are defined by the standard_default_shared_scripts template.
        (shared_data_path / "other_script.sh").write_text(
            helpers.dedent(
                """
                #!/bin/sh arg1 arg2
                echo "Hello, World!"
                """
            )
        )
        (shared_data_path / "python_script.sh").write_text(
            helpers.dedent(
                """
                #!/usr/bin/env python3.11 arg1 arg2
                print("Hello, World!")
                """
            )
        )
        (shared_data_path / "pythonw_script.sh").write_text(
            helpers.dedent(
                """
                #!/usr/bin/pythonw3.11 arg1 arg2
                print("Hello, World!")
                """
            )
        )
        (shared_data_path / "pypy_script.sh").write_text(
            helpers.dedent(
                """
                #!/usr/bin/env pypy
                print("Hello, World!")
                """
            )
        )
        (shared_data_path / "pypyw_script.sh").write_text(
            helpers.dedent(
                """
                #!pypyw3.11 arg1 arg2
                print("Hello, World!")
                """
            )
        )
        config = {
            "project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "my_app/__about__.py"},
                    "build": {"targets": {"wheel": {"versions": ["standard"], "shared-scripts": {"../data": "/"}}}},
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        extraction_directory = temp_dir / "_archive"
        extract_zip(expected_artifact, extraction_directory)
        metadata_directory = f"{builder.project_id}.dist-info"
        shared_data_directory = f"{builder.project_id}.data"
        expected_files = helpers.get_template_files(
            "wheel.standard_default_shared_scripts",
            project_name,
            metadata_directory=metadata_directory,
            shared_data_directory=shared_data_directory,
            binary_contents=binary_contents,
        )
        helpers.assert_files(extraction_directory, expected_files)
        if not platform.windows:
            # Executable bit must survive the zip round trip.
            extracted_binary = extraction_directory / shared_data_directory / "scripts" / "binary"
            assert extracted_binary.stat().st_mode & 0o777 == expected_mode
    def test_default_shared_scripts_from_build_data(self, hatch, platform, helpers, temp_dir, config_file):
        """``shared_scripts`` contributed by a build hook via ``build_data``
        behaves like the static shared-scripts option (same template)."""
        config_file.model.template.plugins["default"]["src-layout"] = False
        config_file.save()
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        shared_data_path = temp_dir / "data"
        shared_data_path.ensure_dir_exists()
        # A non-script binary: contents must round-trip unchanged.
        binary_contents = os.urandom(1024)
        binary_file = shared_data_path / "binary"
        binary_file.write_bytes(binary_contents)
        if not platform.windows:
            expected_mode = 0o755
            binary_file.chmod(expected_mode)
        # Scripts with assorted shebang lines (sh, python, pythonw, pypy, pypyw).
        (shared_data_path / "other_script.sh").write_text(
            helpers.dedent(
                """
                #!/bin/sh arg1 arg2
                echo "Hello, World!"
                """
            )
        )
        (shared_data_path / "python_script.sh").write_text(
            helpers.dedent(
                """
                #!/usr/bin/env python3.11 arg1 arg2
                print("Hello, World!")
                """
            )
        )
        (shared_data_path / "pythonw_script.sh").write_text(
            helpers.dedent(
                """
                #!/usr/bin/pythonw3.11 arg1 arg2
                print("Hello, World!")
                """
            )
        )
        (shared_data_path / "pypy_script.sh").write_text(
            helpers.dedent(
                """
                #!/usr/bin/env pypy
                print("Hello, World!")
                """
            )
        )
        (shared_data_path / "pypyw_script.sh").write_text(
            helpers.dedent(
                """
                #!pypyw3.11 arg1 arg2
                print("Hello, World!")
                """
            )
        )
        # The hook, not the static config, registers the shared-scripts mapping.
        build_script = project_path / DEFAULT_BUILD_SCRIPT
        build_script.write_text(
            helpers.dedent(
                """
                import pathlib
                from hatchling.builders.hooks.plugin.interface import BuildHookInterface
                class CustomHook(BuildHookInterface):
                    def initialize(self, version, build_data):
                        build_data['shared_scripts']['../data'] = '/'
                """
            )
        )
        config = {
            "project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "my_app/__about__.py"},
                    "build": {"targets": {"wheel": {"versions": ["standard"], "hooks": {"custom": {}}}}},
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        extraction_directory = temp_dir / "_archive"
        extract_zip(expected_artifact, extraction_directory)
        metadata_directory = f"{builder.project_id}.dist-info"
        shared_data_directory = f"{builder.project_id}.data"
        expected_files = helpers.get_template_files(
            "wheel.standard_default_shared_scripts",
            project_name,
            metadata_directory=metadata_directory,
            shared_data_directory=shared_data_directory,
            binary_contents=binary_contents,
        )
        helpers.assert_files(extraction_directory, expected_files)
        if not platform.windows:
            # Executable bit must survive the zip round trip.
            extracted_binary = extraction_directory / shared_data_directory / "scripts" / "binary"
            assert extracted_binary.stat().st_mode & 0o777 == expected_mode
def test_default_extra_metadata(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
extra_metadata_path = temp_dir / "data"
extra_metadata_path.ensure_dir_exists()
(extra_metadata_path / "foo.txt").touch()
nested_data_path = extra_metadata_path / "nested"
nested_data_path.ensure_dir_exists()
(nested_data_path / "bar.txt").touch()
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"], "extra-metadata": {"../data": "/"}}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_extra_metadata",
project_name,
metadata_directory=metadata_directory,
)
helpers.assert_files(extraction_directory, expected_files)
    def test_default_extra_metadata_build_data(self, hatch, helpers, temp_dir, config_file):
        """``extra_metadata`` contributed by a build hook via ``build_data`` is
        honored just like the static extra-metadata option (same template)."""
        config_file.model.template.plugins["default"]["src-layout"] = False
        config_file.save()
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        extra_metadata_path = temp_dir / "data"
        extra_metadata_path.ensure_dir_exists()
        (extra_metadata_path / "foo.txt").touch()
        nested_data_path = extra_metadata_path / "nested"
        nested_data_path.ensure_dir_exists()
        (nested_data_path / "bar.txt").touch()
        # The hook, not the static config, registers the extra-metadata mapping.
        build_script = project_path / DEFAULT_BUILD_SCRIPT
        build_script.write_text(
            helpers.dedent(
                """
                import pathlib
                from hatchling.builders.hooks.plugin.interface import BuildHookInterface
                class CustomHook(BuildHookInterface):
                    def initialize(self, version, build_data):
                        build_data['extra_metadata']['../data'] = '/'
                """
            )
        )
        config = {
            "project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "my_app/__about__.py"},
                    "build": {"targets": {"wheel": {"versions": ["standard"], "hooks": {"custom": {}}}}},
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        extraction_directory = temp_dir / "_archive"
        extraction_directory.mkdir()
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_archive.extractall(str(extraction_directory))
        metadata_directory = f"{builder.project_id}.dist-info"
        expected_files = helpers.get_template_files(
            "wheel.standard_default_extra_metadata",
            project_name,
            metadata_directory=metadata_directory,
        )
        helpers.assert_files(extraction_directory, expected_files)
    @pytest.mark.requires_unix
    def test_default_symlink(self, hatch, helpers, temp_dir, config_file):
        """An artifact that is a symlink (my_app/lib.so -> ../foo.so) is
        included in the wheel; expectations come from the
        standard_default_symlink template."""
        config_file.model.template.plugins["default"]["src-layout"] = False
        config_file.save()
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        vcs_ignore_file = project_path / ".gitignore"
        vcs_ignore_file.write_text("*.pyc\n*.so\n*.h")
        # Symlink target living outside the project directory.
        (temp_dir / "foo.so").write_bytes(b"data")
        build_script = project_path / DEFAULT_BUILD_SCRIPT
        build_script.write_text(
            helpers.dedent(
                """
                import os
                import pathlib
                from hatchling.builders.hooks.plugin.interface import BuildHookInterface
                class CustomHook(BuildHookInterface):
                    def initialize(self, version, build_data):
                        build_data['pure_python'] = False
                        build_data['infer_tag'] = True
                        pathlib.Path('my_app', 'lib.so').symlink_to(os.path.abspath(os.path.join('..', 'foo.so')))
                        pathlib.Path('my_app', 'lib.h').touch()
                """
            )
        )
        config = {
            "project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "my_app/__about__.py"},
                    "build": {
                        "targets": {"wheel": {"versions": ["standard"]}},
                        "artifacts": ["my_app/lib.so"],
                        "hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
                    },
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        # The hook set infer_tag, so the wheel name carries the platform tag.
        best_matching_tag = next(sys_tags())
        tag = f"{best_matching_tag.interpreter}-{best_matching_tag.abi}-{best_matching_tag.platform}"
        assert expected_artifact == str(build_path / f"{builder.project_id}-{tag}.whl")
        extraction_directory = temp_dir / "_archive"
        extraction_directory.mkdir()
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_archive.extractall(str(extraction_directory))
        metadata_directory = f"{builder.project_id}.dist-info"
        expected_files = helpers.get_template_files(
            "wheel.standard_default_symlink",
            project_name,
            metadata_directory=metadata_directory,
            tag=tag,
        )
        helpers.assert_files(extraction_directory, expected_files)
@fixed_pathlib_resolution
def test_editable_default(self, hatch, helpers, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "src/my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["editable"]}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_editable_pth",
project_name,
metadata_directory=metadata_directory,
package_paths=[str(project_path / "src")],
)
helpers.assert_files(extraction_directory, expected_files)
# Inspect the archive rather than the extracted files because on Windows they lose their metadata
# https://stackoverflow.com/q/9813243
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
    @fixed_pathlib_resolution
    def test_editable_default_extra_dependencies(self, hatch, helpers, temp_dir):
        """A dependency appended to ``build_data['dependencies']`` by a hook is
        reflected in the editable wheel (extra_dependencies template)."""
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        build_script = project_path / DEFAULT_BUILD_SCRIPT
        build_script.write_text(
            helpers.dedent(
                """
                import pathlib
                from hatchling.builders.hooks.plugin.interface import BuildHookInterface
                class CustomHook(BuildHookInterface):
                    def initialize(self, version, build_data):
                        build_data['dependencies'].append('binary')
                """
            )
        )
        config = {
            "project": {"name": project_name, "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "src/my_app/__about__.py"},
                    "build": {"targets": {"wheel": {"versions": ["editable"], "hooks": {"custom": {}}}}},
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
        extraction_directory = temp_dir / "_archive"
        extraction_directory.mkdir()
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_archive.extractall(str(extraction_directory))
        metadata_directory = f"{builder.project_id}.dist-info"
        expected_files = helpers.get_template_files(
            "wheel.standard_editable_pth_extra_dependencies",
            project_name,
            metadata_directory=metadata_directory,
            package_paths=[str(project_path / "src")],
        )
        helpers.assert_files(extraction_directory, expected_files)
        # Inspect the archive rather than the extracted files because on Windows they lose their metadata
        # https://stackoverflow.com/q/9813243
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
            assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
    @fixed_pathlib_resolution
    def test_editable_default_force_include(self, hatch, helpers, temp_dir):
        """``force_include_editable`` entries set by a hook are honored for
        editable builds (pth_force_include template)."""
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        build_script = project_path / DEFAULT_BUILD_SCRIPT
        build_script.write_text(
            helpers.dedent(
                """
                import pathlib
                from hatchling.builders.hooks.plugin.interface import BuildHookInterface
                class CustomHook(BuildHookInterface):
                    def initialize(self, version, build_data):
                        # Prefix z just to satisfy our ordering test assertion
                        build_data['force_include_editable']['src/my_app/__about__.py'] = 'zfoo.py'
                """
            )
        )
        config = {
            "project": {"name": project_name, "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "src/my_app/__about__.py"},
                    "build": {"targets": {"wheel": {"versions": ["editable"], "hooks": {"custom": {}}}}},
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
        extraction_directory = temp_dir / "_archive"
        extraction_directory.mkdir()
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_archive.extractall(str(extraction_directory))
        metadata_directory = f"{builder.project_id}.dist-info"
        expected_files = helpers.get_template_files(
            "wheel.standard_editable_pth_force_include",
            project_name,
            metadata_directory=metadata_directory,
            package_paths=[str(project_path / "src")],
        )
        helpers.assert_files(extraction_directory, expected_files)
        # Inspect the archive rather than the extracted files because on Windows they lose their metadata
        # https://stackoverflow.com/q/9813243
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
            assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
@fixed_pathlib_resolution
def test_editable_default_force_include_option(self, hatch, helpers, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "src/my_app/__about__.py"},
"build": {
"targets": {
"wheel": {
"versions": ["editable"],
"force-include": {"src/my_app/__about__.py": "zfoo.py"},
}
}
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_editable_pth_force_include",
project_name,
metadata_directory=metadata_directory,
package_paths=[str(project_path / "src")],
)
helpers.assert_files(extraction_directory, expected_files)
# Inspect the archive rather than the extracted files because on Windows they lose their metadata
# https://stackoverflow.com/q/9813243
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
    @pytest.mark.requires_unix
    def test_editable_default_symlink(self, hatch, helpers, temp_dir):
        """A symlinked copy of the package elsewhere in the project tree does
        not change the editable wheel: output still matches the plain
        standard_editable_pth template."""
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        # Symlink to the real package from a sibling directory.
        symlink = project_path / "_" / "my_app"
        symlink.parent.ensure_dir_exists()
        symlink.symlink_to(project_path / "src" / "my_app")
        config = {
            "project": {"name": project_name, "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "src/my_app/__about__.py"},
                    "build": {"targets": {"wheel": {"versions": ["editable"]}}},
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
        extraction_directory = temp_dir / "_archive"
        extraction_directory.mkdir()
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_archive.extractall(str(extraction_directory))
        metadata_directory = f"{builder.project_id}.dist-info"
        expected_files = helpers.get_template_files(
            "wheel.standard_editable_pth",
            project_name,
            metadata_directory=metadata_directory,
            package_paths=[str(project_path / "src")],
        )
        helpers.assert_files(extraction_directory, expected_files)
        # Inspect the archive rather than the extracted files because on Windows they lose their metadata
        # https://stackoverflow.com/q/9813243
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
            assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
@fixed_pathlib_resolution
def test_editable_exact(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["editable"], "dev-mode-exact": True}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_editable_exact",
project_name,
metadata_directory=metadata_directory,
package_root=str(project_path / "my_app" / "__init__.py"),
)
helpers.assert_files(extraction_directory, expected_files)
# Inspect the archive rather than the extracted files because on Windows they lose their metadata
# https://stackoverflow.com/q/9813243
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
    @fixed_pathlib_resolution
    def test_editable_exact_extra_dependencies(self, hatch, helpers, temp_dir, config_file):
        """dev-mode-exact editable build combined with a hook-appended
        dependency ('binary') — expectations from the
        standard_editable_exact_extra_dependencies template."""
        config_file.model.template.plugins["default"]["src-layout"] = False
        config_file.save()
        project_name = "My.App"
        with temp_dir.as_cwd():
            result = hatch("new", project_name)
        assert result.exit_code == 0, result.output
        project_path = temp_dir / "my-app"
        build_script = project_path / DEFAULT_BUILD_SCRIPT
        build_script.write_text(
            helpers.dedent(
                """
                import pathlib
                from hatchling.builders.hooks.plugin.interface import BuildHookInterface
                class CustomHook(BuildHookInterface):
                    def initialize(self, version, build_data):
                        build_data['dependencies'].append('binary')
                """
            )
        )
        config = {
            "project": {"name": project_name, "dynamic": ["version"]},
            "tool": {
                "hatch": {
                    "version": {"path": "my_app/__about__.py"},
                    "build": {
                        "targets": {
                            "wheel": {"versions": ["editable"], "dev-mode-exact": True, "hooks": {"custom": {}}}
                        }
                    },
                },
            },
        }
        builder = WheelBuilder(str(project_path), config=config)
        build_path = project_path / "dist"
        build_path.mkdir()
        with project_path.as_cwd():
            artifacts = list(builder.build(directory=str(build_path)))
        assert len(artifacts) == 1
        expected_artifact = artifacts[0]
        build_artifacts = list(build_path.iterdir())
        assert len(build_artifacts) == 1
        assert expected_artifact == str(build_artifacts[0])
        assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
        extraction_directory = temp_dir / "_archive"
        extraction_directory.mkdir()
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_archive.extractall(str(extraction_directory))
        metadata_directory = f"{builder.project_id}.dist-info"
        expected_files = helpers.get_template_files(
            "wheel.standard_editable_exact_extra_dependencies",
            project_name,
            metadata_directory=metadata_directory,
            package_root=str(project_path / "my_app" / "__init__.py"),
        )
        helpers.assert_files(extraction_directory, expected_files)
        # Inspect the archive rather than the extracted files because on Windows they lose their metadata
        # https://stackoverflow.com/q/9813243
        with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
            zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
            assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
@fixed_pathlib_resolution
def test_editable_exact_force_include(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
# Prefix z just to satisfy our ordering test assertion
build_data['force_include_editable']['my_app/__about__.py'] = 'zfoo.py'
"""
)
)
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {
"wheel": {"versions": ["editable"], "dev-mode-exact": True, "hooks": {"custom": {}}}
}
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_editable_exact_force_include",
project_name,
metadata_directory=metadata_directory,
package_root=str(project_path / "my_app" / "__init__.py"),
)
helpers.assert_files(extraction_directory, expected_files)
# Inspect the archive rather than the extracted files because on Windows they lose their metadata
# https://stackoverflow.com/q/9813243
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
@fixed_pathlib_resolution
def test_editable_exact_force_include_option(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {
"wheel": {
"versions": ["editable"],
"dev-mode-exact": True,
"force-include": {"my_app/__about__.py": "zfoo.py"},
}
}
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_editable_exact_force_include",
project_name,
metadata_directory=metadata_directory,
package_root=str(project_path / "my_app" / "__init__.py"),
)
helpers.assert_files(extraction_directory, expected_files)
# Inspect the archive rather than the extracted files because on Windows they lose their metadata
# https://stackoverflow.com/q/9813243
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
@fixed_pathlib_resolution
def test_editable_exact_force_include_build_data_precedence(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
# Prefix z just to satisfy our ordering test assertion
build_data['force_include_editable']['my_app/__about__.py'] = 'zfoo.py'
"""
)
)
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {
"wheel": {
"versions": ["editable"],
"dev-mode-exact": True,
"force-include": {"my_app/__about__.py": "zbar.py"},
"hooks": {"custom": {}},
}
}
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_editable_exact_force_include",
project_name,
metadata_directory=metadata_directory,
package_root=str(project_path / "my_app" / "__init__.py"),
)
helpers.assert_files(extraction_directory, expected_files)
# Inspect the archive rather than the extracted files because on Windows they lose their metadata
# https://stackoverflow.com/q/9813243
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
@fixed_pathlib_resolution
def test_editable_pth(self, hatch, helpers, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "src/my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["editable"], "dev-mode-dirs": ["."]}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_editable_pth",
project_name,
metadata_directory=metadata_directory,
package_paths=[str(project_path)],
)
helpers.assert_files(extraction_directory, expected_files)
# Inspect the archive rather than the extracted files because on Windows they lose their metadata
# https://stackoverflow.com/q/9813243
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_info = zip_archive.getinfo(f"{metadata_directory}/WHEEL")
assert zip_info.date_time == (2020, 2, 2, 0, 0, 0)
def test_default_namespace_package(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
package_path = project_path / "my_app"
namespace_path = project_path / "namespace"
namespace_path.mkdir()
package_path.replace(namespace_path / "my_app")
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "namespace/my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"]}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_namespace_package",
project_name,
metadata_directory=metadata_directory,
namespace="namespace",
)
helpers.assert_files(extraction_directory, expected_files)
def test_default_entry_points(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"], "scripts": {"foo": "pkg:bar", "bar": "pkg:foo"}},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"]}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(build_path / f"{builder.project_id}-{get_python_versions_tag()}-none-any.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_entry_points", project_name, metadata_directory=metadata_directory
)
helpers.assert_files(extraction_directory, expected_files)
def test_explicit_selection_with_src_layout(self, hatch, helpers, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "src/my_app/__about__.py"},
"build": {
"targets": {
"wheel": {
"versions": ["standard"],
"artifacts": ["README.md"],
"only-include": ["src/my_app"],
"sources": ["src"],
}
},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_license_single",
project_name,
metadata_directory=metadata_directory,
)
helpers.assert_files(extraction_directory, expected_files)
def test_single_module(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
(project_path / "my_app").remove()
(project_path / "my_app.py").touch()
config = {"project": {"name": project_name, "version": "0.0.1"}}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_single_module",
project_name,
metadata_directory=metadata_directory,
)
helpers.assert_files(extraction_directory, expected_files)
def test_no_strict_naming(self, hatch, helpers, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"], "strict-naming": False}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(
build_path / f"{builder.artifact_project_id}-{get_python_versions_tag()}-none-any.whl"
)
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.artifact_project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_no_strict_naming", project_name, metadata_directory=metadata_directory
)
helpers.assert_files(extraction_directory, expected_files)
def test_editable_sources_rewrite_error(self, hatch, temp_dir):
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "src/my_app/__about__.py"},
"build": {
"targets": {
"wheel": {
"versions": ["editable"],
"only-include": ["src/my_app"],
"sources": {"src/my_app": "namespace/plugins/my_app"},
}
},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with (
project_path.as_cwd(),
pytest.raises(
ValueError,
match=(
"Dev mode installations are unsupported when any path rewrite in the `sources` option "
"changes a prefix rather than removes it, see: "
"https://github.com/pfmoore/editables/issues/20"
),
),
):
list(builder.build(directory=str(build_path)))
@pytest.mark.skipif(
sys.platform != "darwin" or sys.version_info < (3, 8),
reason="requires support for ARM on macOS",
)
@pytest.mark.parametrize(
("archflags", "expected_arch"),
[("-arch x86_64", "x86_64"), ("-arch arm64", "arm64"), ("-arch arm64 -arch x86_64", "universal2")],
)
def test_macos_archflags(self, hatch, helpers, temp_dir, config_file, archflags, expected_arch):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = project_path / ".gitignore"
vcs_ignore_file.write_text("*.pyc\n*.so\n*.h")
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
build_data['pure_python'] = False
build_data['infer_tag'] = True
pathlib.Path('my_app', 'lib.so').touch()
pathlib.Path('my_app', 'lib.h').touch()
"""
)
)
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"wheel": {"versions": ["standard"]}},
"artifacts": ["my_app/lib.so"],
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd({"ARCHFLAGS": archflags}):
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
tag = next(sys_tags())
tag_parts = [tag.interpreter, tag.abi, tag.platform]
tag_parts[2] = tag_parts[2].replace(platform.mac_ver()[2], expected_arch)
expected_tag = "-".join(tag_parts)
assert expected_artifact == str(build_path / f"{builder.project_id}-{expected_tag}.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_build_script_artifacts",
project_name,
metadata_directory=metadata_directory,
tag=expected_tag,
)
helpers.assert_files(extraction_directory, expected_files)
@pytest.mark.requires_macos
@pytest.mark.parametrize("macos_max_compat", [True, False])
def test_macos_max_compat(self, hatch, helpers, temp_dir, config_file, macos_max_compat):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
vcs_ignore_file = project_path / ".gitignore"
vcs_ignore_file.write_text("*.pyc\n*.so\n*.h")
build_script = project_path / DEFAULT_BUILD_SCRIPT
build_script.write_text(
helpers.dedent(
"""
import pathlib
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomHook(BuildHookInterface):
def initialize(self, version, build_data):
build_data['pure_python'] = False
build_data['infer_tag'] = True
pathlib.Path('my_app', 'lib.so').touch()
pathlib.Path('my_app', 'lib.h').touch()
"""
)
)
config = {
"project": {"name": project_name, "requires-python": ">3", "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {
"targets": {"wheel": {"versions": ["standard"], "macos-max-compat": macos_max_compat}},
"artifacts": ["my_app/lib.so"],
"hooks": {"custom": {"path": DEFAULT_BUILD_SCRIPT}},
},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
build_path.mkdir()
with project_path.as_cwd():
artifacts = list(builder.build(directory=str(build_path)))
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
tag = next(sys_tags())
tag_parts = [tag.interpreter, tag.abi, tag.platform]
if macos_max_compat:
sdk_version_major, sdk_version_minor = tag_parts[2].split("_")[1:3]
if int(sdk_version_major) >= 11:
tag_parts[2] = tag_parts[2].replace(f"{sdk_version_major}_{sdk_version_minor}", "10_16", 1)
expected_tag = "-".join(tag_parts)
assert expected_artifact == str(build_path / f"{builder.project_id}-{expected_tag}.whl")
extraction_directory = temp_dir / "_archive"
extraction_directory.mkdir()
with zipfile.ZipFile(str(expected_artifact), "r") as zip_archive:
zip_archive.extractall(str(extraction_directory))
metadata_directory = f"{builder.project_id}.dist-info"
expected_files = helpers.get_template_files(
"wheel.standard_default_build_script_artifacts",
project_name,
metadata_directory=metadata_directory,
tag=expected_tag,
)
helpers.assert_files(extraction_directory, expected_files)
def test_file_permissions_normalized(self, hatch, temp_dir, config_file):
config_file.model.template.plugins["default"]["src-layout"] = False
config_file.save()
project_name = "My.App"
with temp_dir.as_cwd():
result = hatch("new", project_name)
assert result.exit_code == 0, result.output
project_path = temp_dir / "my-app"
config = {
"project": {"name": project_name, "dynamic": ["version"]},
"tool": {
"hatch": {
"version": {"path": "my_app/__about__.py"},
"build": {"targets": {"wheel": {"versions": ["standard"], "strict-naming": False}}},
},
},
}
builder = WheelBuilder(str(project_path), config=config)
build_path = project_path / "dist"
with project_path.as_cwd():
artifacts = list(builder.build())
assert len(artifacts) == 1
expected_artifact = artifacts[0]
build_artifacts = list(build_path.iterdir())
assert len(build_artifacts) == 1
assert expected_artifact == str(build_artifacts[0])
assert expected_artifact == str(
build_path / f"{builder.artifact_project_id}-{get_python_versions_tag()}-none-any.whl"
)
file_stat = os.stat(expected_artifact)
# we assert that at minimum 644 is set, based on the platform (e.g.)
# windows it may be higher
assert file_stat.st_mode & 0o644
| TestBuildStandard |
python | getsentry__sentry | src/sentry/utils/auth.py | {
"start": 1728,
"end": 3051
} | class ____:
"""
The value returned from to_dict is stored in the django session cookie, with the org id being the key.
"""
SSO_SESSION_KEY = "sso_s"
SSO_LOGIN_TIMESTAMP = "ts"
def __init__(self, organization_id: int, time: datetime) -> None:
self.organization_id = organization_id
self.authenticated_at_time = time
self.session_key = self.django_session_key(organization_id)
def to_dict(self) -> dict[str, Any]:
return {self.SSO_LOGIN_TIMESTAMP: self.authenticated_at_time.timestamp()}
@classmethod
def create(cls, organization_id: int) -> SsoSession:
return cls(organization_id, datetime.now(tz=timezone.utc))
@classmethod
def from_django_session_value(
cls, organization_id: int, session_value: Mapping[str, Any]
) -> SsoSession:
return cls(
organization_id,
datetime.fromtimestamp(session_value[cls.SSO_LOGIN_TIMESTAMP], tz=timezone.utc),
)
def is_sso_authtime_fresh(self) -> bool:
expired_time_cutoff = datetime.now(tz=timezone.utc) - SSO_EXPIRY_TIME
return self.authenticated_at_time > expired_time_cutoff
@staticmethod
def django_session_key(organization_id: int) -> str:
return f"{SsoSession.SSO_SESSION_KEY}:{organization_id}"
| SsoSession |
python | google__jax | jax/_src/pallas/pipelining/schedule_api.py | {
"start": 2066,
"end": 11398
} | class ____:
"""Constructs an asynchronous pipeline stage."""
def __init__(self, max_in_flight: int):
self.start_func = None
self.end_func = None
self.max_in_flight = max_in_flight
def def_start(self, func):
self.start_func = func
return self
def def_end(self, func):
self.end_func = func
return self
def trace(
self, abstract_refs, state_avals, grid
) -> tuple[internal.PipelineStage, internal.PipelineStage]:
start_jaxpr, start_effs = trace_fun(
self.start_func, abstract_refs, state_avals, grid
)
end_jaxpr, end_effs = trace_fun(
self.end_func, abstract_refs, state_avals, grid
)
token = internal.make_token(self)
start_effs = {*start_effs, internal.WriteEffect(token)}
end_effs = {*end_effs, internal.ReadEffect(token)}
name = getattr(self.start_func, "__name__", str(self.start_func))
start_stage = internal.PipelineStage(
jaxpr=start_jaxpr,
effects=start_effs,
properties=internal.SchedulingProperties(
max_in_flight=self.max_in_flight,
is_async_start=True,
is_async_done=False,
),
name=name,
)
name = getattr(self.end_func, "__name__", str(self.end_func))
end_stage = internal.PipelineStage(
jaxpr=end_jaxpr,
effects=end_effs,
properties=internal.SchedulingProperties(
max_in_flight=self.max_in_flight,
is_async_start=False,
is_async_done=True,
),
name=name,
)
return start_stage, end_stage
Stage = SyncStage | AsyncStage
def trace_fun(
fun, ref_avals, state_avals, grid
) -> tuple[jax_core.ClosedJaxpr, Sequence[internal.RefEffect]]:
"""Trace a stage body function to a Jaxpr."""
ctx_aval = PipelineContext.aval_pytree(grid, state_avals)
num_ctx_avals = len(jax.tree.leaves(ctx_aval))
flat_avals, in_tree = jax.tree.flatten((ctx_aval, *ref_avals))
debug_info = api_util.debug_info("trace_fun", fun, flat_avals, {})
flat_fn, out_tree_thunk = api_util.flatten_fun_nokwargs(
lu.wrap_init(fun, debug_info=debug_info), in_tree
)
del out_tree_thunk
jaxpr, _, consts = pe.trace_to_jaxpr_dynamic(flat_fn, flat_avals)
ref_effects = [
eff for eff in jaxpr.effects if isinstance(eff, state_types.RefEffect)
]
# Subtract off the consts and state_avals, since this is variable per stage.
n_const = len(consts)
ref_effects = [
type(eff)(input_index=eff.input_index - n_const - num_ctx_avals)
for eff in ref_effects
]
return jax_core.ClosedJaxpr(jaxpr, consts), ref_effects
def apply_ref_filter(
stages: Sequence[internal.PipelineStage],
ref_filter: Any,
grid, state_avals
) -> Sequence[internal.PipelineStage]:
"""Removes any effects belonging to Refs that do not pass the filter."""
if ref_filter is None:
return stages
ctx_aval = PipelineContext.aval_pytree(grid, state_avals)
num_ctx_avals = len(jax.tree.leaves(ctx_aval))
new_stages = []
for stage_ in stages:
jaxpr = stage_.jaxpr.jaxpr
ref_effects = stage_.effects
token_effects = list(internal.filter_tokens(ref_effects))
refs_to_keep = {
i - num_ctx_avals
for i, aval in enumerate(jaxpr.in_avals)
if ref_filter(aval)
}
new_effects = [
eff for eff in ref_effects if eff.input_index in refs_to_keep
] + token_effects
new_stages.append(dataclasses.replace(stage_, effects=set(new_effects)))
return new_stages
def convert_accum_effects_to_writes(stages: Sequence[internal.PipelineStage]
) -> Sequence[internal.PipelineStage]:
"""Replaces all accumulate effects with simple writes."""
# After tracing, an accumulation such as ref[...] += y
# will result in both a ReadEffect and a WriteEffect into `ref`.
new_stages = []
for stage_ in stages:
read_effs = internal.filter_read_effects(stage_.effects)
write_effs = internal.filter_write_effects(stage_.effects)
new_read_effs = (
eff
for eff in read_effs
if state_types.WriteEffect(eff.input_index) not in write_effs
)
effs = (*new_read_effs, *write_effs)
new_stages.append(dataclasses.replace(stage_, effects=set(effs)))
return new_stages
def remove_duplicate_writes_between_async_stages(
stages: Sequence[internal.PipelineStage],
) -> Sequence[internal.PipelineStage]:
"""Removes duplicate writes between the async start and done stages.
This is done because the scheduler doesn't support multiple writes to
the same Ref in different stages. We instead write to a token in the
async_start stage that's read by the async_done and all direct consumers.
"""
new_stages = []
for stage_ in stages:
if stage_.properties.is_async_start:
start_read_effs = internal.filter_read_effects(stage_.effects)
start_write_effs = internal.filter_write_effects(stage_.effects)
write_token = internal.filter_tokens(start_write_effs)
assert len(write_token) == 1, stage_.effects
write_token = tuple(write_token)[0]
read_token = state_types.ReadEffect(write_token.input_index)
done_stage = [
x
for x in stages
if x.properties.is_async_done and read_token in x.effects
]
assert len(done_stage) == 1
done_stage = done_stage[0]
end_write_effs = internal.filter_write_effects(done_stage.effects)
start_write_effs = start_write_effs - end_write_effs
start_effs = (*start_read_effs, *start_write_effs)
new_stages.append(dataclasses.replace(stage_, effects=set(start_effs)))
else:
new_stages.append(stage_)
return new_stages
def thread_token_deps_to_consumers(stages: Sequence[internal.PipelineStage]
) -> Sequence[internal.PipelineStage]:
"""Threads the async token to consumers of async op.
This ensures that the async_start op does not start too soon and potentially
clobber buffers that the consumers are reading from.
"""
effects = [stage_.effects for stage_ in stages]
for stage_ in stages:
if stage_.properties.is_async_done:
write_tokens = internal.filter_tokens(
internal.filter_write_effects(stage_.effects)
)
read_tokens = internal.filter_tokens(
internal.filter_read_effects(stage_.effects)
)
assert not write_tokens, stage_.effects
assert len(read_tokens) == 1, stage_.effects
read_token_effect = tuple(read_tokens)[0]
write_idxs = stage_.get_write_idxs()
for i, other_stage in enumerate(stages):
if any(
write_idx in other_stage.get_read_idxs() for write_idx in write_idxs
):
effects[i].add(read_token_effect)
return [dataclasses.replace(stage_, effects=set(effects[i])
) for i, stage_ in enumerate(stages)]
def schedule_pipeline(
stages: Sequence[Stage],
grid: Sequence[int],
args: Sequence[Any],
ref_filter: Optional[Any] = None,
initial_state: schedulers.PipelineState | None = None,
scheduler: schedulers.PipelineScheduler = schedulers.static_nd_loop_scheduler,
**scheduler_kwargs,
):
"""Schedules stages and emits the code for a pipeline.
Args:
stages: A sequence of pipeline stages.
grid: The loop grid size.
args: A sequence of arguments to the pipeline. These will be passed
directly to each stage.
ref_filter: An optional function to filter out Refs during tracing so
that they do not affect the pipeline schedule.
initial_state: An optional pipeline state that will be passed as a
carry into each stage.
scheduler: Which scheduling function to use.
**scheduler_kwargs: Additional arguments to pass to the scheduler.
Returns:
A function that can be called with ``args`` and runs the pipeline.
"""
_, ref_tree = jax.tree.flatten(args)
def _get_aval(x):
if hasattr(x, "get_ref_aval"):
return x.get_ref_aval()
return jax_core.get_aval(x)
avals = jax.tree.map(_get_aval, args)
# Make state avals.
state_avals = jax.tree.map(_get_aval, initial_state)
traced_stages = []
for stage in stages:
if isinstance(stage, SyncStage):
traced_stages.append(stage.trace(avals, state_avals, grid))
elif isinstance(stage, AsyncStage):
start_stage, end_stage = stage.trace(avals, state_avals, grid)
traced_stages.append(start_stage)
traced_stages.append(end_stage)
else:
raise ValueError(f"Unsupported stage type: {type(stage)}")
# Run several "passes" to clean up effects before scheduling.
traced_stages = apply_ref_filter(traced_stages, ref_filter, grid, state_avals)
traced_stages = convert_accum_effects_to_writes(traced_stages)
traced_stages = remove_duplicate_writes_between_async_stages(traced_stages)
traced_stages = thread_token_deps_to_consumers(traced_stages)
loop_struct = internal.NDLoopStruct(stages=traced_stages, grid=grid)
def pipeline(*args):
flat_args, args_tree = jax.tree.flatten(args)
if args_tree != ref_tree:
raise ValueError(
f"Args tree and ref tree do not match.\n{args_tree=}\n{ref_tree=}"
)
scheduler(
loop_struct,
args=flat_args,
initial_state=initial_state,
**scheduler_kwargs,
)
return pipeline
| AsyncStage |
python | walkccc__LeetCode | solutions/802. Find Eventual Safe States/802.py | {
"start": 85,
"end": 570
} | class ____:
def eventualSafeNodes(self, graph: list[list[int]]) -> list[int]:
states = [State.INIT] * len(graph)
def hasCycle(u: int) -> bool:
if states[u] == State.VISITING:
return True
if states[u] == State.VISITED:
return False
states[u] = State.VISITING
if any(hasCycle(v) for v in graph[u]):
return True
states[u] = State.VISITED
return False
return [i for i in range(len(graph)) if not hasCycle(i)]
| Solution |
python | openai__openai-python | src/openai/types/static_file_chunking_strategy_object.py | {
"start": 278,
"end": 424
} | class ____(BaseModel):
static: StaticFileChunkingStrategy
type: Literal["static"]
"""Always `static`."""
| StaticFileChunkingStrategyObject |
python | Netflix__metaflow | metaflow/plugins/metadata_providers/spin.py | {
"start": 140,
"end": 472
} | class ____(LocalMetadataProvider):
TYPE = "spin"
DATASTORE_DIR = DATASTORE_SPIN_LOCAL_DIR # ".metaflow_spin"
@classmethod
def _get_storage_class(cls):
from metaflow.plugins.datastores.spin_storage import SpinStorage
return SpinStorage
def version(self):
return "spin"
| SpinMetadataProvider |
python | PyCQA__pylint | tests/functional/ext/docparams/parameter/missing_param_doc_required_Sphinx.py | {
"start": 7303,
"end": 7693
} | class ____:
"""test_finds_missing_raises_from_setter_sphinx
Example of a setter having missing raises documentation in
the Sphinx style docstring of the property
"""
@property
def foo(self): # [missing-raises-doc]
"""docstring ...
:type: int
"""
return 10
@foo.setter
def foo(self, value):
raise AttributeError()
| Foo |
python | huggingface__transformers | src/transformers/models/chinese_clip/modeling_chinese_clip.py | {
"start": 33977,
"end": 39309
} | class ____(ChineseCLIPPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
config: ChineseCLIPTextConfig
input_modalities = ("text",)
_no_split_modules = ["ChineseCLIPTextEmbeddings"]
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = ChineseCLIPTextEmbeddings(config)
self.encoder = ChineseCLIPTextEncoder(config)
self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The vision model from CHINESE_CLIP without any head or projection on top.
"""
)
| ChineseCLIPTextModel |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 27133,
"end": 27675
} | class ____(models.Model):
"""
Historic table with one to one relationship to non-historic table.
In this case it should simply behave like OneToOneField because
the origin model (this one) cannot be historic, so one to one field
lookups are always "current".
"""
name = models.CharField(max_length=15, unique=True)
organization = HistoricOneToOneField(
TestOrganization, on_delete=CASCADE, related_name="participant"
)
history = HistoricalRecords()
| TestHistoricParticipantToOrganizationOneToOne |
python | networkx__networkx | networkx/classes/reportviews.py | {
"start": 28848,
"end": 31634
} | class ____(OutEdgeDataView):
"""An EdgeDataView for outward edges of MultiDiGraph; See EdgeDataView"""
__slots__ = ("keys",)
def __getstate__(self):
return {
"viewer": self._viewer,
"nbunch": self._nbunch,
"keys": self.keys,
"data": self._data,
"default": self._default,
}
def __setstate__(self, state):
self.__init__(**state)
def __init__(self, viewer, nbunch=None, data=False, *, default=None, keys=False):
self._viewer = viewer
adjdict = self._adjdict = viewer._adjdict
self.keys = keys
if nbunch is None:
self._nodes_nbrs = adjdict.items
else:
# dict retains order of nodes but acts like a set
nbunch = dict.fromkeys(viewer._graph.nbunch_iter(nbunch))
self._nodes_nbrs = lambda: [(n, adjdict[n]) for n in nbunch]
self._nbunch = nbunch
self._data = data
self._default = default
# Set _report based on data and default
if data is True:
if keys is True:
self._report = lambda n, nbr, k, dd: (n, nbr, k, dd)
else:
self._report = lambda n, nbr, k, dd: (n, nbr, dd)
elif data is False:
if keys is True:
self._report = lambda n, nbr, k, dd: (n, nbr, k)
else:
self._report = lambda n, nbr, k, dd: (n, nbr)
else: # data is attribute name
if keys is True:
self._report = (
lambda n, nbr, k, dd: (n, nbr, k, dd[data])
if data in dd
else (n, nbr, k, default)
)
else:
self._report = (
lambda n, nbr, k, dd: (n, nbr, dd[data])
if data in dd
else (n, nbr, default)
)
def __len__(self):
return sum(1 for e in self)
def __iter__(self):
return (
self._report(n, nbr, k, dd)
for n, nbrs in self._nodes_nbrs()
for nbr, kd in nbrs.items()
for k, dd in kd.items()
)
def __contains__(self, e):
u, v = e[:2]
if self._nbunch is not None and u not in self._nbunch:
return False # this edge doesn't start in nbunch
try:
kdict = self._adjdict[u][v]
except KeyError:
return False
if self.keys is True:
k = e[2]
try:
dd = kdict[k]
except KeyError:
return False
return e == self._report(u, v, k, dd)
return any(e == self._report(u, v, k, dd) for k, dd in kdict.items())
| OutMultiEdgeDataView |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 60422,
"end": 60569
} | class ____(_PrintableStructure):
_fields_ = [
('type', _nvmlBridgeChipType_t),
('fwVersion', c_uint),
]
| c_nvmlBridgeChipInfo_t |
python | simonw__datasette | datasette/utils/__init__.py | {
"start": 29327,
"end": 30839
} | class ____:
def __init__(self, data):
# data is a dictionary of key => [list, of, values] or a list of [["key", "value"]] pairs
if isinstance(data, dict):
for key in data:
assert isinstance(
data[key], (list, tuple)
), "dictionary data should be a dictionary of key => [list]"
self._data = data
elif isinstance(data, list) or isinstance(data, tuple):
new_data = {}
for item in data:
assert (
isinstance(item, (list, tuple)) and len(item) == 2
), "list data should be a list of [key, value] pairs"
key, value = item
new_data.setdefault(key, []).append(value)
self._data = new_data
def __repr__(self):
return f"<MultiParams: {self._data}>"
def __contains__(self, key):
return key in self._data
def __getitem__(self, key):
return self._data[key][0]
def keys(self):
return self._data.keys()
def __iter__(self):
yield from self._data.keys()
def __len__(self):
return len(self._data)
def get(self, name, default=None):
"""Return first value in the list, if available"""
try:
return self._data.get(name)[0]
except (KeyError, TypeError):
return default
def getlist(self, name):
"""Return full list"""
return self._data.get(name) or []
| MultiParams |
python | getsentry__sentry | src/sentry/api/serializers/models/project.py | {
"start": 35235,
"end": 36610
} | class ____(ProjectWithTeamResponseDict):
latestRelease: LatestReleaseDict | None
options: dict[str, Any]
digestsMinDelay: int
digestsMaxDelay: int
subjectPrefix: str
allowedDomains: list[str]
resolveAge: int
dataScrubber: bool
dataScrubberDefaults: bool
safeFields: list[str]
storeCrashReports: int | None
sensitiveFields: list[str]
subjectTemplate: str
securityToken: str
securityTokenHeader: str | None
verifySSL: bool
scrubIPAddresses: bool
scrapeJavaScript: bool
highlightTags: list[str]
highlightContext: dict[str, Any]
highlightPreset: HighlightPreset
groupingConfig: str
derivedGroupingEnhancements: str
groupingEnhancements: str
secondaryGroupingExpiry: int
secondaryGroupingConfig: str | None
fingerprintingRules: str
organization: OrganizationSerializerResponse
plugins: list[Plugin]
platforms: list[str]
processingIssues: int
defaultEnvironment: str | None
relayPiiConfig: str | None
builtinSymbolSources: list[str]
dynamicSamplingBiases: list[dict[str, str | bool]]
symbolSources: str
isDynamicallySampled: bool
tempestFetchScreenshots: NotRequired[bool]
autofixAutomationTuning: NotRequired[str]
seerScannerAutomation: NotRequired[bool]
debugFilesRole: NotRequired[str | None]
| DetailedProjectResponse |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-surveymonkey/source_surveymonkey/components.py | {
"start": 364,
"end": 2164
} | class ____(SubstreamPartitionRouter):
"""
A SurveyIdPartitionRouter is specifically tailored for survey data, addressing the limitations of the current solution,
SubstreamPartitionRouter, which only offers one option for partitioning via access to the parent stream with input.
The SurveyIdPartitionRouter generates stream slices for partitioning based on either provided survey IDs or parent stream keys.
Inherits from:
SubstreamPartitionRouter
Custom Methods:
stream_slices: Generates stream slices for partitioning.
"""
def stream_slices(self) -> Iterable[StreamSlice]:
"""
Generates stream slices for partitioning based on survey IDs or parent stream keys.
"""
# Get the survey IDs from the configuration
survey_ids = self.config.get("survey_ids", [])
# Extract necessary configuration parameters
parent_stream_config = self.parent_stream_configs[0]
parent_key = parent_stream_config.parent_key.string
partition_field = parent_stream_config.partition_field.string
if survey_ids:
# If specific survey IDs are provided, yield slices based on them
for item in survey_ids:
yield StreamSlice(partition={partition_field: item}, cursor_slice={})
else:
# If not, iterate over parent stream records and yield slices based on parent keys
for parent_stream_config in self.parent_stream_configs:
for item in parent_stream_config.stream.read_records(sync_mode=SyncMode.full_refresh):
yield StreamSlice(partition={partition_field: item[parent_key]}, cursor_slice={})
# Ensures the function always returns an iterable
yield from []
| SurveyIdPartitionRouter |
python | spyder-ide__spyder | spyder/plugins/remoteclient/api/protocol.py | {
"start": 255,
"end": 430
} | class ____(typing.TypedDict):
id: str
name: str
last_activity: str
execution_state: str
connections: int
connection_info: KernelConnectionInfo
| KernelInfo |
python | PyCQA__pylint | tests/checkers/unittest_design.py | {
"start": 351,
"end": 1844
} | class ____(CheckerTestCase):
CHECKER_CLASS = design_analysis.MisdesignChecker
@set_config(
ignored_parents=(".Dddd",),
max_parents=1,
)
def test_too_many_ancestors_ignored_parents_are_skipped(self) -> None:
"""Make sure that classes listed in ``ignored-parents`` aren't counted
by the too-many-ancestors message.
"""
node = astroid.extract_node(
"""
class Aaaa(object):
pass
class Bbbb(Aaaa):
pass
class Cccc(Bbbb):
pass
class Dddd(Cccc):
pass
class Eeee(Dddd):
pass
"""
)
with self.assertNoMessages():
self.checker.visit_classdef(node)
@set_config(exclude_too_few_public_methods="toml.*")
def test_exclude_too_few_methods_with_value(self) -> None:
"""Test exclude-too-few-public-methods option with value."""
options = self.linter.config.exclude_too_few_public_methods
assert any(i.match("toml") for i in options)
assert any(i.match("toml.*") for i in options)
assert any(i.match("toml.TomlEncoder") for i in options)
def test_ignore_paths_with_no_value(self) -> None:
"""Test exclude-too-few-public-methods option with no value.
Compare against actual list to see if validator works.
"""
options = self.linter.config.exclude_too_few_public_methods
assert options == []
| TestDesignChecker |
python | huggingface__transformers | src/transformers/models/sam3/modeling_sam3.py | {
"start": 31764,
"end": 33532
} | class ____(Sam3PreTrainedModel):
def __init__(self, config: Sam3ViTConfig):
super().__init__(config)
self.config = config
self.embeddings = Sam3ViTEmbeddings(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layers = nn.ModuleList(
[
Sam3ViTLayer(config, window_size=config.window_size if i not in config.global_attn_indexes else 0)
for i in range(config.num_hidden_layers)
]
)
self.post_init()
def get_input_embeddings(self) -> Sam3ViTPatchEmbeddings:
return self.embeddings.patch_embeddings
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutput:
hidden_states = self.embeddings(pixel_values) # [batch_size, seq_len, hidden_size]
batch_size = hidden_states.shape[0]
height = pixel_values.shape[-2] // self.config.patch_size
width = pixel_values.shape[-1] // self.config.patch_size
hidden_size = hidden_states.shape[-1]
# Reshape to spatial format for windowed attention: [batch_size, height, width, hidden_size]
hidden_states = hidden_states.view(batch_size, height, width, hidden_size)
hidden_states = self.layer_norm(hidden_states)
for layer in self.layers:
hidden_states = layer(hidden_states, **kwargs)
# Reshape back to sequence format: [batch_size, height*width, hidden_size]
hidden_states = hidden_states.view(batch_size, height * width, hidden_size)
return BaseModelOutput(last_hidden_state=hidden_states)
| Sam3ViTModel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchSequence2.py | {
"start": 779,
"end": 2425
} | class ____:
pass
type UA = (
A1 | A2 | A3 | A4 | A5 | A6 | A7 | A8 | A9 | A10 | A11 | A12 | A13 | A14 | A15 | A16
)
type UB = (
B1 | B2 | B3 | B4 | B5 | B6 | B7 | B8 | B9 | B10 | B11 | B12 | B13 | B14 | B15 | B16
)
def test(a: UA, b: UB) -> bool:
t = a, b
match t:
case A1(), B1():
return True
case A2(), B2():
return True
case A3(), B3():
return True
case A4(), B4():
reveal_type(t, expected_text="tuple[A4, B4]")
return True
case A5(), B5():
return True
case A6(), B6():
reveal_type(t, expected_text="tuple[A6, B6]")
return True
case A7(), B7():
reveal_type(t, expected_text="tuple[A7, B7]")
return True
case A8(), B8():
reveal_type(t, expected_text="tuple[A8, B8]")
return True
case A9(), B9():
# The type will become less precise in this case
# because narrowing in the negative case needs
# to fall back on less-precise types.
reveal_type(t, expected_text="Sequence[A9 | B9]")
return True
case A10(), B10():
return True
case A11(), B11():
return True
case A12(), B12():
return True
case A13(), B13():
return True
case A14(), B14():
return True
case A15(), B15():
return True
case A16(), B16():
return True
case _:
reveal_type(t, expected_text="Any")
raise ValueError()
| B16 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/components.py | {
"start": 23295,
"end": 24514
} | class ____(RecordTransformation):
"""
A record transformation that flattens the `associations` field in HubSpot records.
This transformation takes a nested dictionary under the `associations` key and extracts the IDs
of associated objects. The extracted lists of IDs are added as new top-level fields in the record,
using the association name as the key (spaces replaced with underscores).
Example:
Input:
{
"id": 1,
"associations": {
"Contacts": {"results": [{"id": 101}, {"id": 102}]}
}
}
Output:
{
"id": 1,
"Contacts": [101, 102]
}
"""
def transform(
self,
record: Dict[str, Any],
config: Optional[Config] = None,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
) -> None:
if "associations" in record:
associations = record.pop("associations")
for name, association in associations.items():
record[name.replace(" ", "_")] = [row["id"] for row in association.get("results", [])]
@dataclass
| HubspotFlattenAssociationsTransformation |
python | apache__airflow | airflow-core/tests/unit/jobs/test_triggerer_job.py | {
"start": 10458,
"end": 25098
} | class ____:
def test_run_inline_trigger_canceled(self, session) -> None:
trigger_runner = TriggerRunner()
trigger_runner.triggers = {
1: {"task": MagicMock(spec=asyncio.Task), "name": "mock_name", "events": 0}
}
mock_trigger = MagicMock(spec=BaseTrigger)
mock_trigger.timeout_after = None
mock_trigger.run.side_effect = asyncio.CancelledError()
with pytest.raises(asyncio.CancelledError):
asyncio.run(trigger_runner.run_trigger(1, mock_trigger))
# @pytest.mark.asyncio
def test_run_inline_trigger_timeout(self, session, cap_structlog) -> None:
trigger_runner = TriggerRunner()
trigger_runner.triggers = {
1: {"task": MagicMock(spec=asyncio.Task), "name": "mock_name", "events": 0}
}
mock_trigger = MagicMock(spec=BaseTrigger)
mock_trigger.run.side_effect = asyncio.CancelledError()
with pytest.raises(asyncio.CancelledError):
asyncio.run(
trigger_runner.run_trigger(
1, mock_trigger, timeout_after=timezone.utcnow() - datetime.timedelta(hours=1)
)
)
assert {"event": "Trigger cancelled due to timeout", "log_level": "error"} in cap_structlog
@patch("airflow.jobs.triggerer_job_runner.Trigger._decrypt_kwargs")
@patch(
"airflow.jobs.triggerer_job_runner.TriggerRunner.get_trigger_by_classpath",
return_value=DateTimeTrigger,
)
@pytest.mark.asyncio
async def test_update_trigger_with_triggerer_argument_change(
self, mock_get_trigger_by_classpath, mock_decrypt_kwargs, session, cap_structlog
) -> None:
trigger_runner = TriggerRunner()
def fn(moment): ...
mock_decrypt_kwargs.return_value = {"moment": ..., "not_exists_arg": ...}
mock_get_trigger_by_classpath.return_value = fn
trigger_runner.to_create.append(
workloads.RunTrigger.model_construct(id=1, classpath="abc", encrypted_kwargs="fake"),
)
await trigger_runner.create_triggers()
assert "Trigger failed" in cap_structlog.text
err = cap_structlog[0]["error"]
assert isinstance(err, TypeError)
assert "got an unexpected keyword argument 'not_exists_arg'" in str(err)
@pytest.mark.asyncio
@patch("airflow.sdk.execution_time.task_runner.SUPERVISOR_COMMS", create=True)
async def test_invalid_trigger(self, supervisor_builder):
"""Test the behaviour when we try to run an invalid Trigger"""
workload = workloads.RunTrigger.model_construct(
id=1, ti=None, classpath="fake.classpath", encrypted_kwargs={}
)
trigger_runner = TriggerRunner()
trigger_runner.comms_decoder = AsyncMock(spec=TriggerCommsDecoder)
trigger_runner.comms_decoder.asend.return_value = messages.TriggerStateSync(
to_create=[], to_cancel=[]
)
trigger_runner.to_create.append(workload)
await trigger_runner.create_triggers()
assert (1, ANY) in trigger_runner.failed_triggers
ids = await trigger_runner.cleanup_finished_triggers()
await trigger_runner.sync_state_to_supervisor(ids)
# Check that we sent the right info in the failure message
assert trigger_runner.comms_decoder.asend.call_count == 1
msg = trigger_runner.comms_decoder.asend.mock_calls[0].args[0]
assert isinstance(msg, messages.TriggerStateChanges)
assert msg.events is None
assert msg.failures is not None
assert len(msg.failures) == 1
trigger_id, traceback = msg.failures[0]
assert trigger_id == 1
assert traceback[-1] == "ModuleNotFoundError: No module named 'fake'\n"
@pytest.mark.asyncio
async def test_trigger_kwargs_serialization_cleanup(self, session):
"""
Test that trigger kwargs are properly cleaned of serialization artifacts
(__var, __type keys).
"""
from airflow.serialization.serialized_objects import BaseSerialization
kw = {"simple": "test", "tuple": (), "dict": {}, "list": []}
serialized_kwargs = BaseSerialization.serialize(kw)
trigger_orm = Trigger(classpath="airflow.triggers.testing.SuccessTrigger", kwargs=serialized_kwargs)
session.add(trigger_orm)
session.commit()
stored_kwargs = trigger_orm.kwargs
assert stored_kwargs == {
"Encoding.TYPE": "dict",
"Encoding.VAR": {
"dict": {"Encoding.TYPE": "dict", "Encoding.VAR": {}},
"list": [],
"simple": "test",
"tuple": {"Encoding.TYPE": "tuple", "Encoding.VAR": []},
},
}
runner = TriggerRunner()
runner.to_create.append(
workloads.RunTrigger.model_construct(
id=trigger_orm.id,
ti=None,
classpath=trigger_orm.classpath,
encrypted_kwargs=trigger_orm.encrypted_kwargs,
)
)
await runner.create_triggers()
assert trigger_orm.id in runner.triggers
trigger_instance = runner.triggers[trigger_orm.id]["task"]
# The test passes if no exceptions were raised during trigger creation
trigger_instance.cancel()
await runner.cleanup_finished_triggers()
@pytest.mark.asyncio
async def test_trigger_create_race_condition_38599(session, supervisor_builder, testing_dag_bundle):
"""
This verifies the resolution of race condition documented in github issue #38599.
More details in the issue description.
The race condition may occur in the following scenario:
1. TaskInstance TI1 defers itself, which creates Trigger T1, which holds a
reference to TI1.
2. T1 gets picked up by TriggererJobRunner TJR1 and starts running T1.
3. TJR1 misses a heartbeat, most likely due to high host load causing delays in
each TriggererJobRunner._run_trigger_loop loop.
4. A second TriggererJobRunner TJR2 notices that T1 has missed its heartbeat,
so it starts the process of picking up any Triggers that TJR1 may have had,
including T1.
5. Before TJR2 starts executing T1, TJR1 finishes execution of T1 and cleans it
up by clearing the trigger_id of TI1.
6. TJR2 tries to execute T1, but it crashes (with the above error) while trying to
look up TI1 (because T1 no longer has a TaskInstance linked to it).
"""
trigger = TimeDeltaTrigger(delta=datetime.timedelta(microseconds=1))
trigger_orm = Trigger.from_object(trigger)
session.add(trigger_orm)
session.flush()
bundle_name = "testing"
dag = DAG(dag_id="test-dag")
dm = DagModel(dag_id="test-dag", bundle_name=bundle_name)
session.add(dm)
SerializedDagModel.write_dag(LazyDeserializedDAG.from_dag(dag), bundle_name=bundle_name)
dag_run = DagRun(dag.dag_id, run_id="abc", run_type="none", run_after=timezone.utcnow())
dag_version = DagVersion.get_latest_version(dag.dag_id)
ti = TaskInstance(
PythonOperator(task_id="dummy-task", python_callable=print),
run_id=dag_run.run_id,
state=TaskInstanceState.DEFERRED,
dag_version_id=dag_version.id,
)
ti.dag_id = dag.dag_id
ti.trigger_id = trigger_orm.id
session.add(dag_run)
session.add(ti)
job1 = Job()
job2 = Job()
session.add(job1)
session.add(job2)
session.commit()
supervisor1 = supervisor_builder(job1)
supervisor2 = supervisor_builder(job2)
# Assign and run the trigger on the first TriggererJobRunner
# Instead of running job_runner1._execute, we will run the individual methods
# to control the timing of the execution.
supervisor1.load_triggers()
assert {t.id for t in supervisor1.creating_triggers} == {trigger_orm.id}
trigger_orm = session.get(Trigger, trigger_orm.id)
assert trigger_orm.task_instance is not None, "Pre-condition"
# In a real execution environment, a missed heartbeat would cause the trigger to be picked up
# by another TriggererJobRunner.
# In this test, however, this is not necessary because we are controlling the execution
# of the TriggererJobRunner.
# job1.latest_heartbeat = timezone.utcnow() - datetime.timedelta(hours=1)
# session.commit()
# This calls Trigger.submit_event, which will unlink the trigger from the task instance
# Simulate this call: supervisor1._service_subprocess()
supervisor1.events.append((trigger_orm.id, TriggerEvent(True)))
supervisor1.handle_events()
trigger_orm = session.get(Trigger, trigger_orm.id)
# This is the "pre"-condition we need to assert to test the race condition
assert trigger_orm.task_instance is None
# Simulate the second TriggererJobRunner picking up the trigger
# The race condition happens here.
# AttributeError: 'NoneType' object has no attribute 'dag_id'
supervisor2.update_triggers({trigger_orm.id})
assert supervisor2.running_triggers == set()
# We should have not sent anything to the async runner process
supervisor2.stdin.write.assert_not_called()
@pytest.mark.execution_timeout(5)
def test_trigger_runner_exception_stops_triggerer():
"""
Checks that if an exception occurs when creating triggers, that the triggerer
process stops
"""
import signal
job_runner = TriggererJobRunner(Job())
time.sleep(0.1)
# Wait 4 seconds for the triggerer to stop
try:
def on_timeout(signum, frame):
os.kill(job_runner.trigger_runner.pid, signal.SIGKILL)
signal.signal(signal.SIGALRM, on_timeout)
signal.setitimer(signal.ITIMER_REAL, 0.1)
# This either returns cleanly, or the pytest timeout hits.
assert job_runner._execute() == -9
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
@pytest.mark.asyncio
async def test_trigger_firing():
"""
Checks that when a trigger fires, it correctly makes it into the
event queue.
"""
runner = TriggerRunner()
runner.to_create.append(
# Use a trigger that will immediately succeed
workloads.RunTrigger.model_construct(
id=1,
ti=None,
classpath=f"{SuccessTrigger.__module__}.{SuccessTrigger.__name__}",
encrypted_kwargs='{"__type":"dict", "__var":{}}',
),
)
await runner.create_triggers()
try:
# Wait for up to 3 seconds for it to fire and appear in the event queue
for _ in range(30):
await asyncio.sleep(0.1)
finished = await runner.cleanup_finished_triggers()
if runner.events:
assert list(runner.events) == [(1, TriggerEvent(True))]
assert finished == [1]
break
await asyncio.sleep(0.1)
else:
pytest.fail("TriggerRunner never sent the trigger event out")
finally:
for info in runner.triggers.values():
info["task"].cancel()
@pytest.mark.asyncio
async def test_trigger_failing():
"""
Checks that when a trigger fails, it correctly makes it into the
failure queue.
"""
runner = TriggerRunner()
runner.to_create.append(
# Use a trigger that will immediately fail
workloads.RunTrigger.model_construct(
id=1,
ti=None,
classpath=f"{FailureTrigger.__module__}.{FailureTrigger.__name__}",
encrypted_kwargs='{"__type":"dict", "__var":{}}',
),
)
await runner.create_triggers()
try:
# Wait for up to 3 seconds for it to fire and appear in the event queue
for _ in range(30):
await asyncio.sleep(0.1)
await runner.cleanup_finished_triggers()
if runner.failed_triggers:
assert len(runner.failed_triggers) == 1
trigger_id, exc = runner.failed_triggers[0]
assert trigger_id == 1
assert isinstance(exc, ValueError)
assert exc.args[0] == "Deliberate trigger failure"
break
else:
pytest.fail("TriggerRunner never marked the trigger as failed")
finally:
for info in runner.triggers.values():
info["task"].cancel()
def test_failed_trigger(session, dag_maker, supervisor_builder):
"""
Checks that the triggerer will correctly fail task instances that depend on
triggers that can't even be loaded.
This is the Supervisor side of the error reported in TestTriggerRunner::test_invalid_trigger
"""
# Create a totally invalid trigger
trigger_orm = Trigger(classpath="fake.classpath", kwargs={})
session.add(trigger_orm)
session.flush()
# Create the test DAG and task
with dag_maker(dag_id="test_invalid_trigger", session=session):
EmptyOperator(task_id="dummy1")
dr = dag_maker.create_dagrun()
task_instance = dr.task_instances[0]
# Make a task instance based on that and tie it to the trigger
task_instance.state = TaskInstanceState.DEFERRED
task_instance.trigger_id = trigger_orm.id
session.commit()
supervisor: TriggerRunnerSupervisor = supervisor_builder()
supervisor.load_triggers()
# Make sure it got picked up
assert {t.id for t in supervisor.creating_triggers} == {trigger_orm.id}, "Pre-condition"
# Simulate receiving the state update message
supervisor._handle_request(
messages.TriggerStateChanges(
events=None,
finished=None,
failures=[
(
trigger_orm.id,
[
"Traceback (most recent call last):\n",
'File "<frozen importlib._bootstrap>", line 1324, in _find_and_load_unlocked\n',
"ModuleNotFoundError: No module named 'fake'\n",
],
)
],
),
req_id=1,
log=MagicMock(),
)
# Run the failed trigger handler
supervisor.handle_failed_triggers()
# Make sure it marked the task instance as failed (which is actually the
# scheduled state with a payload to make it fail)
task_instance.refresh_from_db()
assert task_instance.state == TaskInstanceState.SCHEDULED
assert task_instance.next_method == "__fail__"
assert task_instance.next_kwargs["error"] == "Trigger failure"
assert task_instance.next_kwargs["traceback"][-1] == "ModuleNotFoundError: No module named 'fake'\n"
| TestTriggerRunner |
python | crytic__slither | slither/tools/upgradeability/checks/initialization.py | {
"start": 1483,
"end": 2607
} | class ____(AbstractCheck):
ARGUMENT = "init-missing"
IMPACT = CheckClassification.INFORMATIONAL
HELP = "Initializable is missing"
WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#initializable-is-missing"
WIKI_TITLE = "Initializable is missing"
# region wiki_description
WIKI_DESCRIPTION = """
Detect if a contract `Initializable` is present.
"""
# endregion wiki_description
# region wiki_recommendation
WIKI_RECOMMENDATION = """
Review manually the contract's initialization..
Consider using a `Initializable` contract to follow [standard practice](https://docs.openzeppelin.com/upgrades/2.7/writing-upgradeable).
"""
# endregion wiki_recommendation
def _check(self):
initializable = self.contract.file_scope.get_contract_from_name("Initializable")
if initializable is None:
info = [
"Initializable contract not found, the contract does not follow a standard initalization schema.\n"
]
json = self.generate_result(info)
return [json]
return []
| InitializablePresent |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws_tests/pipes_tests/fake_lambda.py | {
"start": 1806,
"end": 4720
} | class ____:
def invoke(self, **kwargs):
# emulate lambda constraints with a subprocess invocation
# * json serialized "Payload" result
# * 4k log output as base64 "LogResult"
with tempfile.TemporaryDirectory() as tempdir:
in_path = os.path.join(tempdir, "in.json")
out_path = os.path.join(tempdir, "out.json")
log_path = os.path.join(tempdir, "logs")
with open(in_path, "w") as f:
f.write(kwargs["Payload"])
with open(log_path, "w") as log_file:
result = subprocess.run(
[
sys.executable,
os.path.join(os.path.dirname(__file__), "fake_lambda.py"),
kwargs["FunctionName"],
in_path,
out_path,
],
check=False,
env={}, # env vars part of lambda fn definition, can't vary at runtime
stdout=log_file,
stderr=log_file,
)
response: dict[str, Any] = {}
if result.returncode == 42:
response["FunctionError"] = "Unhandled"
elif result.returncode != 0:
with open(log_path) as f:
print(f.read()) # noqa: T201
result.check_returncode()
with open(out_path, "rb") as f:
payload = io.BytesIO(f.read())
response["Payload"] = payload
if kwargs.get("LogType") == "Tail":
logs_len = os.path.getsize(log_path)
with open(log_path, "rb") as log_file:
if logs_len > LOG_TAIL_LIMIT:
log_file.seek(-LOG_TAIL_LIMIT, os.SEEK_END)
outro = log_file.read()
log_result = base64.encodebytes(outro)
response["LogResult"] = log_result
return response
if __name__ == "__main__":
assert len(sys.argv) == 4, "python fake_lambda.py <fn_name> <in_path> <out_path>"
_, fn_name, in_path, out_path = sys.argv
event = json.load(open(in_path))
fn = getattr(LambdaFunctions, fn_name)
val = None
return_code = 0
try:
val = fn(event, FakeLambdaContext())
except Exception as e:
tb = traceback.TracebackException.from_exception(e)
if sys.version_info >= (3, 13):
name = tb.exc_type_str.split(".")[-1]
else:
name = tb.exc_type.__name__ if tb.exc_type is not None else None
val = {
"errorMessage": str(tb),
"errorType": name,
"stackTrace": tb.stack.format(),
"requestId": "fake-request-id",
}
return_code = 42
with open(out_path, "w") as f:
json.dump(val, f)
sys.exit(return_code)
| FakeLambdaClient |
python | great-expectations__great_expectations | great_expectations/validator/validator.py | {
"start": 2816,
"end": 4597
} | class ____:
# Note: Dependent "metric_name" (key) is different from "metric_name" in dependency "MetricConfiguration" (value). # noqa: E501 # FIXME CoP
metric_configurations: Dict[str, MetricConfiguration] = field(default_factory=dict)
result_format: Dict[str, Any] = field(default_factory=dict)
def set_metric_configuration(
self, metric_name: str, metric_configuration: MetricConfiguration
) -> None:
"""
Sets specified "MetricConfiguration" for "metric_name" to "metric_configurations" dependencies dictionary.
""" # noqa: E501 # FIXME CoP
self.metric_configurations[metric_name] = metric_configuration
def get_metric_configuration(self, metric_name: str) -> Optional[MetricConfiguration]:
"""
Obtains "MetricConfiguration" for specified "metric_name" from "metric_configurations" dependencies dictionary.
""" # noqa: E501 # FIXME CoP
return self.metric_configurations.get(metric_name)
def remove_metric_configuration(self, metric_name: str) -> None:
"""
Removes "MetricConfiguration" for specified "metric_name" from "metric_configurations" dependencies dictionary.
""" # noqa: E501 # FIXME CoP
del self.metric_configurations[metric_name]
def get_metric_names(self) -> List[str]:
"""
Returns "metric_name" keys, for which "MetricConfiguration" dependency objects have been specified.
""" # noqa: E501 # FIXME CoP
return list(self.metric_configurations.keys())
def get_metric_configurations(self) -> List[MetricConfiguration]:
"""
Returns "MetricConfiguration" dependency objects specified.
"""
return list(self.metric_configurations.values())
| ValidationDependencies |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/utils.py | {
"start": 16355,
"end": 16990
} | class ____(NamedTuple):
"""Field that can be configured by the user with a default value."""
id: str
"""The unique identifier of the field."""
options: Mapping[str, Any]
"""The options for the field."""
default: str
"""The default value for the field."""
name: str | None = None
"""The name of the field. """
description: str | None = None
"""The description of the field. """
is_shared: bool = False
"""Whether the field is shared."""
@override
def __hash__(self) -> int:
return hash((self.id, tuple(self.options.keys()), self.default))
| ConfigurableFieldSingleOption |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/projection_queries/snippets.py | {
"start": 615,
"end": 1037
} | class ____(ndb.Model):
title = ndb.StringProperty()
author = ndb.StringProperty()
tags = ndb.StringProperty(repeated=True)
def print_author_tags():
query = Article.query()
articles = query.fetch(20, projection=[Article.author, Article.tags])
for article in articles:
print(article.author)
print(article.tags)
# article.title will raise a ndb.UnprojectedPropertyError
| Article |
python | ethereum__web3.py | web3/providers/persistent/async_ipc.py | {
"start": 873,
"end": 4998
} | class ____(PersistentConnectionProvider):
logger = logging.getLogger("web3.providers.AsyncIPCProvider")
_reader: asyncio.StreamReader | None = None
_writer: asyncio.StreamWriter | None = None
_decoder: json.JSONDecoder = json.JSONDecoder()
def __init__(
self,
ipc_path: str | Path | None = None,
read_buffer_limit: int = 20 * 1024 * 1024, # 20 MB
# `PersistentConnectionProvider` kwargs can be passed through
**kwargs: Any,
) -> None:
# initialize the ipc_path before calling the super constructor
if ipc_path is None:
self.ipc_path = get_default_ipc_path()
elif isinstance(ipc_path, str) or isinstance(ipc_path, Path):
self.ipc_path = str(Path(ipc_path).expanduser().resolve())
else:
raise Web3TypeError("ipc_path must be of type string or pathlib.Path")
super().__init__(**kwargs)
self.read_buffer_limit = read_buffer_limit
def __str__(self) -> str:
return f"<{self.__class__.__name__} {self.ipc_path}>"
async def is_connected(self, show_traceback: bool = False) -> bool:
if not self._writer or not self._reader:
return False
try:
await self.make_request(RPCEndpoint("web3_clientVersion"), [])
return True
except (OSError, ProviderConnectionError) as e:
if show_traceback:
raise ProviderConnectionError(
f"Problem connecting to provider with error: {type(e)}: {e}"
)
return False
async def socket_send(self, request_data: bytes) -> None:
if self._writer is None:
raise ProviderConnectionError(
"Connection to ipc socket has not been initiated for the provider."
)
return await asyncio.wait_for(
self._socket_send(request_data), timeout=self.request_timeout
)
async def socket_recv(self) -> RPCResponse:
try:
data = await self._reader.readline()
except ValueError as e:
if all(kw in str(e) for kw in ("limit", "chunk")):
raise ReadBufferLimitReached(
f"Read buffer limit of `{self.read_buffer_limit}` bytes was "
"reached. Consider increasing the ``read_buffer_limit`` on the "
"AsyncIPCProvider."
) from e
raise
if not data:
raise PersistentConnectionClosedOK(
user_message="Socket reader received end of stream."
)
return self.decode_rpc_response(data)
# -- private methods -- #
async def _socket_send(self, request_data: bytes) -> None:
try:
self._writer.write(request_data + b"\n")
await self._writer.drain()
except OSError as e:
# Broken pipe
if e.errno == errno.EPIPE:
# one extra attempt, then give up
await self._reset_socket()
self._writer.write(request_data)
await self._writer.drain()
async def _reset_socket(self) -> None:
self._writer.close()
await self._writer.wait_closed()
self._reader, self._writer = await async_get_ipc_socket(
self.ipc_path, self.read_buffer_limit
)
async def _provider_specific_connect(self) -> None:
self._reader, self._writer = await async_get_ipc_socket(
self.ipc_path, self.read_buffer_limit
)
async def _provider_specific_disconnect(self) -> None:
# this should remain idempotent
if self._writer and not self._writer.is_closing():
self._writer.close()
await self._writer.wait_closed()
self._writer = None
if self._reader:
self._reader = None
async def _provider_specific_socket_reader(self) -> RPCResponse:
return await self.socket_recv()
def _error_log_listener_task_exception(self, e: Exception) -> None:
super()._error_log_listener_task_exception(e)
| AsyncIPCProvider |
python | huggingface__transformers | src/transformers/models/zamba2/modular_zamba2.py | {
"start": 52961,
"end": 53015
} | class ____(ZambaForCausalLM):
pass
| Zamba2ForCausalLM |
python | Farama-Foundation__Gymnasium | gymnasium/envs/phys2d/cartpole.py | {
"start": 703,
"end": 1233
} | class ____:
"""Parameters for the jax CartPole environment."""
gravity: float = 9.8
masscart: float = 1.0
masspole: float = 0.1
total_mass: float = masspole + masscart
length: float = 0.5
polemass_length: float = masspole + length
force_mag: float = 10.0
tau: float = 0.02
theta_threshold_radians: float = 12 * 2 * np.pi / 360
x_threshold: float = 2.4
x_init: float = 0.05
sutton_barto_reward: bool = False
screen_width: int = 600
screen_height: int = 400
| CartPoleParams |
python | huggingface__transformers | src/transformers/models/imagegpt/modeling_imagegpt.py | {
"start": 12849,
"end": 16038
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
hidden_size = config.hidden_size
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = ImageGPTAttention(config, layer_idx=layer_idx)
self.ln_2 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = ImageGPTAttention(config, is_cross_attention=True, layer_idx=layer_idx)
self.ln_cross_attn = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = ImageGPTMLP(inner_dim, config)
def forward(
self,
hidden_states: torch.Tensor,
layer_past: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = False,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple:
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
attn_output = attn_outputs[0]
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + residual
if encoder_hidden_states is not None:
# add one self-attention block for cross-attention
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
"cross-attention layers by setting `config.add_cross_attention=True`"
)
residual = hidden_states
hidden_states = self.ln_cross_attn(hidden_states)
cross_attn_outputs = self.crossattention(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
attn_output = cross_attn_outputs[0]
# residual connection
hidden_states = residual + attn_output
outputs = outputs + cross_attn_outputs[1:] # add cross attentions if we output attention weights
residual = hidden_states
hidden_states = self.ln_2(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
# residual connection
hidden_states = residual + feed_forward_hidden_states
return (hidden_states,) + outputs
@auto_docstring
| ImageGPTBlock |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_decorators_3.py | {
"start": 366,
"end": 480
} | class ____:
x: UUID
@validate_call(config={'arbitrary_types_allowed': True})
def test(user: Sequence):
...
| C |
python | ray-project__ray | doc/source/ray-overview/examples/mcp-ray-serve/multi_mcp_ray_serve.py | {
"start": 791,
"end": 3515
} | class ____:
_PODMAN_ARGS: List[str] = []
_ENV: Dict[str, str] = {}
def __init__(self):
self._ready = asyncio.create_task(self._startup())
async def _startup(self):
params = StdioServerParameters(
command="podman",
args=self._PODMAN_ARGS,
env=self._ENV,
)
self._stack = AsyncExitStack()
stdin, stdout = await self._stack.enter_async_context(stdio_client(params))
self.session = await self._stack.enter_async_context(
ClientSession(stdin, stdout)
)
await self.session.initialize()
logger.info("%s replica ready", type(self).__name__)
async def _ensure_ready(self):
await self._ready
async def list_tools(self) -> List[Dict[str, Any]]:
await self._ensure_ready()
resp = await self.session.list_tools()
return [
{
"name": t.name,
"description": t.description,
"input_schema": t.inputSchema,
}
for t in resp.tools
]
async def call_tool(self, tool_name: str, tool_args: Dict[str, Any]) -> Any:
await self._ensure_ready()
return await self.session.call_tool(tool_name, tool_args)
async def __del__(self):
if hasattr(self, "_stack"):
await self._stack.aclose()
def build_mcp_deployment(
*,
name: str,
docker_image: str,
num_replicas: int = 3,
num_cpus: float = 0.5,
autoscaling_config: Optional[Dict[str, Any]] = None,
server_command: Optional[str] = None,
extra_podman_args: Optional[List[str]] = None,
env: Optional[Dict[str, str]] = None,
) -> serve.Deployment:
"""
- If autoscaling_config is provided, Ray Serve autoscales between
autoscaling_config['min_replicas'] and ['max_replicas'].
- Otherwise it launches `num_replicas` fixed replicas.
"""
deployment_env = env or {}
podman_args = _podman_args(
docker_image, extra_args=extra_podman_args, env=deployment_env
)
if server_command:
podman_args.append(server_command)
# Build kwargs for the decorator:
deploy_kwargs: Dict[str, Any] = {
"name": name,
"ray_actor_options": {"num_cpus": num_cpus},
}
if autoscaling_config:
deploy_kwargs["autoscaling_config"] = autoscaling_config
else:
deploy_kwargs["num_replicas"] = num_replicas
@serve.deployment(**deploy_kwargs)
class MCP(_BaseMCP):
_PODMAN_ARGS = podman_args
_ENV = deployment_env
return MCP
# -------------------------
# HTTP router code
# -------------------------
api = FastAPI()
@serve.deployment
@serve.ingress(api)
| _BaseMCP |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/service/gpu_test.py | {
"start": 1255,
"end": 2793
} | class ____(
data_service_test_base.TestBase,
parameterized.TestCase,
):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
pinned=[False, True],
data_transfer_protocol=["grpc", "local"],
compression=[False, True],
),
)
)
def test_pinned(self, pinned, data_transfer_protocol, compression):
cpus = config.list_logical_devices("CPU")
gpus = config.list_logical_devices("GPU")
if not gpus:
self.skipTest("GPUs must be present to check GPU-pinnedness.")
num_elements = 10
cluster = self.make_test_cluster(num_workers=1)
with ops.device_v2(cpus[0].name):
ds = self.make_distributed_range_dataset(
num_elements=num_elements,
cluster=cluster,
data_transfer_protocol=data_transfer_protocol,
compression=("AUTO" if compression else None),
)
with ops.device_v2(gpus[0].name):
ds = ds.map(gen_experimental_dataset_ops.check_pinned)
options = options_lib.Options()
options.experimental_service.pinned = pinned
ds = ds.with_options(options)
if not pinned or data_transfer_protocol != "grpc" or compression:
with self.assertRaisesRegex(errors.InvalidArgumentError, "not pinned"):
self.assertDatasetProduces(ds, list(range(num_elements)))
return
self.assertDatasetProduces(ds, list(range(num_elements)))
if __name__ == "__main__":
test.main()
| TfDataServiceGpuTest |
python | pytorch__pytorch | tools/linter/adapters/nativefunctions_linter.py | {
"start": 1268,
"end": 3641
} | class ____(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="native functions linter",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--native-functions-yml",
required=True,
help="location of native_functions.yaml",
)
args = parser.parse_args()
with open(args.native_functions_yml) as f:
contents = f.read()
yaml = ruamel.yaml.YAML() # type: ignore[attr-defined]
yaml.preserve_quotes = True # type: ignore[assignment]
yaml.width = 1000 # type: ignore[assignment]
yaml.boolean_representation = ["False", "True"] # type: ignore[attr-defined]
try:
r = yaml.load(contents)
except Exception as err:
msg = LintMessage(
path=None,
line=None,
char=None,
code="NATIVEFUNCTIONS",
severity=LintSeverity.ERROR,
name="YAML load failure",
original=None,
replacement=None,
description=f"Failed due to {err.__class__.__name__}:\n{err}",
)
print(json.dumps(msg._asdict()), flush=True)
sys.exit(0)
# Cuz ruamel's author intentionally didn't include conversion to string
# https://stackoverflow.com/questions/47614862/best-way-to-use-ruamel-yaml-to-dump-to-string-not-to-stream
string_stream = StringIO()
yaml.dump(r, string_stream)
new_contents = string_stream.getvalue()
string_stream.close()
if contents != new_contents:
msg = LintMessage(
path=args.native_functions_yml,
line=None,
char=None,
code="NATIVEFUNCTIONS",
severity=LintSeverity.ERROR,
name="roundtrip inconsistency",
original=contents,
replacement=new_contents,
description=(
"YAML roundtrip failed; run `lintrunner --take NATIVEFUNCTIONS -a` to apply the suggested changes. "
"If you think this is in error, please see tools/linter/adapters/nativefunctions_linter.py"
),
)
print(json.dumps(msg._asdict()), flush=True)
| LintMessage |
python | jazzband__tablib | src/tablib/formats/_dbf.py | {
"start": 157,
"end": 1918
} | class ____:
title = 'dbf'
extensions = ('csv',)
DEFAULT_ENCODING = 'utf-8'
@classmethod
def export_set(cls, dataset):
"""Returns DBF representation of a Dataset"""
new_dbf = dbfnew.dbf_new()
temp_file, temp_uri = tempfile.mkstemp()
# create the appropriate fields based on the contents of the first row
first_row = dataset[0]
for fieldname, field_value in zip(dataset.headers, first_row):
if type(field_value) in [int, float]:
new_dbf.add_field(fieldname, 'N', 10, 8)
else:
new_dbf.add_field(fieldname, 'C', 80)
new_dbf.write(temp_uri)
dbf_file = dbf.Dbf(temp_uri, readOnly=0)
for row in dataset:
record = dbfrecord.DbfRecord(dbf_file)
for fieldname, field_value in zip(dataset.headers, row):
record[fieldname] = field_value
record.store()
dbf_file.close()
dbf_stream = open(temp_uri, 'rb')
stream = io.BytesIO(dbf_stream.read())
dbf_stream.close()
os.close(temp_file)
os.remove(temp_uri)
return stream.getvalue()
@classmethod
def import_set(cls, dset, in_stream):
"""Returns a dataset from a DBF stream."""
dset.wipe()
_dbf = dbf.Dbf(in_stream)
dset.headers = _dbf.fieldNames
for record in range(_dbf.recordCount):
row = [_dbf[record][f] for f in _dbf.fieldNames]
dset.append(row)
@classmethod
def detect(cls, stream):
"""Returns True if the given stream is valid DBF"""
try:
dbf.Dbf(stream, readOnly=True)
return True
except Exception:
return False
| DBFFormat |
python | FactoryBoy__factory_boy | tests/djapp/models.py | {
"start": 341,
"end": 491
} | class ____(models.Model):
foo = models.CharField(max_length=20, primary_key=True)
bar = models.CharField(max_length=20, blank=True)
| NonIntegerPk |
python | getsentry__sentry | src/sentry/integrations/msteams/card_builder/block.py | {
"start": 2260,
"end": 2344
} | class ____(_ImageBlockNotRequired):
type: Literal["Image"]
url: str
| ImageBlock |
python | wandb__wandb | wandb/vendor/pygments/lexers/css.py | {
"start": 31076,
"end": 31513
} | class ____(CssLexer):
"""
For `LESS <http://lesscss.org/>`_ styleshets.
.. versionadded:: 2.1
"""
name = 'LessCss'
aliases = ['less']
filenames = ['*.less']
mimetypes = ['text/x-less-css']
tokens = {
'root': [
(r'@\w+', Name.Variable),
inherit,
],
'content': [
(r'\{', Punctuation, '#push'),
inherit,
],
}
| LessCssLexer |
python | falconry__falcon | tests/test_utils.py | {
"start": 2255,
"end": 2737
} | class ____(media.URLEncodedFormHandler):
def __init__(self):
super().__init__()
self.deserialize_count = 0
def deserialize(self, *args, **kwargs):
result = super().deserialize(*args, **kwargs)
self.deserialize_count += 1
return result
async def deserialize_async(self, *args, **kwargs):
result = await super().deserialize_async(*args, **kwargs)
self.deserialize_count += 1
return result
| TrackingFormHandler |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py | {
"start": 83040,
"end": 84321
} | class ____:
@mock.patch(VERTEX_AI_PATH.format("auto_ml.AutoMLHook"))
def test_execute(self, mock_hook):
page_token = "page_token"
page_size = 42
filter = "filter"
read_mask = "read_mask"
op = ListAutoMLTrainingJobOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
page_size=page_size,
page_token=page_token,
filter=filter,
read_mask=read_mask,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.list_training_pipelines.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
page_size=page_size,
page_token=page_token,
filter=filter,
read_mask=read_mask,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestVertexAIListAutoMLTrainingJobOperator |
python | pytorch__pytorch | functorch/dim/__init__.py | {
"start": 45215,
"end": 53308
} | class ____:
"""
Helper class for organizing dimensions in dot products.
"""
def __init__(self) -> None:
self.dims: list[DimEntry] = []
self.total_size = 1
def append(self, dim_entry: Any) -> None:
"""Add a dimension entry to this part."""
self.dims.append(dim_entry)
if not dim_entry.is_positional():
self.total_size *= dim_entry.dim().size
def dot_prepare(parts: list[DotPart], tensor_info: TensorInfo) -> torch.Tensor:
"""
Prepare tensor for dot product by matching levels and reshaping.
"""
new_levels = []
needs_reshape = False
for part in parts:
if len(part.dims) != 1:
needs_reshape = True
new_levels.extend(part.dims)
if tensor_info.tensor is None:
raise RuntimeError("Cannot perform dot product on None tensor")
result = _match_levels(tensor_info.tensor, tensor_info.levels, new_levels)
if not needs_reshape:
return result
# Reshape for matrix operations
view = [part.total_size for part in parts]
return result.reshape(view)
def dot_finish(parts: list[DotPart], result_tensor: torch.Tensor) -> Tensor:
"""
Finish dot product by reshaping result and creating Tensor.
"""
result_levels = []
needs_reshape = False
for part in parts:
if len(part.dims) != 1:
needs_reshape = True
result_levels.extend(part.dims)
if needs_reshape:
new_size = []
for level in result_levels:
new_size.append(level.dim().size)
result_tensor = result_tensor.reshape(new_size)
tensor_result = Tensor.from_positional(result_tensor, result_levels, True)
return tensor_result # type: ignore[return-value]
def dot(lhs: Any, rhs: Any, sum_dims: Any) -> Union[_Tensor, torch.Tensor]:
"""
Perform dot product between two tensors along specified dimensions.
Args:
lhs: Left-hand side tensor
rhs: Right-hand side tensor
sum_dims: Dimensions to sum over (contract)
Returns:
Result of dot product
"""
# Get tensor info
lhs_info = TensorInfo.create(lhs, ensure_batched=False, ensure_present=False)
rhs_info = TensorInfo.create(rhs, ensure_batched=False, ensure_present=False)
if not (lhs_info and rhs_info):
# Fall back to regular operations
return torch.matmul(lhs, rhs)
assert lhs_info.tensor is not None and rhs_info.tensor is not None, (
"Cannot perform dot product on None tensors"
)
lhs_strides = lhs_info.tensor.stride()
rhs_strides = rhs_info.tensor.stride()
# Create dot parts for different dimension categories
lro_dims = DotPart() # Left-right-output (batch dims)
lo_dims = DotPart() # Left-output only
ro_dims = DotPart() # Right-output only
lr_dims = DotPart() # Left-right (contracted dims)
def insert_dim(d: Any, lhs_idx: Any, rhs_idx: Any) -> None:
"""Insert dimension into appropriate part based on stride pattern."""
reduced = d in sum_dims
lhs_stride = lhs_strides[lhs_idx] if lhs_idx is not None else 0
rhs_stride = rhs_strides[rhs_idx] if rhs_idx is not None else 0
if reduced:
lr_dims.append(d)
else:
if (lhs_stride == 0) == (rhs_stride == 0):
lro_dims.append(d) # Both have or both lack this dim
elif lhs_stride != 0:
lo_dims.append(d) # Only lhs has this dim
else:
ro_dims.append(d) # Only rhs has this dim
# Track which rhs dimensions we've seen
rhs_seen = [False] * len(rhs_info.levels)
# Process lhs dimensions
for i, lhs_level in enumerate(lhs_info.levels):
rhs_idx = None
for j, rhs_level in enumerate(rhs_info.levels):
if lhs_level == rhs_level:
rhs_idx = j
rhs_seen[j] = True
break
insert_dim(lhs_level, i, rhs_idx)
# Process remaining rhs dimensions
for i, rhs_level in enumerate(rhs_info.levels):
if not rhs_seen[i]:
insert_dim(rhs_level, None, i)
# Validate sum dimensions exist
if len(lr_dims.dims) != len(sum_dims):
for d in sum_dims:
if d not in lhs_info.levels and d not in rhs_info.levels:
raise ValueError(f"summing over non-existent dimension {d}")
# Prepare tensors and perform matrix multiplication
if len(lro_dims.dims) != 0:
# Batched matrix multiply
lhs_tensor = dot_prepare([lro_dims, lo_dims, lr_dims], lhs_info)
rhs_tensor = dot_prepare([lro_dims, lr_dims, ro_dims], rhs_info)
result = torch.bmm(lhs_tensor, rhs_tensor)
return dot_finish([lro_dims, lo_dims, ro_dims], result)
else:
# Regular matrix multiply
lhs_tensor = dot_prepare([lo_dims, lr_dims], lhs_info)
rhs_tensor = dot_prepare([lr_dims, ro_dims], rhs_info)
result = torch.mm(lhs_tensor, rhs_tensor)
return dot_finish([lo_dims, ro_dims], result)
from functorch.dim._wrap import _wrap
from functorch.dim.wrap_type import wrap_type
wrap_type(_Tensor, torch.Tensor, _Tensor.__torch_function__)
del _Tensor.ndim
def index(self: Any, positions: Any, dims: Any) -> _Tensor:
"""
Index a regular tensor by binding specified positions to dims.
This converts a regular tensor to a first-class tensor by binding
the specified positional dimensions to Dim objects.
Args:
positions: Tuple of dimension positions to bind
dims: Dim objects or tuple of Dim objects to bind to
Returns:
First-class tensor with specified dimensions bound
"""
# If this is already a first-class tensor (_Tensor), call its index method directly
if isinstance(self, _Tensor):
return _Tensor.index(self, positions, dims)
# Convert regular tensor to first-class tensor
info = TensorInfo.create(self, ensure_batched=False, ensure_present=False)
# Create the first-class tensor
assert info.tensor is not None, "Cannot index None tensor"
result = Tensor.from_positional(info.tensor, info.levels, info.has_device)
# Now call the index method on the first-class tensor
# Cast result to _Tensor for the method call
return _Tensor.index(result, positions, dims) # type: ignore[arg-type]
def _def(name: str, *args: Any, **kwargs: Any) -> None:
orig = getattr(torch.Tensor, name)
setattr(_Tensor, name, _wrap(orig, *args, **kwargs))
_def("mean")
_def("sum")
_def("all")
_def("amax")
_def("amin")
_def("aminmax")
_def("any")
_def("count_nonzero")
_def("logsumexp")
_def("nanmean")
_def("nansum")
_def("prod")
_def("std", keepdim_offset=2)
_def("var", keepdim_offset=2)
_def("max", single_dim=True)
_def("min", single_dim=True)
_def("argmax", single_dim=True)
_def("argmin", single_dim=True)
_def("kthvalue", single_dim=True)
_def("median", single_dim=True)
_def("nanmedian", single_dim=True)
_def("mode", single_dim=True)
_def("sort", reduce=False)
_def("argsort", reduce=False)
_def("unbind", single_dim=True)
_def("chunk", dim_offset=1, reduce=False)
_def("cummax", single_dim=True, reduce=False)
_def("cummin", single_dim=True, reduce=False)
_def("cumprod", single_dim=True, reduce=False)
_def("cumprod_", single_dim=True, reduce=False)
_def("cumsum", single_dim=True, reduce=False)
_def("cumsum_", single_dim=True, reduce=False)
_def("logcumsumexp", single_dim=True, reduce=False)
_def("renorm", dim_offset=1, single_dim=True, reduce=False)
_def("softmax", single_dim=True, reduce=False)
softmax = _wrap(torch.nn.functional.softmax, single_dim=True, reduce=False)
# stuff to handle in the future, because they require special
# binding logic for dims
# cross
# diag_embed
# diagonal
# diagonal_scatter
# diff
# nanquantile
# quantile
# roll
# rot90
# topk (new dimes on output)
# should these all be subsumed by inplace indexing?
# index_add_
# index_add
# index_copy
# index_copy_
# index_fill
# index_fill_
# index_select
# scatter
# scatter_
# scatter_add
# scatter_add_
# scatter_reduce
| DotPart |
python | django__django | django/contrib/sessions/models.py | {
"start": 91,
"end": 164
} | class ____(BaseSessionManager):
use_in_migrations = True
| SessionManager |
python | redis__redis-py | redis/asyncio/multidb/healthcheck.py | {
"start": 768,
"end": 1325
} | class ____(ABC):
"""
Health checks execution policy.
"""
@property
@abstractmethod
def health_check_probes(self) -> int:
"""Number of probes to execute health checks."""
pass
@property
@abstractmethod
def health_check_delay(self) -> float:
"""Delay between health check probes."""
pass
@abstractmethod
async def execute(self, health_checks: List[HealthCheck], database) -> bool:
"""Execute health checks and return database health status."""
pass
| HealthCheckPolicy |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.