language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/LinearRegionItem.py | {
"start": 198,
"end": 14081
} | class ____(GraphicsObject):
"""
**Bases:** :class:`GraphicsObject <pyqtgraph.GraphicsObject>`
Used for marking a horizontal or vertical region in plots.
The region can be dragged and is bounded by lines which can be dragged individually.
=============================== =============================================================================
**Signals:**
sigRegionChangeFinished(self) Emitted when the user has finished dragging the region (or one of its lines)
and when the region is changed programatically.
sigRegionChanged(self) Emitted while the user is dragging the region (or one of its lines)
and when the region is changed programatically.
=============================== =============================================================================
"""
sigRegionChangeFinished = QtCore.Signal(object)
sigRegionChanged = QtCore.Signal(object)
Vertical = 0
Horizontal = 1
_orientation_axis = {
Vertical: 0,
Horizontal: 1,
'vertical': 0,
'horizontal': 1,
}
def __init__(self, values=(0, 1), orientation='vertical', brush=None, pen=None,
hoverBrush=None, hoverPen=None, movable=True, bounds=None,
span=(0, 1), swapMode='sort', clipItem=None):
"""Create a new LinearRegionItem.
============== =====================================================================
**Arguments:**
values A list of the positions of the lines in the region. These are not
limits; limits can be set by specifying bounds.
orientation Options are 'vertical' or 'horizontal'
The default is 'vertical', indicating that the region is bounded
by vertical lines.
brush Defines the brush that fills the region. Can be any arguments that
are valid for :func:`mkBrush <pyqtgraph.mkBrush>`. Default is
transparent blue.
pen The pen to use when drawing the lines that bound the region.
hoverBrush The brush to use when the mouse is hovering over the region.
hoverPen The pen to use when the mouse is hovering over the region.
movable If True, the region and individual lines are movable by the user; if
False, they are static.
bounds Optional [min, max] bounding values for the region
span Optional [min, max] giving the range over the view to draw
the region. For example, with a vertical line, use
``span=(0.5, 1)`` to draw only on the top half of the
view.
swapMode Sets the behavior of the region when the lines are moved such that
their order reverses:
* "block" means the user cannot drag one line past the other
* "push" causes both lines to be moved if one would cross the other
* "sort" means that lines may trade places, but the output of
getRegion always gives the line positions in ascending order.
* None means that no attempt is made to handle swapped line
positions.
The default is "sort".
clipItem An item whose bounds will be used to limit the region bounds.
This is useful when a LinearRegionItem is added on top of an
:class:`~pyqtgraph.ImageItem` or
:class:`~pyqtgraph.PlotDataItem` and the visual region should
not extend beyond its range. This overrides ``bounds``.
============== =====================================================================
"""
GraphicsObject.__init__(self)
self.orientation = orientation
self.blockLineSignal = False
self.moving = False
self.mouseHovering = False
self.span = span
self.swapMode = swapMode
self.clipItem = clipItem
self._boundingRectCache = None
self._clipItemBoundsCache = None
# note LinearRegionItem.Horizontal and LinearRegionItem.Vertical
# are kept for backward compatibility.
lineKwds = dict(
movable=movable,
bounds=bounds,
span=span,
pen=pen,
hoverPen=hoverPen,
)
if orientation in ('horizontal', LinearRegionItem.Horizontal):
self.lines = [
# rotate lines to 180 to preserve expected line orientation
# with respect to region. This ensures that placing a '<|'
# marker on lines[0] causes it to point left in vertical mode
# and down in horizontal mode.
InfiniteLine(QtCore.QPointF(0, values[0]), angle=0, **lineKwds),
InfiniteLine(QtCore.QPointF(0, values[1]), angle=0, **lineKwds)]
tr = QtGui.QTransform.fromScale(1, -1)
self.lines[0].setTransform(tr, True)
self.lines[1].setTransform(tr, True)
elif orientation in ('vertical', LinearRegionItem.Vertical):
self.lines = [
InfiniteLine(QtCore.QPointF(values[0], 0), angle=90, **lineKwds),
InfiniteLine(QtCore.QPointF(values[1], 0), angle=90, **lineKwds)]
else:
raise Exception("Orientation must be 'vertical' or 'horizontal'.")
for l in self.lines:
l.setParentItem(self)
l.sigPositionChangeFinished.connect(self.lineMoveFinished)
self.lines[0].sigPositionChanged.connect(self._line0Moved)
self.lines[1].sigPositionChanged.connect(self._line1Moved)
if brush is None:
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255, 50))
self.setBrush(brush)
if hoverBrush is None:
c = self.brush.color()
c.setAlpha(min(c.alpha() * 2, 255))
hoverBrush = fn.mkBrush(c)
self.setHoverBrush(hoverBrush)
self.setMovable(movable)
def getRegion(self):
"""Return the values at the edges of the region."""
r = (self.lines[0].value(), self.lines[1].value())
if self.swapMode == 'sort':
return (min(r), max(r))
else:
return r
def setRegion(self, rgn):
"""Set the values for the edges of the region.
============== ==============================================
**Arguments:**
rgn A list or tuple of the lower and upper values.
============== ==============================================
"""
if self.lines[0].value() == rgn[0] and self.lines[1].value() == rgn[1]:
return
self.blockLineSignal = True
self.lines[0].setValue(rgn[0])
self.lines[1].setValue(rgn[1])
self.lineMoved(0)
self.blockLineSignal = False
self.lineMoved(1)
self.lineMoveFinished()
def setBrush(self, *br, **kargs):
"""Set the brush that fills the region. Can have any arguments that are valid
for :func:`mkBrush <pyqtgraph.mkBrush>`.
"""
self.brush = fn.mkBrush(*br, **kargs)
self.currentBrush = self.brush
def setHoverBrush(self, *br, **kargs):
"""Set the brush that fills the region when the mouse is hovering over.
Can have any arguments that are valid
for :func:`mkBrush <pyqtgraph.mkBrush>`.
"""
self.hoverBrush = fn.mkBrush(*br, **kargs)
def setBounds(self, bounds):
"""Set ``(min, max)`` bounding values for the region.
The current position is only affected it is outside the new bounds. See
:func:`~pyqtgraph.LinearRegionItem.setRegion` to set the position of the region.
Use ``(None, None)`` to disable bounds.
"""
if self.clipItem is not None:
self.setClipItem(None)
self._setBounds(bounds)
def _setBounds(self, bounds):
# internal impl so user-facing setBounds can clear clipItem and clipItem can
# set bounds without clearing itself
for line in self.lines:
line.setBounds(bounds)
def setMovable(self, m=True):
"""Set lines to be movable by the user, or not. If lines are movable, they will
also accept HoverEvents."""
for line in self.lines:
line.setMovable(m)
self.movable = m
self.setAcceptHoverEvents(m)
def setSpan(self, mn, mx):
if self.span == (mn, mx):
return
self.span = (mn, mx)
for line in self.lines:
line.setSpan(mn, mx)
self.update()
def setClipItem(self, item=None):
"""Set an item to which the region is bounded.
If ``None``, bounds are disabled.
"""
self.clipItem = item
self._clipItemBoundsCache = None
if item is None:
self._setBounds((None, None))
if item is not None:
self._updateClipItemBounds()
def _updateClipItemBounds(self):
# set region bounds corresponding to clipItem
item_vb = self.clipItem.getViewBox()
if item_vb is None:
return
item_bounds = item_vb.childrenBounds(items=(self.clipItem,))
if item_bounds == self._clipItemBoundsCache or None in item_bounds:
return
self._clipItemBoundsCache = item_bounds
if self.orientation in ('horizontal', LinearRegionItem.Horizontal):
self._setBounds(item_bounds[1])
else:
self._setBounds(item_bounds[0])
def boundingRect(self):
br = QtCore.QRectF(self.viewRect()) # bounds of containing ViewBox mapped to local coords.
if self.clipItem is not None:
self._updateClipItemBounds()
rng = self.getRegion()
if self.orientation in ('vertical', LinearRegionItem.Vertical):
br.setLeft(rng[0])
br.setRight(rng[1])
length = br.height()
br.setBottom(br.top() + length * self.span[1])
br.setTop(br.top() + length * self.span[0])
else:
br.setTop(rng[0])
br.setBottom(rng[1])
length = br.width()
br.setRight(br.left() + length * self.span[1])
br.setLeft(br.left() + length * self.span[0])
br = br.normalized()
if self._boundingRectCache != br:
self._boundingRectCache = br
self.prepareGeometryChange()
return br
def paint(self, p, *args):
profiler = debug.Profiler() # noqa: profiler does prints on GC
p.setBrush(self.currentBrush)
p.setPen(fn.mkPen(None))
p.drawRect(self.boundingRect())
def dataBounds(self, axis, frac=1.0, orthoRange=None):
if axis == self._orientation_axis[self.orientation]:
return self.getRegion()
else:
return None
def lineMoved(self, i):
if self.blockLineSignal:
return
# lines swapped
if self.lines[0].value() > self.lines[1].value():
if self.swapMode == 'block':
self.lines[i].setValue(self.lines[1-i].value())
elif self.swapMode == 'push':
self.lines[1-i].setValue(self.lines[i].value())
self.prepareGeometryChange()
self.sigRegionChanged.emit(self)
@QtCore.Slot()
def _line0Moved(self):
self.lineMoved(0)
@QtCore.Slot()
def _line1Moved(self):
self.lineMoved(1)
@QtCore.Slot()
def lineMoveFinished(self):
self.sigRegionChangeFinished.emit(self)
def mouseDragEvent(self, ev):
if not self.movable or ev.button() != QtCore.Qt.MouseButton.LeftButton:
return
ev.accept()
if ev.isStart():
bdp = ev.buttonDownPos()
self.cursorOffsets = [l.pos() - bdp for l in self.lines]
self.startPositions = [l.pos() for l in self.lines]
self.moving = True
if not self.moving:
return
self.blockLineSignal = True # only want to update once
for i, l in enumerate(self.lines):
l.setPos(self.cursorOffsets[i] + ev.pos())
self.prepareGeometryChange()
self.blockLineSignal = False
if ev.isFinish():
self.moving = False
self.sigRegionChangeFinished.emit(self)
else:
self.sigRegionChanged.emit(self)
def mouseClickEvent(self, ev):
if self.moving and ev.button() == QtCore.Qt.MouseButton.RightButton:
ev.accept()
for i, l in enumerate(self.lines):
l.setPos(self.startPositions[i])
self.moving = False
self.sigRegionChanged.emit(self)
self.sigRegionChangeFinished.emit(self)
def hoverEvent(self, ev):
if self.movable and (not ev.isExit()) and ev.acceptDrags(QtCore.Qt.MouseButton.LeftButton):
self.setMouseHover(True)
else:
self.setMouseHover(False)
def setMouseHover(self, hover):
## Inform the item that the mouse is(not) hovering over it
if self.mouseHovering == hover:
return
self.mouseHovering = hover
if hover:
self.currentBrush = self.hoverBrush
else:
self.currentBrush = self.brush
self.update()
| LinearRegionItem |
python | getsentry__sentry | src/sentry/api/endpoints/relay/register_response.py | {
"start": 976,
"end": 4053
} | class ____(Endpoint):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.OWNERS_INGEST
authentication_classes = ()
permission_classes = ()
enforce_rate_limit = True
rate_limits = RELAY_AUTH_RATE_LIMITS
def post(self, request: Request) -> Response:
"""
Registers a Relay
`````````````````
Registers the relay with the sentry installation. If a relay boots
it will always attempt to invoke this endpoint.
"""
try:
json_data = orjson.loads(request.body)
except orjson.JSONDecodeError:
return Response({"detail": "No valid json body"}, status=status.HTTP_400_BAD_REQUEST)
serializer = RelayRegisterResponseSerializer(data=json_data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
sig = get_header_relay_signature(request)
if not sig:
return Response(
{"detail": "Missing relay signature"}, status=status.HTTP_400_BAD_REQUEST
)
secret = options.get("system.secret-key")
try:
validated = validate_register_response(request.body, sig, secret)
except UnpackErrorSignatureExpired:
return Response({"detail": "Challenge expired"}, status=status.HTTP_401_UNAUTHORIZED)
except Exception as exc:
return Response(
{"detail": str(exc).splitlines()[0]}, status=status.HTTP_400_BAD_REQUEST
)
relay_id = str(validated["relay_id"])
version = str(validated["version"])
public_key = validated["public_key"]
if relay_id != get_header_relay_id(request):
return Response(
{"detail": "relay_id in payload did not match header"},
status=status.HTTP_400_BAD_REQUEST,
)
relay, static = relay_from_id(request, relay_id)
if not static:
is_internal = is_internal_relay(request, public_key)
if relay is None:
relay = Relay.objects.create(
relay_id=relay_id, public_key=public_key, is_internal=is_internal
)
elif relay.is_internal != is_internal:
# update the internal flag in case it is changed
relay.is_internal = is_internal
relay.save()
# only update usage for non static relays (static relays should not access the db)
try:
relay_usage = RelayUsage.objects.get(relay_id=relay_id, version=version)
except RelayUsage.DoesNotExist:
RelayUsage.objects.create(relay_id=relay_id, version=version, public_key=public_key)
else:
relay_usage.last_seen = timezone.now()
relay_usage.public_key = public_key
relay_usage.save()
assert relay is not None
return Response(serialize({"relay_id": relay.relay_id}))
| RelayRegisterResponseEndpoint |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_chisquare_simple_test_p_value_to_be_greater_than.py | {
"start": 2172,
"end": 5008
} | class ____(BatchExpectation):
"""Expect the chi-squared of 2 columns to have a p-value greater than the provided threshold."""
examples = [
{
"data": {"x": [30, 45, 25, 20], "y": [40, 40, 20, 20]},
"only_for": ["pandas"],
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"observed_column": "x",
"expected_column": "y",
"p_value_threshold": 0.1,
},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"observed_column": "x",
"expected_column": "y",
"p_value_threshold": 0.5,
},
"out": {"success": False},
},
],
}
]
# This is a tuple consisting of all Metrics necessary to evaluate the Expectation.
metric_dependencies = ("column.p_value_greater_than_threshold",)
# This a tuple of parameter names that can affect whether the Expectation evaluates to True or False.
success_keys = (
"p_value_threshold",
"observed_column",
"expected_column",
)
# This dictionary contains default values for any parameters that should have default values.
default_kwarg_values = {}
# This method performs a validation of your metrics against your success keys, returning a dict indicating the success or failure of the Expectation.
def _validate(
self,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
threshold = self.configuration["kwargs"].get("p_value_threshold")
_chi2, p_value = metrics.get("column.p_value_greater_than_threshold")
success = p_value >= threshold
return {"success": success, "result": {"observed_value": p_value}}
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": [
"statistical",
"test",
"testing",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@HaebichanGX", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
ExpectColumnChisquareSimpleTestPValueToBeGreaterThan().print_diagnostic_checklist()
| ExpectColumnChisquareSimpleTestPValueToBeGreaterThan |
python | pytorch__pytorch | test/dynamo/test_torchrec.py | {
"start": 603,
"end": 2367
} | class ____(torch.nn.Module):
def __init__(self, feature_boundaries: dict[str, list[float]]):
super().__init__()
self.bucket_w = torch.nn.ParameterDict()
self.boundaries_dict = {}
for key, boundaries in feature_boundaries.items():
self.bucket_w[key] = torch.nn.Parameter(
torch.empty([len(boundaries) + 1]).fill_(1.0),
requires_grad=True,
)
buf = torch.tensor(boundaries, requires_grad=False)
self.register_buffer(
f"{key}_boundaries",
buf,
persistent=False,
)
self.boundaries_dict[key] = buf
def forward(self, features: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
weights_list = []
for key, boundaries in self.boundaries_dict.items():
jt = features[key]
bucketized = torch.bucketize(jt.weights(), boundaries)
# doesn't super matter I guess
# hashed = torch.ops.fb.index_hash(bucketized, seed=0, modulo=len(boundaries))
hashed = bucketized
weights = torch.gather(self.bucket_w[key], dim=0, index=hashed)
weights_list.append(weights)
return KeyedJaggedTensor(
keys=features.keys(),
values=features.values(),
weights=torch.cat(weights_list),
lengths=features.lengths(),
offsets=features.offsets(),
stride=features.stride(),
length_per_key=features.length_per_key(),
)
if not HAS_TORCHREC:
print("torchrec not available, skipping tests", file=sys.stderr)
TestCase = NoTest # noqa: F811
@unittest.skipIf(not HAS_TORCHREC, "these tests require torchrec")
| BucketizeMod |
python | plotly__plotly.py | plotly/graph_objs/scatterternary/_unselected.py | {
"start": 233,
"end": 3445
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterternary"
_path_str = "scatterternary.unselected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.unselected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.scatterternary.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.unselected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.scatterternary.unselected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scatterternary.unselected.
Marker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterternary.unselected.
Textfont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterternary.Unselected`
marker
:class:`plotly.graph_objects.scatterternary.unselected.
Marker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterternary.unselected.
Textfont` instance or dict with compatible properties
Returns
-------
Unselected
"""
super().__init__("unselected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterternary.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.Unselected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Unselected |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_parent_decorator.py | {
"start": 566,
"end": 903
} | class ____(TestC):
def __init__(self, foo, bar, baz):
_test_sink(foo)
_test_sink(bar)
_test_sink(baz)
def setup():
TestC_1(0, 0, 0)
TestC_1(0, 0, 0)
TestC_1(0, 0, 0)
TestC_2(0, 0, 0)
TestC_2(0, 0, 0)
TestC_2(0, 0, 0)
TestC_3(0, 0, 0)
TestC_3(0, 0, 0)
TestC_3(0, 0, 0)
| TestC_3 |
python | django__django | tests/admin_changelist/admin.py | {
"start": 856,
"end": 1001
} | class ____(admin.ModelAdmin):
list_filter = ["child__name"]
search_fields = ["child__name"]
list_select_related = ["child"]
| ParentAdmin |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_parser.py | {
"start": 3077,
"end": 89636
} | class ____(BaseParser):
@property
def language(self) -> str:
return 'C++'
@property
def id_attributes(self) -> Sequence[str]:
return self.config.cpp_id_attributes
@property
def paren_attributes(self) -> Sequence[str]:
return self.config.cpp_paren_attributes
def _parse_string(self) -> str:
if self.current_char != '"':
return None
start_pos = self.pos
self.pos += 1
escape = False
while True:
if self.eof:
self.fail('Unexpected end during inside string.')
elif self.current_char == '"' and not escape:
self.pos += 1
break
elif self.current_char == '\\':
escape = True
else:
escape = False
self.pos += 1
return self.definition[start_pos : self.pos]
def _parse_literal(self) -> ASTLiteral:
# -> integer-literal
# | character-literal
# | floating-literal
# | string-literal
# | boolean-literal -> "false" | "true"
# | pointer-literal -> "nullptr"
# | user-defined-literal
def _udl(literal: ASTLiteral) -> ASTLiteral:
if not self.match(udl_identifier_re):
return literal
# hmm, should we care if it's a keyword?
# it looks like GCC does not disallow keywords
ident = ASTIdentifier(self.matched_text)
return ASTUserDefinedLiteral(literal, ident)
self.skip_ws()
if self.skip_word('nullptr'):
return ASTPointerLiteral()
if self.skip_word('true'):
return ASTBooleanLiteral(True)
if self.skip_word('false'):
return ASTBooleanLiteral(False)
pos = self.pos
if self.match(float_literal_re):
has_suffix = self.match(float_literal_suffix_re)
float_lit = ASTNumberLiteral(self.definition[pos : self.pos])
if has_suffix:
return float_lit
else:
return _udl(float_lit)
for regex in (
binary_literal_re,
hex_literal_re,
integer_literal_re,
octal_literal_re,
):
if self.match(regex):
has_suffix = self.match(integers_literal_suffix_re)
int_lit = ASTNumberLiteral(self.definition[pos : self.pos])
if has_suffix:
return int_lit
else:
return _udl(int_lit)
string = self._parse_string()
if string is not None:
return _udl(ASTStringLiteral(string))
# character-literal
if self.match(char_literal_re):
prefix = self.last_match.group(1) # may be None when no prefix
data = self.last_match.group(2)
try:
char_lit = ASTCharLiteral(prefix, data)
except UnicodeDecodeError as e:
self.fail(
'Can not handle character literal. Internal error was: %s' % e
)
except UnsupportedMultiCharacterCharLiteral:
self.fail(
'Can not handle character literal'
' resulting in multiple decoded characters.'
)
return _udl(char_lit)
return None
def _parse_fold_or_paren_expression(self) -> ASTExpression | None:
# "(" expression ")"
# fold-expression
# -> ( cast-expression fold-operator ... )
# | ( ... fold-operator cast-expression )
# | ( cast-expression fold-operator ... fold-operator cast-expression
if self.current_char != '(':
return None
self.pos += 1
self.skip_ws()
if self.skip_string_and_ws('...'):
# ( ... fold-operator cast-expression )
if not self.match(_fold_operator_re):
self.fail("Expected fold operator after '...' in fold expression.")
op = self.matched_text
right_expr = self._parse_cast_expression()
if not self.skip_string(')'):
self.fail("Expected ')' in end of fold expression.")
return ASTFoldExpr(None, op, right_expr)
# try first parsing a unary right fold, or a binary fold
pos = self.pos
try:
self.skip_ws()
left_expr = self._parse_cast_expression()
self.skip_ws()
if not self.match(_fold_operator_re):
self.fail(
'Expected fold operator after left expression in fold expression.'
)
op = self.matched_text
self.skip_ws()
if not self.skip_string_and_ws('...'):
self.fail("Expected '...' after fold operator in fold expression.")
except DefinitionError as e_fold:
self.pos = pos
# fall back to a paren expression
try:
res = self._parse_expression()
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expected ')' in end of parenthesized expression.")
except DefinitionError as e_expr:
raise self._make_multi_error(
[
(e_fold, 'If fold expression'),
(e_expr, 'If parenthesized expression'),
],
'Error in fold expression or parenthesized expression.',
) from e_expr
return ASTParenExpr(res)
# now it definitely is a fold expression
if self.skip_string(')'):
return ASTFoldExpr(left_expr, op, None)
if not self.match(_fold_operator_re):
self.fail("Expected fold operator or ')' after '...' in fold expression.")
if op != self.matched_text:
self.fail(
"Operators are different in binary fold: '%s' and '%s'."
% (op, self.matched_text)
)
right_expr = self._parse_cast_expression()
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expected ')' to end binary fold expression.")
return ASTFoldExpr(left_expr, op, right_expr)
def _parse_primary_expression(self) -> ASTExpression:
# literal
# "this"
# lambda-expression
# "(" expression ")"
# fold-expression
# id-expression -> we parse this with _parse_nested_name
self.skip_ws()
res: ASTExpression = self._parse_literal()
if res is not None:
return res
self.skip_ws()
if self.skip_word('this'):
return ASTThisLiteral()
# TODO: try lambda expression
res = self._parse_fold_or_paren_expression()
if res is not None:
return res
nn = self._parse_nested_name()
if nn is not None:
return ASTIdExpression(nn)
return None
def _parse_initializer_list(
self, name: str, open: str, close: str
) -> tuple[list[ASTExpression | ASTBracedInitList], bool]:
# Parse open and close with the actual initializer-list in between
# -> initializer-clause '...'[opt]
# | initializer-list ',' initializer-clause '...'[opt]
self.skip_ws()
if not self.skip_string_and_ws(open):
return None, None
if self.skip_string(close):
return [], False
exprs: list[ASTExpression | ASTBracedInitList] = []
trailing_comma = False
while True:
self.skip_ws()
expr = self._parse_initializer_clause()
self.skip_ws()
if self.skip_string('...'):
exprs.append(ASTPackExpansionExpr(expr))
else:
exprs.append(expr)
self.skip_ws()
if self.skip_string(close):
break
if not self.skip_string_and_ws(','):
self.fail(f"Error in {name}, expected ',' or '{close}'.")
if self.current_char == close == '}':
self.pos += 1
trailing_comma = True
break
return exprs, trailing_comma
def _parse_paren_expression_list(self) -> ASTParenExprList:
# -> '(' expression-list ')'
# though, we relax it to also allow empty parens
# as it's needed in some cases
#
# expression-list
# -> initializer-list
exprs, _trailing_comma = self._parse_initializer_list(
'parenthesized expression-list', '(', ')'
)
if exprs is None:
return None
return ASTParenExprList(exprs)
def _parse_initializer_clause(self) -> ASTExpression | ASTBracedInitList:
braced_init_list = self._parse_braced_init_list()
if braced_init_list is not None:
return braced_init_list
return self._parse_assignment_expression(in_template=False)
def _parse_braced_init_list(self) -> ASTBracedInitList:
# -> '{' initializer-list ','[opt] '}'
# | '{' '}'
exprs, trailing_comma = self._parse_initializer_list(
'braced-init-list', '{', '}'
)
if exprs is None:
return None
return ASTBracedInitList(exprs, trailing_comma)
def _parse_expression_list_or_braced_init_list(
self,
) -> ASTParenExprList | ASTBracedInitList:
paren = self._parse_paren_expression_list()
if paren is not None:
return paren
return self._parse_braced_init_list()
def _parse_postfix_expression(self) -> ASTPostfixExpr:
# -> primary
# | postfix "[" expression "]"
# | postfix "[" braced-init-list [opt] "]"
# | postfix "(" expression-list [opt] ")"
# | postfix "." "template" [opt] id-expression
# | postfix "->" "template" [opt] id-expression
# | postfix "." pseudo-destructor-name
# | postfix "->" pseudo-destructor-name
# | postfix "++"
# | postfix "--"
# | simple-type-specifier "(" expression-list [opt] ")"
# | simple-type-specifier braced-init-list
# | typename-specifier "(" expression-list [opt] ")"
# | typename-specifier braced-init-list
# | "dynamic_cast" "<" type-id ">" "(" expression ")"
# | "static_cast" "<" type-id ">" "(" expression ")"
# | "reinterpret_cast" "<" type-id ">" "(" expression ")"
# | "const_cast" "<" type-id ">" "(" expression ")"
# | "typeid" "(" expression ")"
# | "typeid" "(" type-id ")"
prefix_type = None
prefix: Any = None
self.skip_ws()
cast = None
for c in _id_explicit_cast:
if self.skip_word_and_ws(c):
cast = c
break
if cast is not None:
prefix_type = 'cast'
if not self.skip_string('<'):
self.fail("Expected '<' after '%s'." % cast)
typ = self._parse_type(False)
self.skip_ws()
if not self.skip_string_and_ws('>'):
self.fail("Expected '>' after type in '%s'." % cast)
if not self.skip_string('('):
self.fail("Expected '(' in '%s'." % cast)
parser = self._parse_expression
expr = self._parse_expression_fallback([')'], parser)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expected ')' to end '%s'." % cast)
prefix = ASTExplicitCast(cast, typ, expr)
elif self.skip_word_and_ws('typeid'):
prefix_type = 'typeid'
if not self.skip_string_and_ws('('):
self.fail("Expected '(' after 'typeid'.")
pos = self.pos
try:
typ = self._parse_type(False)
prefix = ASTTypeId(typ, isType=True)
if not self.skip_string(')'):
self.fail("Expected ')' to end 'typeid' of type.")
except DefinitionError as e_type:
self.pos = pos
try:
parser = self._parse_expression
expr = self._parse_expression_fallback([')'], parser)
prefix = ASTTypeId(expr, isType=False)
if not self.skip_string(')'):
self.fail("Expected ')' to end 'typeid' of expression.")
except DefinitionError as e_expr:
self.pos = pos
header = "Error in 'typeid(...)'."
header += ' Expected type or expression.'
errors = [
(e_type, 'If type'),
(e_expr, 'If expression'),
]
raise self._make_multi_error(errors, header) from e_expr
else: # a primary expression or a type
pos = self.pos
try:
prefix = self._parse_primary_expression()
prefix_type = 'expr'
except DefinitionError as e_outer:
self.pos = pos
try:
# we are potentially casting, so save parens for us
# TODO: hmm, would we need to try both with operatorCast and with None?
prefix = self._parse_type(False, 'operatorCast')
prefix_type = 'typeOperatorCast'
# | simple-type-specifier "(" expression-list [opt] ")"
# | simple-type-specifier braced-init-list
# | typename-specifier "(" expression-list [opt] ")"
# | typename-specifier braced-init-list
self.skip_ws()
if self.current_char not in {'(', '{'}:
self.fail("Expecting '(' or '{' after type in cast expression.")
except DefinitionError as e_inner:
self.pos = pos
header = 'Error in postfix expression,'
header += ' expected primary expression or type.'
errors = [
(e_outer, 'If primary expression'),
(e_inner, 'If type'),
]
raise self._make_multi_error(errors, header) from e_inner
# and now parse postfixes
post_fixes: list[ASTPostfixOp] = []
while True:
self.skip_ws()
if prefix_type in {'expr', 'cast', 'typeid'}:
if self.skip_string_and_ws('['):
expr = self._parse_expression()
self.skip_ws()
if not self.skip_string(']'):
self.fail("Expected ']' in end of postfix expression.")
post_fixes.append(ASTPostfixArray(expr))
continue
if self.skip_string('.'):
if self.skip_string('*'):
# don't steal the dot
self.pos -= 2
elif self.skip_string('..'):
# don't steal the dot
self.pos -= 3
else:
name = self._parse_nested_name()
post_fixes.append(ASTPostfixMember(name))
continue
if self.skip_string('->'):
if self.skip_string('*'):
# don't steal the arrow
self.pos -= 3
else:
name = self._parse_nested_name()
post_fixes.append(ASTPostfixMemberOfPointer(name))
continue
if self.skip_string('++'):
post_fixes.append(ASTPostfixInc())
continue
if self.skip_string('--'):
post_fixes.append(ASTPostfixDec())
continue
lst = self._parse_expression_list_or_braced_init_list()
if lst is not None:
post_fixes.append(ASTPostfixCallExpr(lst))
continue
break
return ASTPostfixExpr(prefix, post_fixes)
def _parse_unary_expression(self) -> ASTExpression:
# -> postfix
# | "++" cast
# | "--" cast
# | unary-operator cast -> (* | & | + | - | ! | ~) cast
# The rest:
# | "sizeof" unary
# | "sizeof" "(" type-id ")"
# | "sizeof" "..." "(" identifier ")"
# | "alignof" "(" type-id ")"
# | noexcept-expression -> noexcept "(" expression ")"
# | new-expression
# | delete-expression
self.skip_ws()
for op in _expression_unary_ops:
# TODO: hmm, should we be able to backtrack here?
if op[0] in 'cn':
res = self.skip_word(op)
else:
res = self.skip_string(op)
if res:
expr = self._parse_cast_expression()
return ASTUnaryOpExpr(op, expr)
if self.skip_word_and_ws('sizeof'):
if self.skip_string_and_ws('...'):
if not self.skip_string_and_ws('('):
self.fail("Expecting '(' after 'sizeof...'.")
if not self.match(identifier_re):
self.fail("Expecting identifier for 'sizeof...'.")
ident = ASTIdentifier(self.matched_text)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'sizeof...'.")
return ASTSizeofParamPack(ident)
if self.skip_string_and_ws('('):
typ = self._parse_type(named=False)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'sizeof'.")
return ASTSizeofType(typ)
expr = self._parse_unary_expression()
return ASTSizeofExpr(expr)
if self.skip_word_and_ws('alignof'):
if not self.skip_string_and_ws('('):
self.fail("Expecting '(' after 'alignof'.")
typ = self._parse_type(named=False)
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'alignof'.")
return ASTAlignofExpr(typ)
if self.skip_word_and_ws('noexcept'):
if not self.skip_string_and_ws('('):
self.fail("Expecting '(' after 'noexcept'.")
expr = self._parse_expression()
self.skip_ws()
if not self.skip_string(')'):
self.fail("Expecting ')' to end 'noexcept'.")
return ASTNoexceptExpr(expr)
# new-expression
pos = self.pos
rooted = self.skip_string('::')
self.skip_ws()
if not self.skip_word_and_ws('new'):
self.pos = pos
else:
# new-placement[opt] new-type-id new-initializer[opt]
# new-placement[opt] ( type-id ) new-initializer[opt]
is_new_type_id = True
if self.skip_string_and_ws('('):
# either this is a new-placement or it's the second production
# without placement, and it's actually the ( type-id ) part
self.fail(
'Sorry, neither new-placement nor parenthesised type-id '
'in new-epression is supported yet.'
)
# set is_new_type_id = False if it's (type-id)
if is_new_type_id:
decl_specs = self._parse_decl_specs(outer=None)
decl = self._parse_declarator(named=False, param_mode='new')
else:
self.fail(
'Sorry, parenthesised type-id in new expression not yet supported.'
)
lst = self._parse_expression_list_or_braced_init_list()
return ASTNewExpr(rooted, is_new_type_id, ASTType(decl_specs, decl), lst)
# delete-expression
pos = self.pos
rooted = self.skip_string('::')
self.skip_ws()
if not self.skip_word_and_ws('delete'):
self.pos = pos
else:
array = self.skip_string_and_ws('[')
if array and not self.skip_string_and_ws(']'):
self.fail("Expected ']' in array delete-expression.")
expr = self._parse_cast_expression()
return ASTDeleteExpr(rooted, array, expr)
return self._parse_postfix_expression()
def _parse_cast_expression(self) -> ASTExpression:
# -> unary | "(" type-id ")" cast
pos = self.pos
self.skip_ws()
if self.skip_string('('):
try:
typ = self._parse_type(False)
if not self.skip_string(')'):
self.fail("Expected ')' in cast expression.")
expr = self._parse_cast_expression()
return ASTCastExpr(typ, expr)
except DefinitionError as ex_cast:
self.pos = pos
try:
return self._parse_unary_expression()
except DefinitionError as ex_unary:
errs = [
(ex_cast, 'If type cast expression'),
(ex_unary, 'If unary expression'),
]
raise self._make_multi_error(
errs, 'Error in cast expression.'
) from ex_unary
else:
return self._parse_unary_expression()
    def _parse_logical_or_expression(self, in_template: bool) -> ASTExpression:
        """Parse a logical-or-expression via precedence climbing.

        The nested helper walks down the binary-operator precedence
        table (``_expression_bin_ops``), parsing cast-expressions at the
        lowest level.  When *in_template* is True, a bare '>' terminates
        the expression (it closes the template argument list).
        """
        # logical-or = logical-and ||
        # logical-and = inclusive-or &&
        # inclusive-or = exclusive-or |
        # exclusive-or = and ^
        # and = equality &
        # equality = relational ==, !=
        # relational = shift <, >, <=, >=, <=>
        # shift = additive <<, >>
        # additive = multiplicative +, -
        # multiplicative = pm *, /, %
        # pm = cast .*, ->*
        def _parse_bin_op_expr(
            self: DefinitionParser, op_id: int, in_template: bool
        ) -> ASTExpression:
            # Each recursion level handles one row of the precedence table;
            # the last row parses plain cast-expressions.
            if op_id + 1 == len(_expression_bin_ops):
                def parser(in_template: bool) -> ASTExpression:
                    return self._parse_cast_expression()
            else:
                def parser(in_template: bool) -> ASTExpression:
                    return _parse_bin_op_expr(self, op_id + 1, in_template=in_template)
            exprs = []
            ops = []
            exprs.append(parser(in_template=in_template))
            while True:
                self.skip_ws()
                if in_template and self.current_char == '>':
                    break
                pos = self.pos
                one_more = False
                for op in _expression_bin_ops[op_id]:
                    # Keyword-like operators (and, bitand, compl, not, or,
                    # xor, ...) are matched as whole words.
                    if op[0] in 'abcnox':
                        if not self.skip_word(op):
                            continue
                    else:
                        if not self.skip_string(op):
                            continue
                    if op == self.current_char == '&':
                        # don't split the && 'token'
                        self.pos -= 1
                        # and btw. && has lower precedence, so we are done
                        break
                    try:
                        expr = parser(in_template=in_template)
                        exprs.append(expr)
                        ops.append(op)
                        one_more = True
                        break
                    except DefinitionError:
                        self.pos = pos
                if not one_more:
                    break
            return ASTBinOpExpr(exprs, ops)
        return _parse_bin_op_expr(self, 0, in_template=in_template)
def _parse_conditional_expression_tail(
self, or_expr_head: ASTExpression, in_template: bool
) -> ASTConditionalExpr | None:
# Consumes the or_expr_head on success.
# -> "?" expression ":" assignment-expression
self.skip_ws()
if not self.skip_string('?'):
return None
then_expr = self._parse_expression()
self.skip_ws()
if not self.skip_string(':'):
self.fail('Expected ":" after then-expression in conditional expression.')
else_expr = self._parse_assignment_expression(in_template)
return ASTConditionalExpr(or_expr_head, then_expr, else_expr)
def _parse_assignment_expression(self, in_template: bool) -> ASTExpression:
# -> conditional-expression
# | logical-or-expression assignment-operator initializer-clause
# | yield-expression -> "co_yield" assignment-expression
# | "co_yield" braced-init-list
# | throw-expression -> "throw" assignment-expression[opt]
# TODO: yield-expression
# TODO: throw-expression
# Now we have (after expanding conditional-expression:
# logical-or-expression
# | logical-or-expression "?" expression ":" assignment-expression
# | logical-or-expression assignment-operator initializer-clause
left_expr = self._parse_logical_or_expression(in_template=in_template)
# the ternary operator
cond_expr = self._parse_conditional_expression_tail(left_expr, in_template)
if cond_expr is not None:
return cond_expr
# and actual assignment
for op in _expression_assignment_ops:
if op[0] in 'anox':
if not self.skip_word(op):
continue
else:
if not self.skip_string(op):
continue
right_expr = self._parse_initializer_clause()
return ASTAssignmentExpr(left_expr, op, right_expr)
# just a logical-or-expression
return left_expr
def _parse_constant_expression(self, in_template: bool) -> ASTExpression:
# -> conditional-expression ->
# logical-or-expression
# | logical-or-expression "?" expression ":" assignment-expression
or_expr = self._parse_logical_or_expression(in_template=in_template)
cond_expr = self._parse_conditional_expression_tail(or_expr, in_template)
if cond_expr is not None:
return cond_expr
return or_expr
def _parse_expression(self) -> ASTExpression:
# -> assignment-expression
# | expression "," assignment-expression
exprs = [self._parse_assignment_expression(in_template=False)]
while True:
self.skip_ws()
if not self.skip_string(','):
break
exprs.append(self._parse_assignment_expression(in_template=False))
if len(exprs) == 1:
return exprs[0]
else:
return ASTCommaExpr(exprs)
def _parse_expression_fallback(
self, end: list[str], parser: Callable[[], ASTExpression], allow: bool = True
) -> ASTExpression:
# Stupidly "parse" an expression.
# 'end' should be a list of characters which ends the expression.
# first try to use the provided parser
prev_pos = self.pos
try:
return parser()
except DefinitionError as e:
# some places (e.g., template parameters) we really don't want to use fallback,
# and for testing we may want to globally disable it
if not allow or not self.allowFallbackExpressionParsing:
raise
self.warn(
'Parsing of expression failed. Using fallback parser.'
' Error was:\n%s' % e
)
self.pos = prev_pos
# and then the fallback scanning
assert end is not None
self.skip_ws()
start_pos = self.pos
if self.match(_string_re):
value = self.matched_text
else:
# TODO: add handling of more bracket-like things, and quote handling
brackets = {'(': ')', '{': '}', '[': ']', '<': '>'}
symbols: list[str] = []
while not self.eof:
if len(symbols) == 0 and self.current_char in end:
break
if self.current_char in brackets:
symbols.append(brackets[self.current_char])
elif len(symbols) > 0 and self.current_char == symbols[-1]:
symbols.pop()
self.pos += 1
if len(end) > 0 and self.eof:
self.fail(
'Could not find end of expression starting at %d.' % start_pos
)
value = self.definition[start_pos : self.pos].strip()
return ASTFallbackExpr(value.strip())
# ==========================================================================
def _parse_operator(self) -> ASTOperator:
self.skip_ws()
# adapted from the old code
# yay, a regular operator definition
if self.match(_operator_re):
return ASTOperatorBuildIn(self.matched_text)
# new/delete operator?
for op in 'new', 'delete':
if not self.skip_word(op):
continue
self.skip_ws()
if self.skip_string('['):
self.skip_ws()
if not self.skip_string(']'):
self.fail('Expected "]" after "operator ' + op + '["')
op += '[]'
return ASTOperatorBuildIn(op)
# user-defined literal?
if self.skip_string('""'):
self.skip_ws()
if not self.match(identifier_re):
self.fail('Expected user-defined literal suffix.')
identifier = ASTIdentifier(self.matched_text)
return ASTOperatorLiteral(identifier)
# oh well, looks like a cast operator definition.
# In that case, eat another type.
type = self._parse_type(named=False, outer='operatorCast')
return ASTOperatorType(type)
    def _parse_template_argument_list(self) -> ASTTemplateArgs | None:
        """Parse a template-argument-list, including the '<' and '>'.

        Returns None when there is no '<' at the current position.  Each
        argument is first tried as a type-id; on failure the parser
        backtracks and retries it as a constant expression.
        """
        # template-argument-list: (but we include the < and > here
        #     template-argument ...[opt]
        #     template-argument-list, template-argument ...[opt]
        # template-argument:
        #     constant-expression
        #     type-id
        #     id-expression
        self.skip_ws()
        if not self.skip_string_and_ws('<'):
            return None
        if self.skip_string('>'):
            # empty argument list: <>
            return ASTTemplateArgs([], False)
        prev_errors = []
        template_args: list[ASTType | ASTTemplateArgConstant] = []
        pack_expansion = False
        while 1:
            pos = self.pos
            parsed_comma = False
            parsed_end = False
            try:
                # first attempt: the argument is a type
                type = self._parse_type(named=False)
                self.skip_ws()
                if self.skip_string_and_ws('...'):
                    pack_expansion = True
                    parsed_end = True
                    if not self.skip_string('>'):
                        self.fail('Expected ">" after "..." in template argument list.')
                elif self.skip_string('>'):
                    parsed_end = True
                elif self.skip_string(','):
                    parsed_comma = True
                else:
                    self.fail('Expected "...>", ">" or "," in template argument list.')
                template_args.append(type)
            except DefinitionError as e:
                prev_errors.append((e, 'If type argument'))
                self.pos = pos
                try:
                    # second attempt: the argument is a constant expression
                    value = self._parse_constant_expression(in_template=True)
                    self.skip_ws()
                    if self.skip_string_and_ws('...'):
                        pack_expansion = True
                        parsed_end = True
                        if not self.skip_string('>'):
                            self.fail(
                                'Expected ">" after "..." in template argument list.'
                            )
                    elif self.skip_string('>'):
                        parsed_end = True
                    elif self.skip_string(','):
                        parsed_comma = True
                    else:
                        self.fail(
                            'Expected "...>", ">" or "," in template argument list.'
                        )
                    template_args.append(ASTTemplateArgConstant(value))
                except DefinitionError as e:
                    self.pos = pos
                    prev_errors.append((e, 'If non-type argument'))
                    header = 'Error in parsing template argument list.'
                    raise self._make_multi_error(prev_errors, header) from e
            if parsed_end:
                assert not parsed_comma
                break
            # a pack expansion ('...') must have closed the list
            assert not pack_expansion
        return ASTTemplateArgs(template_args, pack_expansion)
    def _parse_nested_name(self, member_pointer: bool = False) -> ASTNestedName:
        """Parse a (possibly rooted) nested-name, e.g. ``A::B<int>::operator+``.

        When *member_pointer* is True, parsing stops before the final
        name so the caller can consume the '*' of a pointer-to-member;
        in that mode every component must be followed by '::'.
        """
        names: list[ASTNestedNameElement] = []
        templates: list[bool] = []
        self.skip_ws()
        rooted = False
        if self.skip_string('::'):
            rooted = True
        while 1:
            self.skip_ws()
            if len(names) > 0:
                # an explicit 'template' disambiguator may precede inner names
                template = self.skip_word_and_ws('template')
            else:
                template = False
            templates.append(template)
            ident_or_op: ASTIdentifier | ASTOperator | None = None
            if self.skip_word_and_ws('operator'):
                ident_or_op = self._parse_operator()
            else:
                if not self.match(identifier_re):
                    if member_pointer and len(names) > 0:
                        # trailing '::' before '*': stop here for the caller
                        templates.pop()
                        break
                    self.fail('Expected identifier in nested name.')
                identifier = self.matched_text
                # make sure there isn't a keyword
                if identifier in _keywords:
                    self.fail(
                        'Expected identifier in nested name, '
                        'got keyword: %s' % identifier
                    )
                ident_or_op = ASTIdentifier(identifier)
            # try greedily to get template arguments,
            # but otherwise a < might be because we are in an expression
            pos = self.pos
            try:
                template_args = self._parse_template_argument_list()
            except DefinitionError as ex:
                self.pos = pos
                template_args = None
                self.otherErrors.append(ex)
            names.append(ASTNestedNameElement(ident_or_op, template_args))
            self.skip_ws()
            if not self.skip_string('::'):
                if member_pointer:
                    self.fail("Expected '::' in pointer to member (function).")
                break
        return ASTNestedName(names, templates, rooted)
# ==========================================================================
    def _parse_simple_type_specifiers(self) -> ASTTrailingTypeSpecFundamental | None:
        """Parse a run of fundamental type specifiers (e.g. ``unsigned long int``).

        Returns None when no specifier is present.  The specifiers may
        appear in any order; after collecting them the combination of
        modifier/signedness/width/base type is validated.
        """
        modifier: str | None = None       # _Imaginary / _Complex
        signedness: str | None = None     # signed / unsigned
        width: list[str] = []             # short / long / long long
        typ: str | None = None            # the base type keyword
        names: list[str] = []  # the parsed sequence
        self.skip_ws()
        while self.match(_simple_type_specifiers_re):
            t = self.matched_text
            names.append(t)
            if t in {
                'auto', 'void', 'bool',
                'char', 'wchar_t', 'char8_t', 'char16_t', 'char32_t',
                'int', '__int64', '__int128',
                'float', 'double',
                '__float80', '_Float64x', '__float128', '_Float128',
            }:  # fmt: skip
                if typ is not None:
                    self.fail(f'Can not have both {t} and {typ}.')
                typ = t
            elif t in {'signed', 'unsigned'}:
                if signedness is not None:
                    self.fail(f'Can not have both {t} and {signedness}.')
                signedness = t
            elif t == 'short':
                if len(width) != 0:
                    self.fail(f'Can not have both {t} and {width[0]}.')
                width.append(t)
            elif t == 'long':
                # 'long long' is the only repeatable width
                if len(width) != 0 and width[0] != 'long':
                    self.fail(f'Can not have both {t} and {width[0]}.')
                width.append(t)
            elif t in {'_Imaginary', '_Complex'}:
                if modifier is not None:
                    self.fail(f'Can not have both {t} and {modifier}.')
                modifier = t
            self.skip_ws()
        if len(names) == 0:
            return None
        # validate the collected combination against the base type
        if typ in {
            'auto', 'void', 'bool',
            'wchar_t', 'char8_t', 'char16_t', 'char32_t',
            '__float80', '_Float64x', '__float128', '_Float128',
        }:  # fmt: skip
            if modifier is not None:
                self.fail(f'Can not have both {typ} and {modifier}.')
            if signedness is not None:
                self.fail(f'Can not have both {typ} and {signedness}.')
            if len(width) != 0:
                self.fail(f'Can not have both {typ} and {" ".join(width)}.')
        elif typ == 'char':
            if modifier is not None:
                self.fail(f'Can not have both {typ} and {modifier}.')
            if len(width) != 0:
                self.fail(f'Can not have both {typ} and {" ".join(width)}.')
        elif typ == 'int':
            if modifier is not None:
                self.fail(f'Can not have both {typ} and {modifier}.')
        elif typ in {'__int64', '__int128'}:
            if modifier is not None:
                self.fail(f'Can not have both {typ} and {modifier}.')
            if len(width) != 0:
                self.fail(f'Can not have both {typ} and {" ".join(width)}.')
        elif typ == 'float':
            if signedness is not None:
                self.fail(f'Can not have both {typ} and {signedness}.')
            if len(width) != 0:
                self.fail(f'Can not have both {typ} and {" ".join(width)}.')
        elif typ == 'double':
            if signedness is not None:
                self.fail(f'Can not have both {typ} and {signedness}.')
            if len(width) > 1:
                self.fail(f'Can not have both {typ} and {" ".join(width)}.')
            if len(width) == 1 and width[0] != 'long':
                self.fail(f'Can not have both {typ} and {" ".join(width)}.')
        elif typ is None:
            if modifier is not None:
                self.fail(f'Can not have {modifier} without a floating point type.')
        else:
            msg = f'Unhandled type {typ}'
            raise AssertionError(msg)
        # build the canonical ordering: modifier signedness width type
        canon_names: list[str] = []
        if modifier is not None:
            canon_names.append(modifier)
        if signedness is not None:
            canon_names.append(signedness)
        canon_names.extend(width)
        if typ is not None:
            canon_names.append(typ)
        return ASTTrailingTypeSpecFundamental(names, canon_names)
    def _parse_trailing_type_spec(self) -> ASTTrailingTypeSpec:
        """Parse a trailing type specifier.

        Tries, in order: fundamental types, ``decltype(...)`` /
        ``decltype(auto)``, and finally an (optionally prefixed) named
        type, possibly followed by a placeholder type specifier.
        """
        # fundamental types, https://en.cppreference.com/w/cpp/language/type
        # and extensions
        self.skip_ws()
        res = self._parse_simple_type_specifiers()
        if res is not None:
            return res
        # decltype
        self.skip_ws()
        if self.skip_word_and_ws('decltype'):
            if not self.skip_string_and_ws('('):
                self.fail("Expected '(' after 'decltype'.")
            if self.skip_word_and_ws('auto'):
                if not self.skip_string(')'):
                    self.fail("Expected ')' after 'decltype(auto'.")
                return ASTTrailingTypeSpecDecltypeAuto()
            expr = self._parse_expression()
            self.skip_ws()
            if not self.skip_string(')'):
                self.fail("Expected ')' after 'decltype(<expr>'.")
            return ASTTrailingTypeSpecDecltype(expr)
        # prefixed
        prefix = None
        self.skip_ws()
        for k in ('class', 'struct', 'enum', 'union', 'typename'):
            if self.skip_word_and_ws(k):
                prefix = k
                break
        nested_name = self._parse_nested_name()
        self.skip_ws()
        # optional placeholder type specifier, e.g. 'Concept auto'
        placeholder_type = None
        if self.skip_word('auto'):
            placeholder_type = 'auto'
        elif self.skip_word_and_ws('decltype'):
            if not self.skip_string_and_ws('('):
                self.fail(
                    "Expected '(' after 'decltype' in placeholder type specifier."
                )
            if not self.skip_word_and_ws('auto'):
                self.fail(
                    "Expected 'auto' after 'decltype(' in placeholder type specifier."
                )
            if not self.skip_string_and_ws(')'):
                self.fail(
                    "Expected ')' after 'decltype(auto' in placeholder type specifier."
                )
            placeholder_type = 'decltype(auto)'
        return ASTTrailingTypeSpecName(prefix, nested_name, placeholder_type)
    def _parse_parameters_and_qualifiers(
        self, param_mode: str
    ) -> ASTParametersQualifiers | None:
        """Parse a function parameter list plus trailing qualifiers.

        Covers the parenthesised parameter list, cv-qualifiers,
        ref-qualifier, ``noexcept``, trailing return type,
        ``override``/``final``, attributes, and a pure-virtual/deleted/
        defaulted initializer.  Returns None when *param_mode* is 'new'
        or when there is no '(' and the mode does not require one.
        """
        if param_mode == 'new':
            return None
        self.skip_ws()
        if not self.skip_string('('):
            if param_mode == 'function':
                self.fail('Expecting "(" in parameters-and-qualifiers.')
            else:
                return None
        args = []
        self.skip_ws()
        if not self.skip_string(')'):
            while 1:
                self.skip_ws()
                if self.skip_string('...'):
                    # ellipsis parameter must be last
                    args.append(ASTFunctionParameter(None, True))
                    self.skip_ws()
                    if not self.skip_string(')'):
                        self.fail(
                            'Expected ")" after "..." in parameters-and-qualifiers.'
                        )
                    break
                # note: it seems that function arguments can always be named,
                # even in function pointers and similar.
                arg = self._parse_type_with_init(outer=None, named='single')
                # TODO: parse default parameters # TODO: didn't we just do that?
                args.append(ASTFunctionParameter(arg))
                self.skip_ws()
                if self.skip_string(','):
                    continue
                if self.skip_string(')'):
                    break
                self.fail(
                    'Expecting "," or ")" in parameters-and-qualifiers, '
                    f'got "{self.current_char}".'
                )
        self.skip_ws()
        const = self.skip_word_and_ws('const')
        volatile = self.skip_word_and_ws('volatile')
        if not const:  # they can be permuted
            const = self.skip_word_and_ws('const')
        ref_qual = None
        if self.skip_string('&&'):
            ref_qual = '&&'
        if not ref_qual and self.skip_string('&'):
            ref_qual = '&'
        exception_spec = None
        self.skip_ws()
        if self.skip_string('noexcept'):
            if self.skip_string_and_ws('('):
                expr = self._parse_constant_expression(False)
                self.skip_ws()
                if not self.skip_string(')'):
                    self.fail("Expecting ')' to end 'noexcept'.")
                exception_spec = ASTNoexceptSpec(expr)
            else:
                exception_spec = ASTNoexceptSpec(None)
        self.skip_ws()
        if self.skip_string('->'):
            trailing_return = self._parse_type(named=False)
        else:
            trailing_return = None
        self.skip_ws()
        override = self.skip_word_and_ws('override')
        final = self.skip_word_and_ws('final')
        if not override:
            override = self.skip_word_and_ws('override')  # they can be permuted
        attrs = self._parse_attribute_list()
        self.skip_ws()
        initializer = None
        # if this is a function pointer we should not swallow an initializer
        if param_mode == 'function' and self.skip_string('='):
            self.skip_ws()
            valid = ('0', 'delete', 'default')
            for w in valid:
                if self.skip_word_and_ws(w):
                    initializer = w
                    break
            if not initializer:
                self.fail(
                    'Expected "%s" in initializer-specifier.' % '" or "'.join(valid)
                )
        return ASTParametersQualifiers(
            args,
            volatile,
            const,
            ref_qual,
            exception_spec,
            trailing_return,
            override,
            final,
            attrs,
            initializer,
        )
    def _parse_decl_specs_simple(
        self, outer: str | None, typed: bool
    ) -> ASTDeclSpecsSimple:
        """Just parse the simple ones.

        Accepts any permutation of the applicable decl-specifiers
        (storage class, cv-qualifiers, inline/virtual/explicit,
        constexpr/consteval/constinit, thread_local, friend, attributes)
        until none match.  Which specifiers are allowed depends on
        *outer* ('member', 'function', ...) and *typed*.
        """
        storage = None
        thread_local = None
        inline = None
        virtual = None
        explicit_spec = None
        consteval = None
        constexpr = None
        constinit = None
        volatile = None
        const = None
        friend = None
        attrs = []
        while 1:  # accept any permutation of a subset of some decl-specs
            self.skip_ws()
            if not const and typed:
                const = self.skip_word('const')
                if const:
                    continue
            if not volatile and typed:
                volatile = self.skip_word('volatile')
                if volatile:
                    continue
            if not storage:
                if outer in {'member', 'function'}:
                    if self.skip_word('static'):
                        storage = 'static'
                        continue
                    if self.skip_word('extern'):
                        storage = 'extern'
                        continue
                if outer == 'member':
                    if self.skip_word('mutable'):
                        storage = 'mutable'
                        continue
                if self.skip_word('register'):
                    storage = 'register'
                    continue
            if not inline and outer in {'function', 'member'}:
                inline = self.skip_word('inline')
                if inline:
                    continue
            if not constexpr and outer in {'member', 'function'}:
                constexpr = self.skip_word('constexpr')
                if constexpr:
                    continue
            if outer == 'member':
                if not constinit:
                    constinit = self.skip_word('constinit')
                    if constinit:
                        continue
                if not thread_local:
                    thread_local = self.skip_word('thread_local')
                    if thread_local:
                        continue
            if outer == 'function':
                if not consteval:
                    consteval = self.skip_word('consteval')
                    if consteval:
                        continue
                if not friend:
                    friend = self.skip_word('friend')
                    if friend:
                        continue
                if not virtual:
                    virtual = self.skip_word('virtual')
                    if virtual:
                        continue
                if not explicit_spec:
                    explicit = self.skip_word_and_ws('explicit')
                    if explicit:
                        # optional conditional form: explicit(expr)
                        expr: ASTExpression | None = None
                        if self.skip_string('('):
                            expr = self._parse_constant_expression(in_template=False)
                            if not expr:
                                self.fail(
                                    "Expected constant expression after '('"
                                    ' in explicit specifier.'
                                )
                            self.skip_ws()
                            if not self.skip_string(')'):
                                self.fail("Expected ')' to end explicit specifier.")
                        explicit_spec = ASTExplicitSpec(expr)
                        continue
            attr = self._parse_attribute()
            if attr:
                attrs.append(attr)
                continue
            break
        return ASTDeclSpecsSimple(
            storage,
            thread_local,
            inline,
            virtual,
            explicit_spec,
            consteval,
            constexpr,
            constinit,
            volatile,
            const,
            friend,
            ASTAttributeList(attrs),
        )
def _parse_decl_specs(self, outer: str, typed: bool = True) -> ASTDeclSpecs:
if outer:
if outer not in {'type', 'member', 'function', 'templateParam'}:
raise Exception('Internal error, unknown outer "%s".' % outer)
"""
storage-class-specifier function-specifier "constexpr"
"volatile" "const" trailing-type-specifier
storage-class-specifier ->
"static" (only for member_object and function_object)
| "register"
function-specifier -> "inline" | "virtual" | "explicit" (only for
function_object)
"constexpr" (only for member_object and function_object)
"""
left_specs = self._parse_decl_specs_simple(outer, typed)
right_specs = None
if typed:
trailing = self._parse_trailing_type_spec()
right_specs = self._parse_decl_specs_simple(outer, typed)
else:
trailing = None
return ASTDeclSpecs(outer, left_specs, right_specs, trailing)
    def _parse_declarator_name_suffix(
        self, named: bool | str, param_mode: str, typed: bool
    ) -> ASTDeclaratorNameParamQual | ASTDeclaratorNameBitField:
        """Parse the declarator-id and its suffixes.

        Parses the (optional) name according to *named*
        (False | 'maybe' | 'single' | True), then array operators,
        parameters-and-qualifiers, or a bit-field size.
        """
        # now we should parse the name, and then suffixes
        if named == 'maybe':
            # name is optional: backtrack if it fails
            pos = self.pos
            try:
                decl_id = self._parse_nested_name()
            except DefinitionError:
                self.pos = pos
                decl_id = None
        elif named == 'single':
            # exactly one plain identifier, no nesting
            if self.match(identifier_re):
                identifier = ASTIdentifier(self.matched_text)
                nne = ASTNestedNameElement(identifier, None)
                decl_id = ASTNestedName([nne], [False], rooted=False)
                # if it's a member pointer, we may have '::', which should be an error
                self.skip_ws()
                if self.current_char == ':':
                    self.fail("Unexpected ':' after identifier.")
            else:
                decl_id = None
        elif named:
            decl_id = self._parse_nested_name()
        else:
            decl_id = None
        array_ops = []
        while 1:
            self.skip_ws()
            if typed and self.skip_string('['):
                self.skip_ws()
                if self.skip_string(']'):
                    # unsized array: []
                    array_ops.append(ASTArray(None))
                    continue
                parser = self._parse_expression
                value = self._parse_expression_fallback([']'], parser)
                if not self.skip_string(']'):
                    self.fail("Expected ']' in end of array operator.")
                array_ops.append(ASTArray(value))
                continue
            break
        param_qual = self._parse_parameters_and_qualifiers(param_mode)
        if param_qual is None and len(array_ops) == 0:
            # perhaps a bit-field
            if named and param_mode == 'type' and typed:
                self.skip_ws()
                if self.skip_string(':'):
                    size = self._parse_constant_expression(in_template=False)
                    return ASTDeclaratorNameBitField(declId=decl_id, size=size)
        return ASTDeclaratorNameParamQual(
            declId=decl_id, arrayOps=array_ops, paramQual=param_qual
        )
    def _parse_declarator(
        self, named: bool | str, param_mode: str, typed: bool = True
    ) -> ASTDeclarator:
        """Parse a declarator: pointers, references, parameter packs,
        parenthesised declarators, pointers-to-member, and finally the
        declarator-id with its suffixes.

        :param named: whether/how the declarator may carry a name
            (False | 'maybe' | 'single' | True).
        :param param_mode: one of 'type', 'function', 'operatorCast', 'new'.
        :param typed: whether to parse "return type stuff"
            (pointers/references/etc.).
        """
        # 'typed' here means 'parse return type stuff'
        if param_mode not in {'type', 'function', 'operatorCast', 'new'}:
            raise Exception("Internal error, unknown param_mode '%s'." % param_mode)
        prev_errors = []
        self.skip_ws()
        if typed and self.skip_string('*'):
            # pointer declarator with optional cv-qualifiers and attributes
            self.skip_ws()
            volatile = False
            const = False
            attr_list = []
            while 1:
                if not volatile:
                    volatile = self.skip_word_and_ws('volatile')
                    if volatile:
                        continue
                if not const:
                    const = self.skip_word_and_ws('const')
                    if const:
                        continue
                attr = self._parse_attribute()
                if attr is not None:
                    attr_list.append(attr)
                    continue
                break
            next = self._parse_declarator(named, param_mode, typed)
            return ASTDeclaratorPtr(
                next=next,
                volatile=volatile,
                const=const,
                attrs=ASTAttributeList(attr_list),
            )
        # TODO: shouldn't we parse an R-value ref here first?
        if typed and self.skip_string('&'):
            attrs = self._parse_attribute_list()
            next = self._parse_declarator(named, param_mode, typed)
            return ASTDeclaratorRef(next=next, attrs=attrs)
        if typed and self.skip_string('...'):
            next = self._parse_declarator(named, param_mode, False)
            return ASTDeclaratorParamPack(next=next)
        if typed and self.current_char == '(':  # note: peeking, not skipping
            if param_mode == 'operatorCast':
                # TODO: we should be able to parse cast operators which return
                # function pointers. For now, just hax it and ignore.
                return ASTDeclaratorNameParamQual(
                    declId=None, arrayOps=[], paramQual=None
                )
            # maybe this is the beginning of params and quals,try that first,
            # otherwise assume it's noptr->declarator > ( ptr-declarator )
            pos = self.pos
            try:
                # assume this is params and quals
                res = self._parse_declarator_name_suffix(named, param_mode, typed)
                return res
            except DefinitionError as ex_param_qual:
                prev_errors.append((
                    ex_param_qual,
                    'If declarator-id with parameters-and-qualifiers',
                ))
                self.pos = pos
                try:
                    assert self.current_char == '('
                    self.skip_string('(')
                    # TODO: hmm, if there is a name, it must be in inner, right?
                    # TODO: hmm, if there must be parameters, they must be
                    #       inside, right?
                    inner = self._parse_declarator(named, param_mode, typed)
                    if not self.skip_string(')'):
                        self.fail('Expected \')\' in "( ptr-declarator )"')
                    next = self._parse_declarator(
                        named=False, param_mode='type', typed=typed
                    )
                    return ASTDeclaratorParen(inner=inner, next=next)
                except DefinitionError as ex_no_ptr_paren:
                    self.pos = pos
                    prev_errors.append((
                        ex_no_ptr_paren,
                        'If parenthesis in noptr-declarator',
                    ))
                    header = 'Error in declarator'
                    raise self._make_multi_error(
                        prev_errors, header
                    ) from ex_no_ptr_paren
        if typed:  # pointer to member
            pos = self.pos
            try:
                name = self._parse_nested_name(member_pointer=True)
                self.skip_ws()
                if not self.skip_string('*'):
                    self.fail("Expected '*' in pointer to member declarator.")
                self.skip_ws()
            except DefinitionError as e:
                self.pos = pos
                prev_errors.append((e, 'If pointer to member declarator'))
            else:
                # cv-qualifiers may follow the '*' in any order
                volatile = False
                const = False
                while 1:
                    if not volatile:
                        volatile = self.skip_word_and_ws('volatile')
                        if volatile:
                            continue
                    if not const:
                        const = self.skip_word_and_ws('const')
                        if const:
                            continue
                    break
                next = self._parse_declarator(named, param_mode, typed)
                return ASTDeclaratorMemPtr(name, const, volatile, next=next)
        pos = self.pos
        try:
            res = self._parse_declarator_name_suffix(named, param_mode, typed)
            # this is a heuristic for error messages, for when there is a < after a
            # nested name, but it was not a successful template argument list
            if self.current_char == '<':
                self.otherErrors.append(self._make_multi_error(prev_errors, ''))
            return res
        except DefinitionError as e:
            self.pos = pos
            prev_errors.append((e, 'If declarator-id'))
            header = 'Error in declarator or parameters-and-qualifiers'
            raise self._make_multi_error(prev_errors, header) from e
    def _parse_initializer(
        self, outer: str | None = None, allow_fallback: bool = True
    ) -> ASTInitializer | None:
        """Parse an initializer (braced-init-list or '=' form).

        Returns None when no initializer is present.  The permitted
        fallback terminator characters depend on *outer*.
        """
        # initializer                           # global vars
        # -> brace-or-equal-initializer
        #  | '(' expression-list ')'
        #
        # brace-or-equal-initializer            # member vars
        # -> '=' initializer-clause
        #  | braced-init-list
        #
        # initializer-clause  # function params, non-type template params (with '=' in front)
        # -> assignment-expression
        #  | braced-init-list
        #
        # we don't distinguish between global and member vars, so disallow paren:
        #
        # -> braced-init-list                   # var only
        #  | '=' assignment-expression
        #  | '=' braced-init-list
        self.skip_ws()
        if outer == 'member':
            # members may use a braced-init-list without '='
            braced_init = self._parse_braced_init_list()
            if braced_init is not None:
                return ASTInitializer(braced_init, hasAssign=False)
        if not self.skip_string('='):
            return None
        braced_init = self._parse_braced_init_list()
        if braced_init is not None:
            return ASTInitializer(braced_init)
        # plain expression after '='; pick the fallback terminators by context
        if outer == 'member':
            fallback_end: list[str] = []
        elif outer == 'templateParam':
            fallback_end = [',', '>']
        elif outer is None:  # function parameter
            fallback_end = [',', ')']
        else:
            self.fail(
                "Internal error, initializer for outer '%s' not implemented." % outer
            )
        in_template = outer == 'templateParam'
        def parser() -> ASTExpression:
            return self._parse_assignment_expression(in_template=in_template)
        value = self._parse_expression_fallback(
            fallback_end, parser, allow=allow_fallback
        )
        return ASTInitializer(value)
    def _parse_type(self, named: bool | str, outer: str | None = None) -> ASTType:
        """Parse a type: decl-specifiers followed by a declarator.

        named=False|'maybe'|True: 'maybe' is e.g., for function objects which
        doesn't need to name the arguments.

        outer == operatorCast: annoying case, we should not take the params.

        For outer in {'type', 'function'} the untyped form (just a name /
        no return type) is tried first, then the fully typed form.
        """
        if outer:  # always named
            if outer not in {
                'type',
                'member',
                'function',
                'operatorCast',
                'templateParam',
            }:
                raise Exception('Internal error, unknown outer "%s".' % outer)
            if outer != 'operatorCast':
                assert named
        if outer in {'type', 'function'}:
            # We allow type objects to just be a name.
            # Some functions don't have normal return types: constructors,
            # destructors, cast operators
            prev_errors = []
            start_pos = self.pos
            # first try without the type
            try:
                decl_specs = self._parse_decl_specs(outer=outer, typed=False)
                decl = self._parse_declarator(named=True, param_mode=outer, typed=False)
                must_end = True
                if outer == 'function':
                    # Allow trailing requires on functions.
                    self.skip_ws()
                    if re.compile(r'requires\b').match(self.definition, self.pos):
                        must_end = False
                if must_end:
                    self.assert_end(allowSemicolon=True)
            except DefinitionError as ex_untyped:
                if outer == 'type':
                    desc = 'If just a name'
                elif outer == 'function':
                    desc = 'If the function has no return type'
                else:
                    raise AssertionError from ex_untyped
                prev_errors.append((ex_untyped, desc))
                self.pos = start_pos
                try:
                    # second attempt: with the full type
                    decl_specs = self._parse_decl_specs(outer=outer)
                    decl = self._parse_declarator(named=True, param_mode=outer)
                except DefinitionError as ex_typed:
                    self.pos = start_pos
                    if outer == 'type':
                        desc = 'If typedef-like declaration'
                    elif outer == 'function':
                        desc = 'If the function has a return type'
                    else:
                        raise AssertionError from ex_untyped
                    prev_errors.append((ex_typed, desc))
                    # Retain the else branch for easier debugging.
                    # TODO: it would be nice to save the previous stacktrace
                    #       and output it here.
                    if True:
                        if outer == 'type':
                            header = 'Type must be either just a name or a '
                            header += 'typedef-like declaration.'
                        elif outer == 'function':
                            header = 'Error when parsing function declaration.'
                        else:
                            raise AssertionError from ex_untyped
                        raise self._make_multi_error(prev_errors, header) from ex_typed
                    else:
                        # For testing purposes.
                        # do it again to get the proper traceback (how do you
                        # reliably save a traceback when an exception is
                        # constructed?)
                        self.pos = start_pos
                        typed = True
                        decl_specs = self._parse_decl_specs(outer=outer, typed=typed)
                        decl = self._parse_declarator(
                            named=True, param_mode=outer, typed=typed
                        )
        else:
            # single-pass parse for the remaining contexts
            param_mode = 'type'
            if outer == 'member':
                named = True
            elif outer == 'operatorCast':
                param_mode = 'operatorCast'
                outer = None
            elif outer == 'templateParam':
                named = 'single'
            decl_specs = self._parse_decl_specs(outer=outer)
            decl = self._parse_declarator(named=named, param_mode=param_mode)
        return ASTType(decl_specs, decl)
def _parse_type_with_init(
self, named: bool | str, outer: str
) -> ASTTypeWithInit | ASTTemplateParamConstrainedTypeWithInit:
if outer:
assert outer in {'type', 'member', 'function', 'templateParam'}
type = self._parse_type(outer=outer, named=named)
if outer != 'templateParam':
init = self._parse_initializer(outer=outer)
return ASTTypeWithInit(type, init)
# it could also be a constrained type parameter, e.g., C T = int&
pos = self.pos
e_expr = None
try:
init = self._parse_initializer(outer=outer, allow_fallback=False)
# note: init may be None if there is no =
if init is None:
return ASTTypeWithInit(type, None)
# we parsed an expression, so we must have a , or a >,
# otherwise the expression didn't get everything
self.skip_ws()
if self.current_char not in {',', '>'}:
# pretend it didn't happen
self.pos = pos
init = None
else:
# we assume that it was indeed an expression
return ASTTypeWithInit(type, init)
except DefinitionError as e:
self.pos = pos
e_expr = e
if not self.skip_string('='):
return ASTTypeWithInit(type, None)
try:
type_init = self._parse_type(named=False, outer=None)
return ASTTemplateParamConstrainedTypeWithInit(type, type_init)
except DefinitionError as e_type:
if e_expr is None:
raise
errs = [
(e_expr, 'If default template argument is an expression'),
(e_type, 'If default template argument is a type'),
]
msg = 'Error in non-type template parameter'
msg += ' or constrained template parameter.'
raise self._make_multi_error(errs, msg) from e_type
def _parse_type_using(self) -> ASTTypeUsing:
name = self._parse_nested_name()
self.skip_ws()
if not self.skip_string('='):
return ASTTypeUsing(name, None)
type = self._parse_type(False, None)
return ASTTypeUsing(name, type)
def _parse_concept(self) -> ASTConcept:
nested_name = self._parse_nested_name()
self.skip_ws()
initializer = self._parse_initializer('member')
return ASTConcept(nested_name, initializer)
def _parse_class(self) -> ASTClass:
attrs = self._parse_attribute_list()
name = self._parse_nested_name()
self.skip_ws()
final = self.skip_word_and_ws('final')
bases = []
self.skip_ws()
if self.skip_string(':'):
while 1:
self.skip_ws()
visibility = None
virtual = False
pack = False
if self.skip_word_and_ws('virtual'):
virtual = True
if self.match(_visibility_re):
visibility = self.matched_text
self.skip_ws()
if not virtual and self.skip_word_and_ws('virtual'):
virtual = True
base_name = self._parse_nested_name()
self.skip_ws()
pack = self.skip_string('...')
bases.append(ASTBaseClass(base_name, visibility, virtual, pack))
self.skip_ws()
if self.skip_string(','):
continue
break
return ASTClass(name, final, bases, attrs)
def _parse_union(self) -> ASTUnion:
attrs = self._parse_attribute_list()
name = self._parse_nested_name()
return ASTUnion(name, attrs)
def _parse_enum(self) -> ASTEnum:
scoped = None # is set by CPPEnumObject
attrs = self._parse_attribute_list()
name = self._parse_nested_name()
self.skip_ws()
underlying_type = None
if self.skip_string(':'):
underlying_type = self._parse_type(named=False)
return ASTEnum(name, scoped, underlying_type, attrs)
def _parse_enumerator(self) -> ASTEnumerator:
name = self._parse_nested_name()
attrs = self._parse_attribute_list()
self.skip_ws()
init = None
if self.skip_string('='):
self.skip_ws()
def parser() -> ASTExpression:
return self._parse_constant_expression(in_template=False)
init_val = self._parse_expression_fallback([], parser)
init = ASTInitializer(init_val)
return ASTEnumerator(name, init, attrs)
# ==========================================================================
def _parse_template_parameter(self) -> ASTTemplateParam:
self.skip_ws()
if self.skip_word('template'):
# declare a template template parameter
nested_params = self._parse_template_parameter_list()
else:
nested_params = None
pos = self.pos
try:
# Unconstrained type parameter or template type parameter
key = None
self.skip_ws()
if self.skip_word_and_ws('typename'):
key = 'typename'
elif self.skip_word_and_ws('class'):
key = 'class'
elif nested_params:
self.fail(
"Expected 'typename' or 'class' after "
'template template parameter list.'
)
else:
self.fail(
"Expected 'typename' or 'class' in the "
'beginning of template type parameter.'
)
self.skip_ws()
parameter_pack = self.skip_string('...')
self.skip_ws()
if self.match(identifier_re):
identifier = ASTIdentifier(self.matched_text)
else:
identifier = None
self.skip_ws()
if not parameter_pack and self.skip_string('='):
default = self._parse_type(named=False, outer=None)
else:
default = None
if self.current_char not in ',>':
self.fail('Expected "," or ">" after (template) type parameter.')
data = ASTTemplateKeyParamPackIdDefault(
key, identifier, parameter_pack, default
)
if nested_params:
return ASTTemplateParamTemplateType(nested_params, data)
else:
return ASTTemplateParamType(data)
except DefinitionError as e_type:
if nested_params:
raise
try:
# non-type parameter or constrained type parameter
self.pos = pos
param = self._parse_type_with_init('maybe', 'templateParam')
self.skip_ws()
parameter_pack = self.skip_string('...')
return ASTTemplateParamNonType(param, parameter_pack)
except DefinitionError as e_non_type:
self.pos = pos
header = 'Error when parsing template parameter.'
errs = [
(
e_type,
'If unconstrained type parameter or template type parameter',
),
(
e_non_type,
'If constrained type parameter or non-type parameter',
),
]
raise self._make_multi_error(errs, header) from None
def _parse_template_parameter_list(self) -> ASTTemplateParams:
# only: '<' parameter-list '>'
# we assume that 'template' has just been parsed
template_params: list[ASTTemplateParam] = []
self.skip_ws()
if not self.skip_string('<'):
self.fail("Expected '<' after 'template'")
while True:
pos = self.pos
err = None
try:
param = self._parse_template_parameter()
template_params.append(param)
except DefinitionError as e_param:
self.pos = pos
err = e_param
self.skip_ws()
if self.skip_string('>'):
requires_clause = self._parse_requires_clause()
return ASTTemplateParams(template_params, requires_clause)
elif self.skip_string(','):
continue
else:
header = 'Error in template parameter list.'
errs = []
if err:
errs.append((err, 'If parameter'))
try:
self.fail('Expected "," or ">".')
except DefinitionError as e:
errs.append((e, 'If no parameter'))
logger.debug(errs)
raise self._make_multi_error(errs, header)
def _parse_template_introduction(self) -> ASTTemplateIntroduction | None:
pos = self.pos
try:
concept = self._parse_nested_name()
except Exception:
self.pos = pos
return None
self.skip_ws()
if not self.skip_string('{'):
self.pos = pos
return None
# for sure it must be a template introduction now
params = []
while 1:
self.skip_ws()
parameter_pack = self.skip_string('...')
self.skip_ws()
if not self.match(identifier_re):
self.fail('Expected identifier in template introduction list.')
txt_identifier = self.matched_text
# make sure there isn't a keyword
if txt_identifier in _keywords:
self.fail(
'Expected identifier in template introduction list, '
'got keyword: %s' % txt_identifier
)
identifier = ASTIdentifier(txt_identifier)
params.append(ASTTemplateIntroductionParameter(identifier, parameter_pack))
self.skip_ws()
if self.skip_string('}'):
break
if self.skip_string(','):
continue
self.fail('Error in template introduction list. Expected ",", or "}".')
return ASTTemplateIntroduction(concept, params)
def _parse_requires_clause(self) -> ASTRequiresClause | None:
# requires-clause -> 'requires' constraint-logical-or-expression
# constraint-logical-or-expression
# -> constraint-logical-and-expression
# | constraint-logical-or-expression '||' constraint-logical-and-expression
# constraint-logical-and-expression
# -> primary-expression
# | constraint-logical-and-expression '&&' primary-expression
self.skip_ws()
if not self.skip_word('requires'):
return None
def parse_and_expr(self: DefinitionParser) -> ASTExpression:
and_exprs = []
ops = []
and_exprs.append(self._parse_primary_expression())
while True:
self.skip_ws()
one_more = False
if self.skip_string('&&'):
one_more = True
ops.append('&&')
elif self.skip_word('and'):
one_more = True
ops.append('and')
if not one_more:
break
and_exprs.append(self._parse_primary_expression())
if len(and_exprs) == 1:
return and_exprs[0]
else:
return ASTBinOpExpr(and_exprs, ops)
or_exprs = []
ops = []
or_exprs.append(parse_and_expr(self))
while True:
self.skip_ws()
one_more = False
if self.skip_string('||'):
one_more = True
ops.append('||')
elif self.skip_word('or'):
one_more = True
ops.append('or')
if not one_more:
break
or_exprs.append(parse_and_expr(self))
if len(or_exprs) == 1:
return ASTRequiresClause(or_exprs[0])
else:
return ASTRequiresClause(ASTBinOpExpr(or_exprs, ops))
def _parse_template_declaration_prefix(
self, object_type: str
) -> ASTTemplateDeclarationPrefix | None:
templates: list[ASTTemplateParams | ASTTemplateIntroduction] = []
while 1:
self.skip_ws()
# the saved position is only used to provide a better error message
params: ASTTemplateParams | ASTTemplateIntroduction | None = None
pos = self.pos
if self.skip_word('template'):
try:
params = self._parse_template_parameter_list()
except DefinitionError as e:
if object_type == 'member' and len(templates) == 0:
return ASTTemplateDeclarationPrefix(None)
else:
raise
if object_type == 'concept' and params.requiresClause is not None:
self.fail('requires-clause not allowed for concept')
else:
params = self._parse_template_introduction()
if not params:
break
if object_type == 'concept' and len(templates) > 0:
self.pos = pos
self.fail('More than 1 template parameter list for concept.')
templates.append(params)
if len(templates) == 0 and object_type == 'concept':
self.fail('Missing template parameter list for concept.')
if len(templates) == 0:
return None
else:
return ASTTemplateDeclarationPrefix(templates)
def _check_template_consistency(
self,
nested_name: ASTNestedName,
template_prefix: ASTTemplateDeclarationPrefix,
full_spec_shorthand: bool,
is_member: bool = False,
) -> ASTTemplateDeclarationPrefix:
num_args = nested_name.num_templates()
is_member_instantiation = False
if not template_prefix:
num_params = 0
else:
if is_member and template_prefix.templates is None:
num_params = 0
is_member_instantiation = True
else:
num_params = len(template_prefix.templates)
if num_args + 1 < num_params:
self.fail(
'Too few template argument lists compared to parameter'
' lists. Argument lists: %d, Parameter lists: %d.'
% (num_args, num_params)
)
if num_args > num_params:
num_extra = num_args - num_params
if not full_spec_shorthand and not is_member_instantiation:
msg = (
f'Too many template argument lists compared to parameter lists. '
f'Argument lists: {num_args:d}, Parameter lists: {num_params:d}, '
f'Extra empty parameters lists prepended: {num_extra:d}. '
'Declaration:\n\t'
)
if template_prefix:
msg += f'{template_prefix}\n\t'
msg += str(nested_name)
self.warn(msg)
new_templates: list[ASTTemplateParams | ASTTemplateIntroduction] = [
ASTTemplateParams([], requiresClause=None) for _i in range(num_extra)
]
if template_prefix and not is_member_instantiation:
new_templates.extend(template_prefix.templates)
template_prefix = ASTTemplateDeclarationPrefix(new_templates)
return template_prefix
def parse_declaration(self, objectType: str, directiveType: str) -> ASTDeclaration:
object_type = objectType
directive_type = directiveType
if object_type not in {
'class',
'union',
'function',
'member',
'type',
'concept',
'enum',
'enumerator',
}:
raise Exception('Internal error, unknown objectType "%s".' % object_type)
if directive_type not in {
'class',
'struct',
'union',
'function',
'member',
'var',
'type',
'concept',
'enum',
'enum-struct',
'enum-class',
'enumerator',
}:
raise Exception(
'Internal error, unknown directiveType "%s".' % directive_type
)
visibility = None
template_prefix = None
trailing_requires_clause = None
declaration: Any = None
self.skip_ws()
if self.match(_visibility_re):
visibility = self.matched_text
if object_type in {'type', 'concept', 'member', 'function', 'class', 'union'}:
template_prefix = self._parse_template_declaration_prefix(object_type)
if object_type == 'type':
prev_errors = []
pos = self.pos
try:
if not template_prefix:
declaration = self._parse_type(named=True, outer='type')
except DefinitionError as e:
prev_errors.append((e, 'If typedef-like declaration'))
self.pos = pos
pos = self.pos
try:
if not declaration:
declaration = self._parse_type_using()
except DefinitionError as e:
self.pos = pos
prev_errors.append((e, 'If type alias or template alias'))
header = 'Error in type declaration.'
raise self._make_multi_error(prev_errors, header) from e
elif object_type == 'concept':
declaration = self._parse_concept()
elif object_type == 'member':
declaration = self._parse_type_with_init(named=True, outer='member')
elif object_type == 'function':
declaration = self._parse_type(named=True, outer='function')
trailing_requires_clause = self._parse_requires_clause()
elif object_type == 'class':
declaration = self._parse_class()
elif object_type == 'union':
declaration = self._parse_union()
elif object_type == 'enum':
declaration = self._parse_enum()
elif object_type == 'enumerator':
declaration = self._parse_enumerator()
else:
raise AssertionError
template_prefix = self._check_template_consistency(
declaration.name,
template_prefix,
full_spec_shorthand=False,
is_member=object_type == 'member',
)
self.skip_ws()
semicolon = self.skip_string(';')
return ASTDeclaration(
object_type,
directive_type,
visibility,
template_prefix,
declaration,
trailing_requires_clause,
semicolon,
)
def parse_namespace_object(self) -> ASTNamespace:
template_prefix = self._parse_template_declaration_prefix(
object_type='namespace'
)
name = self._parse_nested_name()
template_prefix = self._check_template_consistency(
name, template_prefix, full_spec_shorthand=False
)
res = ASTNamespace(name, template_prefix)
res.objectType = 'namespace' # type: ignore[attr-defined]
return res
def parse_xref_object(self) -> tuple[ASTNamespace | ASTDeclaration, bool]:
pos = self.pos
try:
template_prefix = self._parse_template_declaration_prefix(
object_type='xref'
)
name = self._parse_nested_name()
# if there are '()' left, just skip them
self.skip_ws()
self.skip_string('()')
self.assert_end()
template_prefix = self._check_template_consistency(
name, template_prefix, full_spec_shorthand=True
)
res1 = ASTNamespace(name, template_prefix)
res1.objectType = 'xref' # type: ignore[attr-defined]
return res1, True
except DefinitionError as e1:
try:
self.pos = pos
res2 = self.parse_declaration('function', 'function')
# if there are '()' left, just skip them
self.skip_ws()
self.skip_string('()')
self.assert_end()
return res2, False
except DefinitionError as e2:
errs = [
(e1, 'If shorthand ref'),
(e2, 'If full function ref'),
]
msg = 'Error in cross-reference.'
raise self._make_multi_error(errs, msg) from e2
def parse_expression(self) -> ASTExpression | ASTType:
pos = self.pos
try:
expr = self._parse_expression()
self.skip_ws()
self.assert_end()
return expr
except DefinitionError as ex_expr:
self.pos = pos
try:
typ = self._parse_type(False)
self.skip_ws()
self.assert_end()
return typ
except DefinitionError as ex_type:
header = 'Error when parsing (type) expression.'
errs = [(ex_expr, 'If expression'), (ex_type, 'If type')]
raise self._make_multi_error(errs, header) from ex_type
| DefinitionParser |
python | pytorch__pytorch | test/inductor/test_codecache.py | {
"start": 107619,
"end": 116439
} | class ____(TestCase):
device_type = GPU_TYPE
def setUp(self):
super().setUp()
counters.clear()
PatchCaches.setUp()
def tearDown(self):
super().tearDown()
PatchCaches.tearDown()
def reset(self):
PyCodeCache.cache_clear(purge=True)
torch._dynamo.reset()
clear_caches()
@requires_cuda_and_triton
@unittest.skipIf(not SM80OrLater, "Requires SM80+")
@unittest.skipIf(
TEST_WITH_ROCM, "Requires static cuda launcher, which does not support ROCM"
)
@config.patch({"use_static_cuda_launcher": True})
@config.patch({"fx_graph_cache": True})
@config.patch({"fx_graph_remote_cache": False})
@config.patch({"autotune_local_cache": False})
@config.patch({"autotune_remote_cache": True})
@config.patch({"bundled_autotune_remote_cache": False})
@config.patch({"max_autotune": True})
@config.patch(
{"compile_threads": 1}
) # Worker processes do not register PatchCaches() properly
def test_autotune_cache_warm_start(self):
class Model(torch.nn.Module):
def forward(self, x, y, a, b):
return x + y, a + b
def f(x, y, a, b):
return Model()(x, y, a, b)
x = torch.randn(100, 100).cuda()
y = torch.randn(100, 100).cuda()
a = torch.randn(1000, 100).cuda()
b = torch.randn(1000, 100).cuda()
f_compiled = torch.compile(f, fullgraph=True)
with PatchCaches():
a1 = f_compiled(x, y, a, b)
self.assertEqual(global_stats.autotune_remote, Stats(2, 0, 2))
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
# Don't reset FxGraphCache, see that it loads again
torch._dynamo.reset()
a2 = f_compiled(x, y, a, b)
self.assertEqual(a1, a2)
self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
self.assertEqual(global_stats.autotune_remote, Stats(2, 2, 2))
# Check that the cache entries seem reasonable
for k in global_stats.autotune_remote.cache:
self.assertRegex(k, r"[0-9a-z]{52}")
for k in global_stats.triton.cache:
self.assertRegex(k, r"triton:[0-9a-f]{64}::[0-9a-f]{64}:c[0-9]+")
@requires_gpu_and_triton
@unittest.skipIf(not HAS_XPU_AND_TRITON and not SM80OrLater, "Requires SM80+")
@config.patch({"fx_graph_cache": False})
@config.patch({"fx_graph_remote_cache": False})
@config.patch({"autotune_local_cache": False})
@config.patch({"autotune_remote_cache": True})
@config.patch({"bundled_autotune_remote_cache": False})
@config.patch({"max_autotune": True})
@config.patch(
{"compile_threads": 1}
) # Worker processes do not register PatchCaches() properly
def test_autotune_cache(self):
class Model(torch.nn.Module):
def forward(self, x, y, a, b):
return x + y, a + b
def f(x, y, a, b):
return Model()(x, y, a, b)
x = torch.randn(100, 100).to(GPU_TYPE)
y = torch.randn(100, 100).to(GPU_TYPE)
a = torch.randn(1000, 100).to(GPU_TYPE)
b = torch.randn(1000, 100).to(GPU_TYPE)
f_compiled = torch.compile(f, fullgraph=True)
with PatchCaches():
f_compiled(x, y, a, b)
self.assertEqual(global_stats.autotune_remote, Stats(2, 0, 2))
self.reset()
f_compiled(x, y, a, b)
self.assertEqual(global_stats.autotune_remote, Stats(2, 2, 2))
# Check that the cache entries seem reasonable
for k in global_stats.autotune_remote.cache:
self.assertRegex(k, r"[0-9a-z]{52}")
for k in global_stats.triton.cache:
self.assertRegex(k, r"triton:[0-9a-f]{64}::[0-9a-f]{64}:c[0-9]+")
@requires_gpu_and_triton
@unittest.skipIf(not HAS_XPU_AND_TRITON and not SM80OrLater, "Requires SM80+")
@config.patch({"fx_graph_cache": False})
@config.patch({"fx_graph_remote_cache": False})
@config.patch({"autotune_local_cache": True})
@config.patch({"autotune_remote_cache": False})
@config.patch({"bundled_autotune_remote_cache": True})
@config.patch({"compile_threads": 1})
@config.patch({"max_autotune": True})
def test_bundled_autotune_remote_cache(self):
class Model(torch.nn.Module):
def forward(self, a, b, c, d, e, f):
return a + b, c + d, e + f
def f(a, b, c, d, e, f):
return Model()(a, b, c, d, e, f)
f_compiled = torch.compile(f, fullgraph=True)
a = torch.randn(101, 100).to(GPU_TYPE)
b = torch.randn(101, 100).to(GPU_TYPE)
c = torch.randn(102, 100).to(GPU_TYPE)
d = torch.randn(102, 100).to(GPU_TYPE)
e = torch.randn(103, 100).to(GPU_TYPE)
f = torch.randn(103, 100).to(GPU_TYPE)
with PatchCaches():
f_compiled(a, b, c, d, e, f)
self.assertEqual(global_stats.autotune_local, Stats(3, 0, 3))
self.assertEqual(global_stats.bundled_autotune, Stats(1, 0, 1))
self.reset()
f_compiled(a, b, c, d, e, f)
self.assertEqual(global_stats.autotune_local, Stats(6, 3, 3))
self.assertEqual(global_stats.bundled_autotune, Stats(1, 1, 1))
with torch.compiler.config.patch({"cache_key_tag": "test"}):
global_stats.reset()
self.reset()
f_compiled(a, b, c, d, e, f)
self.assertEqual(global_stats.autotune_local, Stats(3, 0, 3))
self.assertEqual(global_stats.bundled_autotune, Stats(1, 0, 1))
self.reset()
f_compiled(a, b, c, d, e, f)
self.assertEqual(global_stats.autotune_local, Stats(6, 3, 3))
self.assertEqual(global_stats.bundled_autotune, Stats(1, 1, 1))
# Check that the cache entries seem reasonable
for k in global_stats.autotune_local.cache:
self.assertRegex(k, r"tmp[^/]*/([^/]{2})/[^/]{64}\.best_config")
for k in global_stats.bundled_autotune.cache:
self.assertRegex(k, r"pt2:bundled-autotune-v1::[0-9a-z]{64}:c[0-9]+")
for k in global_stats.triton.cache:
self.assertRegex(k, r"triton:[0-9a-f]{64}::[0-9a-f]{64}:c[0-9]+")
@requires_triton()
@requires_gpu_and_triton
@unittest.skipIf(not HAS_XPU_AND_TRITON and not SM80OrLater, "Requires SM80+")
@config.patch({"fx_graph_cache": False})
@config.patch({"fx_graph_remote_cache": False})
@config.patch({"bundled_autotune_remote_cache": False})
@config.patch({"max_autotune": True})
@config.patch(
{"compile_threads": 1}
) # Worker processes do not register PatchCaches() properly
@parametrize("remote_cache", (True, False))
def test_modified_autotune_cache(self, remote_cache):
"""
If a developer changes the way the autotune cache is handled,
there's a chance it'll break the cache. This happened with
#150122. This test ensures that if torch code changes, then
old cache entries will be invalidated.
"""
def mock_torch_key(value: str) -> bytes:
return value.encode("utf-8")
def get_autotune_stats():
if remote_cache:
return global_stats.autotune_remote
return global_stats.autotune_local
def fn(x, y):
return (x + y).relu()
x = torch.randn(100, 100).to(GPU_TYPE)
y = torch.randn(100, 100).to(GPU_TYPE)
with config.patch(
{
"autotune_local_cache": not remote_cache,
"autotune_remote_cache": remote_cache,
}
):
with PatchCaches():
with mock.patch(
"torch._inductor.codecache.torch_key",
functools.partial(mock_torch_key, "torchkey1"),
):
f_compiled = torch.compile(fn, fullgraph=True)
res1 = f_compiled(x, y)
self.assertEqual(get_autotune_stats(), Stats(1, 0, 1))
torch._dynamo.reset()
PyCodeCache.cache_clear()
with mock.patch(
"torch._inductor.codecache.torch_key",
functools.partial(mock_torch_key, "torchkey2"),
):
f_compiled = torch.compile(fn, fullgraph=True)
res2 = f_compiled(x, y)
self.assertEqual(get_autotune_stats(), Stats(2, 0, 2))
self.assertEqual(res1, res2)
| TestAutotuneCache |
python | jina-ai__jina | tests/integration/deployments/test_deployment.py | {
"start": 11600,
"end": 12613
} | class ____(Executor):
@requests(on='/foo')
def foo(self, docs, **kwargs): ...
@pytest.mark.parametrize(
'uses', [DummyExecutor, 'executor.yml']
)
def test_deployment_uses(uses):
depl = Deployment(uses=uses)
with depl:
pass
@pytest.mark.parametrize(
'config_file,expected_replicas,expected_shards,expected_text',
[
('deployment-nested-executor-config.yml', 3, 2, 'hello'),
('deployment-embedded-executor-config.yml', 2, 3, 'world'),
('deployment-overridden-executor-config.yml', 3, 3, 'helloworld'),
],
)
def test_deployment_load_config(
config_file, expected_replicas, expected_shards, expected_text
):
depl = Deployment.load_config(config_file)
with depl:
assert depl.args.replicas == expected_replicas
assert depl.args.shards == expected_shards
docs = depl.post(on='/', inputs=DocumentArray.empty(5))
assert len(docs) == 5
assert all(doc.text == expected_text for doc in docs)
| DummyExecutor |
python | pytorch__pytorch | torch/_classes.py | {
"start": 55,
"end": 459
} | class ____(types.ModuleType):
def __init__(self, name: str) -> None:
super().__init__("torch.classes" + name)
self.name = name
def __getattr__(self, attr: str) -> Any:
proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
if proxy is None:
raise RuntimeError(f"Class {self.name}.{attr} not registered!")
return proxy
| _ClassNamespace |
python | getsentry__sentry | src/sentry/grouping/enhancer/matchers.py | {
"start": 13504,
"end": 14103
} | class ____(EnhancementMatch):
def __init__(self, inner: FrameMatch):
self.inner = inner
@property
def description(self) -> str:
return f"[ {self.inner.description} ] |"
def _to_config_structure(self, version: int) -> str:
return f"[{self.inner._to_config_structure(version)}]|"
def matches_frame(
self,
frames: list[MatchFrame],
idx: int,
exception_data: dict[str, Any],
cache: ReturnValueCache,
) -> bool:
return idx > 0 and self.inner.matches_frame(frames, idx - 1, exception_data, cache)
| CallerMatch |
python | ray-project__ray | python/ray/data/_internal/stats.py | {
"start": 35441,
"end": 43246
} | class ____:
"""Holds the execution times for a given Dataset.
This object contains a reference to the parent Dataset's stats as well,
but not the Dataset object itself, to allow its blocks to be dropped from
memory."""
def __init__(
self,
*,
metadata: StatsDict,
parent: Union[Optional["DatasetStats"], List["DatasetStats"]],
base_name: str = None,
):
"""Create dataset stats.
Args:
metadata: Dict of operators used to create this Dataset from the
previous one. Typically one entry, e.g., {"map": [...]}.
parent: Reference to parent Dataset's stats, or a list of parents
if there are multiple.
base_name: The name of the base operation for a multi-operator operation.
"""
self.metadata: StatsDict = metadata
if parent is not None and not isinstance(parent, list):
parent = [parent]
self.parents: List["DatasetStats"] = parent or []
self.number: int = (
0 if not self.parents else max(p.number for p in self.parents) + 1
)
self.base_name = base_name
# TODO(ekl) deprecate and remove the notion of dataset UUID once we move
# fully to streaming execution.
self.dataset_uuid: str = "unknown_uuid"
self.time_total_s: float = 0
# Streaming executor stats
self.streaming_exec_schedule_s: Timer = Timer()
# Iteration stats, filled out if the user iterates over the dataset.
self.iter_wait_s: Timer = Timer()
self.iter_get_ref_bundles_s: Timer = Timer()
self.iter_get_s: Timer = Timer()
self.iter_next_batch_s: Timer = Timer()
self.iter_format_batch_s: Timer = Timer()
self.iter_collate_batch_s: Timer = Timer()
self.iter_finalize_batch_s: Timer = Timer()
self.iter_time_to_first_batch_s: Timer = Timer()
self.iter_total_blocked_s: Timer = Timer()
self.iter_user_s: Timer = Timer()
self.iter_initialize_s: Timer = Timer()
self.iter_total_s: Timer = Timer()
self.extra_metrics = {}
# Block fetch stats during iteration.
# These are stats about locations of blocks when the iterator is trying to
# consume them. The iteration performance will be affected depending on
# whether the block is in the local object store of the node where the
# iterator is running.
# This serves as an indicator of block prefetching effectiveness.
self.iter_blocks_local: int = 0
self.iter_blocks_remote: int = 0
self.iter_unknown_location: int = 0
self.iter_prefetched_bytes: int = 0
# Memory usage stats
self.global_bytes_spilled: int = 0
self.global_bytes_restored: int = 0
self.dataset_bytes_spilled: int = 0
# Streaming split coordinator stats (dataset level)
self.streaming_split_coordinator_s: Timer = Timer()
@property
def stats_actor(self):
return get_or_create_stats_actor()
def child_builder(
self, name: str, override_start_time: Optional[float] = None
) -> _DatasetStatsBuilder:
"""Start recording stats for an op of the given name (e.g., map)."""
return _DatasetStatsBuilder(name, self, override_start_time)
def to_summary(self) -> "DatasetStatsSummary":
"""Generate a `DatasetStatsSummary` object from the given `DatasetStats`
object, which can be used to generate a summary string."""
operators_stats = []
is_sub_operator = len(self.metadata) > 1
iter_stats = IterStatsSummary(
self.iter_wait_s,
self.iter_get_ref_bundles_s,
self.iter_get_s,
self.iter_next_batch_s,
self.iter_format_batch_s,
self.iter_collate_batch_s,
self.iter_finalize_batch_s,
self.iter_time_to_first_batch_s,
self.iter_total_blocked_s,
self.iter_user_s,
self.iter_initialize_s,
self.iter_total_s,
self.streaming_split_coordinator_s,
self.iter_blocks_local,
self.iter_blocks_remote,
self.iter_unknown_location,
self.iter_prefetched_bytes,
)
stats_summary_parents = []
if self.parents is not None:
stats_summary_parents = [p.to_summary() for p in self.parents]
# Collect the sum of the final output row counts from all parent nodes
parent_total_output = 0
for i, parent_summary in enumerate(stats_summary_parents):
if parent_summary.operators_stats:
# Get the last operator stats from the current parent summary
last_parent_op = parent_summary.operators_stats[-1]
# Extract output row count (handle dict type with "sum" key)
op_output = (
last_parent_op.output_num_rows.get("sum", 0)
if isinstance(last_parent_op.output_num_rows, dict)
else 0
)
logger.debug(
f"Parent {i + 1} (operator: {last_parent_op.operator_name}) contributes {op_output} rows to input"
)
parent_total_output += op_output
# Create temporary operator stats objects from block metadata
op_stats = [
OperatorStatsSummary.from_block_metadata(
name, stats, is_sub_operator=is_sub_operator
)
for name, stats in self.metadata.items()
]
for i, op_stat in enumerate(op_stats):
# For sub-operators: inherit input based on the order in the current list
if is_sub_operator:
if i == 0:
# Input of the first sub-operator is the total output from parent nodes
op_stat.total_input_num_rows = parent_total_output
else:
# Input of subsequent sub-operators is the output of the previous sub-operator
prev_op = op_stats[i - 1]
op_stat.total_input_num_rows = (
prev_op.output_num_rows["sum"]
if (
prev_op.output_num_rows and "sum" in prev_op.output_num_rows
)
else 0
)
else:
# Single operator scenario: input rows = total output from all parent nodes
op_stat.total_input_num_rows = parent_total_output
operators_stats.append(op_stat)
streaming_exec_schedule_s = (
self.streaming_exec_schedule_s.get()
if self.streaming_exec_schedule_s
else 0
)
return DatasetStatsSummary(
operators_stats,
iter_stats,
stats_summary_parents,
self.number,
self.dataset_uuid,
self.time_total_s,
self.base_name,
self.extra_metrics,
self.global_bytes_spilled,
self.global_bytes_restored,
self.dataset_bytes_spilled,
streaming_exec_schedule_s,
)
def runtime_metrics(self) -> str:
"""Generate a string representing the runtime metrics of a Dataset. This is
a high level summary of the time spent in Ray Data code broken down by operator.
It also includes the time spent in the scheduler. Times are shown as the total
time for each operator and percentages of time are shown as a fraction of the
total time for the whole dataset."""
return self.to_summary().runtime_metrics()
@DeveloperAPI
@dataclass
| DatasetStats |
python | gevent__gevent | src/gevent/queue.py | {
"start": 22127,
"end": 22657
} | class ____(Queue):
# A specialization of Queue that knows it can never
# be bound. Changing its maxsize has no effect.
__slots__ = ()
def __init__(self, maxsize=None, items=()):
if maxsize is not None:
raise ValueError("UnboundQueue has no maxsize")
Queue.__init__(self, maxsize, items)
self.putters = None # Will never be used.
def put(self, item, block=True, timeout=None):
self._put(item)
if self.getters:
self._schedule_unlock()
| UnboundQueue |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/organization_test_fire_action.py | {
"start": 1380,
"end": 1868
} | class ____(CamelSnakeSerializer):
actions = serializers.ListField(required=True)
def validate_actions(self, value):
validated_actions = []
for action in value:
action_validator = BaseActionValidator(data=action, context=self.context)
action_validator.is_valid(raise_exception=True)
action.update(action_validator.validated_data)
validated_actions.append(action)
return validated_actions
| TestActionsValidator |
python | pytorch__pytorch | test/test_serialization.py | {
"start": 306589,
"end": 307217
} | class ____(torch.Tensor):
elem: torch.Tensor
__slots__ = ['elem', 'other']
@staticmethod
def __new__(cls, elem, *args, **kwargs):
# The wrapping tensor (TestSubclass) is just a meta tensor, so it
# doesn't hold any memory (meta tensor is generally the preferred type
# of tensor you want to make a subclass from)...
r = torch.Tensor._make_subclass(cls, elem.to('meta'), elem.requires_grad)
# ...the real tensor is held as an element on the tensor.
r.elem = elem
return r
def clone(self):
return type(self)(self.elem.clone())
| TestWrapperSubclass |
python | astral-sh__uv | crates/uv-python/fetch-download-metadata.py | {
"start": 5479,
"end": 5643
} | class ____:
implementation: ImplementationName
@abc.abstractmethod
async def find(self) -> list[PythonDownload]:
raise NotImplementedError
| Finder |
python | pandas-dev__pandas | pandas/tests/plotting/test_groupby.py | {
"start": 261,
"end": 5732
} | class ____:
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.default_rng(2).normal(166, 20, size=n))
gender = np.random.default_rng(2).choice(["male", "female"], size=n)
weight.groupby(gender).plot()
def test_series_groupby_plotting_nominally_works_hist(self):
n = 10
height = Series(np.random.default_rng(2).normal(60, 10, size=n))
gender = np.random.default_rng(2).choice(["male", "female"], size=n)
height.groupby(gender).hist()
def test_series_groupby_plotting_nominally_works_alpha(self):
n = 10
height = Series(np.random.default_rng(2).normal(60, 10, size=n))
gender = np.random.default_rng(2).choice(["male", "female"], size=n)
# Regression test for GH8733
height.groupby(gender).plot(alpha=0.5)
def test_plotting_with_float_index_works(self):
# GH 7025
df = DataFrame(
{
"def": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"val": np.random.default_rng(2).standard_normal(9),
},
index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0],
)
df.groupby("def")["val"].plot()
def test_plotting_with_float_index_works_apply(self):
# GH 7025
df = DataFrame(
{
"def": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"val": np.random.default_rng(2).standard_normal(9),
},
index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0],
)
df.groupby("def")["val"].apply(lambda x: x.plot())
def test_hist_single_row(self):
# GH10214
bins = np.arange(80, 100 + 2, 1)
df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
def test_hist_single_row_single_bycol(self):
# GH10214
bins = np.arange(80, 100 + 2, 1)
df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
def test_plot_submethod_works(self):
df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")})
df.groupby("z").plot.scatter("x", "y")
def test_plot_submethod_works_line(self):
df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")})
df.groupby("z")["x"].plot.line()
def test_plot_kwargs(self):
df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")})
res = df.groupby("z").plot(kind="scatter", x="x", y="y")
# check that a scatter plot is effectively plotted: the axes should
# contain a PathCollection from the scatter plot (GH11805)
assert len(res["a"].collections) == 1
def test_plot_kwargs_scatter(self):
df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")})
res = df.groupby("z").plot.scatter(x="x", y="y")
assert len(res["a"].collections) == 1
@pytest.mark.parametrize("column, expected_axes_num", [(None, 2), ("b", 1)])
def test_groupby_hist_frame_with_legend(self, column, expected_axes_num):
# GH 6279 - DataFrameGroupBy histogram can have a legend
expected_layout = (1, expected_axes_num)
expected_labels = column or [["a"], ["b"]]
index = Index(15 * ["1"] + 15 * ["2"], name="c")
df = DataFrame(
np.random.default_rng(2).standard_normal((30, 2)),
index=index,
columns=["a", "b"],
)
g = df.groupby("c")
for axes in g.hist(legend=True, column=column):
_check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout)
for ax, expected_label in zip(axes[0], expected_labels):
_check_legend_labels(ax, expected_label)
@pytest.mark.parametrize("column", [None, "b"])
def test_groupby_hist_frame_with_legend_raises(self, column):
# GH 6279 - DataFrameGroupBy histogram with legend and label raises
index = Index(15 * ["1"] + 15 * ["2"], name="c")
df = DataFrame(
np.random.default_rng(2).standard_normal((30, 2)),
index=index,
columns=["a", "b"],
)
g = df.groupby("c")
with pytest.raises(ValueError, match="Cannot use both legend and label"):
g.hist(legend=True, column=column, label="d")
def test_groupby_hist_series_with_legend(self):
# GH 6279 - SeriesGroupBy histogram can have a legend
index = Index(15 * ["1"] + 15 * ["2"], name="c")
df = DataFrame(
np.random.default_rng(2).standard_normal((30, 2)),
index=index,
columns=["a", "b"],
)
g = df.groupby("c")
for ax in g["a"].hist(legend=True):
_check_axes_shape(ax, axes_num=1, layout=(1, 1))
_check_legend_labels(ax, ["1", "2"])
def test_groupby_hist_series_with_legend_raises(self):
# GH 6279 - SeriesGroupBy histogram with legend and label raises
index = Index(15 * ["1"] + 15 * ["2"], name="c")
df = DataFrame(
np.random.default_rng(2).standard_normal((30, 2)),
index=index,
columns=["a", "b"],
)
g = df.groupby("c")
with pytest.raises(ValueError, match="Cannot use both legend and label"):
g.hist(legend=True, label="d")
| TestDataFrameGroupByPlots |
python | pypa__twine | tests/test_auth.py | {
"start": 9977,
"end": 13871
} | class ____:
def __init__(
self,
get_response_list: t.List[MockResponse],
post_response_list: t.List[MockResponse],
) -> None:
self.post_counter = self.get_counter = 0
self.get_response_list = get_response_list
self.post_response_list = post_response_list
def get(self, url: str, **kwargs) -> MockResponse:
response = self.get_response_list[self.get_counter]
self.get_counter += 1
return response
def post(self, url: str, **kwargs) -> MockResponse:
response = self.post_response_list[self.post_counter]
self.post_counter += 1
return response
def test_trusted_publish_authenticator_refreshes_token(monkeypatch, config):
def make_session():
return MockSession(
get_response_list=[
MockResponse(status_code=200, json={"audience": "fake-aud"})
],
post_response_list=[
MockResponse(
status_code=200,
json={
"success": True,
"token": "new-token",
"expires": int(time.time()) + 900,
},
),
],
)
def detect_credential(*args, **kwargs) -> str:
return "fake-oidc-token"
config.update({"repository": utils.TEST_REPOSITORY})
res = auth.Resolver(config, auth.CredentialInput(username="__token__"))
res._tp_token = auth.TrustedPublishingToken(
success=True,
token="expiring-tp-token",
)
res._expires = int(time.time()) + 4 * 60
monkeypatch.setattr(auth, "detect_credential", detect_credential)
monkeypatch.setattr(auth.utils, "make_requests_session", make_session)
authenticator = auth.TrustedPublishingAuthenticator(resolver=res)
prepped_req = requests.models.PreparedRequest()
prepped_req.prepare_headers({})
request = authenticator(prepped_req)
assert (
request.headers["Authorization"]
== f"Basic {base64.b64encode(b'__token__:new-token').decode()}"
)
def test_trusted_publish_authenticator_reuses_token(monkeypatch, config):
def make_session():
return MockSession(
get_response_list=[
MockResponse(status_code=200, json={"audience": "fake-aud"})
],
post_response_list=[
MockResponse(
status_code=200,
json={
"success": True,
"token": "new-token",
"expires": int(time.time()) + 900,
},
),
],
)
def detect_credential(*args, **kwargs) -> str:
return "fake-oidc-token"
config.update({"repository": utils.TEST_REPOSITORY})
res = auth.Resolver(config, auth.CredentialInput(username="__token__"))
res._tp_token = auth.TrustedPublishingToken(
success=True,
token="valid-tp-token",
)
res._expires = int(time.time()) + 900
monkeypatch.setattr(auth, "detect_credential", detect_credential)
monkeypatch.setattr(auth.utils, "make_requests_session", make_session)
authenticator = auth.TrustedPublishingAuthenticator(resolver=res)
prepped_req = requests.models.PreparedRequest()
prepped_req.prepare_headers({})
request = authenticator(prepped_req)
assert (
request.headers["Authorization"]
== f"Basic {base64.b64encode(b'__token__:valid-tp-token').decode()}"
)
def test_inability_to_make_token_raises_error():
class MockResolver:
def make_trusted_publishing_token(self) -> None:
return None
authenticator = auth.TrustedPublishingAuthenticator(
resolver=MockResolver(),
)
with pytest.raises(exceptions.TrustedPublishingFailure):
authenticator(None)
| MockSession |
python | pyca__cryptography | src/cryptography/x509/extensions.py | {
"start": 29441,
"end": 30597
} | class ____:
def __init__(
self,
organization: str | None,
notice_numbers: Iterable[int],
) -> None:
self._organization = organization
notice_numbers = list(notice_numbers)
if not all(isinstance(x, int) for x in notice_numbers):
raise TypeError("notice_numbers must be a list of integers")
self._notice_numbers = notice_numbers
def __repr__(self) -> str:
return (
f"<NoticeReference(organization={self.organization!r}, "
f"notice_numbers={self.notice_numbers})>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, NoticeReference):
return NotImplemented
return (
self.organization == other.organization
and self.notice_numbers == other.notice_numbers
)
def __hash__(self) -> int:
return hash((self.organization, tuple(self.notice_numbers)))
@property
def organization(self) -> str | None:
return self._organization
@property
def notice_numbers(self) -> list[int]:
return self._notice_numbers
| NoticeReference |
python | apache__airflow | providers/redis/tests/unit/redis/log/test_redis_task_handler.py | {
"start": 1729,
"end": 5951
} | class ____:
@staticmethod
def clear_db():
clear_db_dags()
clear_db_runs()
if AIRFLOW_V_3_0_PLUS:
clear_db_dag_bundles()
@pytest.fixture
def ti(self):
date = timezone.datetime(2020, 1, 1)
dag = DAG(dag_id="dag_for_testing_redis_task_handler", schedule=None, start_date=date)
task = EmptyOperator(task_id="task_for_testing_redis_log_handler", dag=dag)
if AIRFLOW_V_3_0_PLUS:
dag_run = DagRun(
dag_id=dag.dag_id,
logical_date=date,
data_interval=(date, date),
run_after=date,
run_id="test",
run_type="scheduled",
)
else:
dag_run = DagRun(
dag_id=dag.dag_id,
execution_date=date,
run_id="test",
run_type="scheduled",
)
dag_run.set_state(State.RUNNING)
with create_session() as session:
session.add(dag_run)
session.flush()
session.refresh(dag_run)
bundle_name = "testing"
if AIRFLOW_V_3_1_PLUS:
sync_dag_to_db(dag, bundle_name=bundle_name, session=session)
elif AIRFLOW_V_3_0_PLUS:
from airflow.models.dagbundle import DagBundleModel
from airflow.models.serialized_dag import SerializedDagModel
from airflow.serialization.serialized_objects import SerializedDAG
session.add(DagBundleModel(name=bundle_name))
session.flush()
SerializedDAG.bulk_write_to_db(bundle_name, None, [dag])
SerializedDagModel.write_dag(dag, bundle_name=bundle_name)
if AIRFLOW_V_3_0_PLUS:
from airflow.models.dag_version import DagVersion
dag_version = DagVersion.get_latest_version(dag.dag_id)
ti = TaskInstance(task=task, run_id=dag_run.run_id, dag_version_id=dag_version.id)
else:
ti = TaskInstance(task=task, run_id=dag_run.run_id)
ti.dag_run = dag_run
ti.try_number = 1
ti.state = State.RUNNING
yield ti
self.clear_db()
@pytest.mark.db_test
@conf_vars({("logging", "remote_log_conn_id"): "redis_default"})
def test_write(self, ti):
handler = RedisTaskHandler("any", max_lines=5, ttl_seconds=2)
handler.set_context(ti)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
key = (
"dag_id=dag_for_testing_redis_task_handler/run_id=test"
"/task_id=task_for_testing_redis_log_handler/attempt=1.log"
)
with patch("redis.Redis.pipeline") as pipeline:
logger.info("Test log event")
pipeline.return_value.rpush.assert_called_once_with(key, "Test log event")
pipeline.return_value.ltrim.assert_called_once_with(key, start=-5, end=-1)
pipeline.return_value.expire.assert_called_once_with(key, time=2)
pipeline.return_value.execute.assert_called_once_with()
@pytest.mark.db_test
@conf_vars({("logging", "remote_log_conn_id"): "redis_default"})
def test_read(self, ti):
handler = RedisTaskHandler("any")
handler.set_context(ti)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
key = (
"dag_id=dag_for_testing_redis_task_handler/run_id=test"
"/task_id=task_for_testing_redis_log_handler/attempt=1.log"
)
with patch("redis.Redis.lrange") as lrange:
lrange.return_value = [b"Line 1", b"Line 2"]
logs = handler.read(ti)
if AIRFLOW_V_3_0_PLUS:
if get_base_airflow_version_tuple() < (3, 0, 4):
assert logs == (["Line 1\nLine 2"], {"end_of_log": True})
else:
log_stream, metadata = logs
assert extract_events(log_stream) == ["Line 1", "Line 2"]
assert metadata == {"end_of_log": True}
else:
assert logs == ([[("", "Line 1\nLine 2")]], [{"end_of_log": True}])
lrange.assert_called_once_with(key, start=0, end=-1)
| TestRedisTaskHandler |
python | huggingface__transformers | src/transformers/integrations/integration_utils.py | {
"start": 76485,
"end": 89003
} | class ____(TrainerCallback):
"""
A [`TrainerCallback`] that sends the logs to [ClearML](https://clear.ml/).
Environment:
- **CLEARML_PROJECT** (`str`, *optional*, defaults to `HuggingFace Transformers`):
ClearML project name.
- **CLEARML_TASK** (`str`, *optional*, defaults to `Trainer`):
ClearML task name.
- **CLEARML_LOG_MODEL** (`bool`, *optional*, defaults to `False`):
Whether to log models as artifacts during training.
"""
log_suffix = ""
_hparams_section = "Transformers"
_model_config_section = "Model Configuration"
_ignore_hparams_overrides = "_ignore_hparams_ui_overrides_"
_ignoge_model_config_overrides = "_ignore_model_config_ui_overrides_"
_model_config_description = "The configuration of model number {}."
_model_config_description_note = (
"Note that, when cloning this task and running it remotely,"
" the configuration might be applied to another model instead of this one."
" To avoid this, initialize the task externally by calling `Task.init`"
" before the `ClearMLCallback` is instantiated."
)
_train_run_counter = 0
_model_connect_counter = 0
_task_created_in_callback = False
_should_close_on_train_end = None
def __init__(self):
if is_clearml_available():
import clearml
self._clearml = clearml
else:
raise RuntimeError("ClearMLCallback requires 'clearml' to be installed. Run `pip install clearml`.")
self._initialized = False
self._clearml_task = None
self._log_model = False
self._checkpoints_saved = []
def setup(self, args, state, model, processing_class, **kwargs):
if self._clearml is None:
return
if self._initialized:
return
ClearMLCallback._train_run_counter += 1
ClearMLCallback._model_connect_counter += 1
ClearMLCallback.log_suffix = (
"" if ClearMLCallback._train_run_counter == 1 else "_" + str(ClearMLCallback._train_run_counter)
)
if state.is_world_process_zero:
logger.info("Automatic ClearML logging enabled.")
if self._clearml_task is None:
if ClearMLCallback._should_close_on_train_end is None:
if not self._clearml.Task.running_locally() or self._clearml.Task.current_task():
ClearMLCallback._should_close_on_train_end = False
else:
ClearMLCallback._should_close_on_train_end = True
# This might happen when running inside of a pipeline, where the task is already initialized
# from outside of Hugging Face
if self._clearml.Task.running_locally() and self._clearml.Task.current_task():
self._clearml_task = self._clearml.Task.current_task()
self._log_model = os.getenv(
"CLEARML_LOG_MODEL",
"FALSE" if not ClearMLCallback._task_created_in_callback else "TRUE",
).upper() in ENV_VARS_TRUE_VALUES.union({"TRUE"})
logger.info("External ClearML Task has been connected.")
else:
self._clearml_task = self._clearml.Task.init(
project_name=os.getenv("CLEARML_PROJECT", "HuggingFace Transformers"),
task_name=os.getenv("CLEARML_TASK", "Trainer"),
auto_connect_frameworks={"tensorboard": False, "pytorch": False},
output_uri=True,
)
self._log_model = os.getenv("CLEARML_LOG_MODEL", "TRUE").upper() in ENV_VARS_TRUE_VALUES.union(
{"TRUE"}
)
ClearMLCallback._task_created_in_callback = True
logger.info("ClearML Task has been initialized.")
self._initialized = True
suffixed_hparams_section = ClearMLCallback._hparams_section + ClearMLCallback.log_suffix
ignore_hparams_config_section = suffixed_hparams_section + "/" + ClearMLCallback._ignore_hparams_overrides
if self._clearml.Task.running_locally():
self._copy_training_args_as_hparams(args, suffixed_hparams_section)
self._clearml_task.set_parameter(
name=ignore_hparams_config_section,
value=True,
value_type=bool,
description=(
"If True, ignore Transformers hyperparameters overrides done in the UI/backend "
+ "when running remotely. Otherwise, the overrides will be applied when running remotely"
),
)
elif not self._clearml_task.get_parameter(ignore_hparams_config_section, default=True, cast=True):
self._clearml_task.connect(args, suffixed_hparams_section)
else:
self._copy_training_args_as_hparams(
args, ClearMLCallback._hparams_section + ClearMLCallback.log_suffix
)
if getattr(model, "config", None) is not None:
ignore_model_config_section = (
suffixed_hparams_section + "/" + ClearMLCallback._ignoge_model_config_overrides
)
configuration_object_description = ClearMLCallback._model_config_description.format(
ClearMLCallback._model_connect_counter
)
if ClearMLCallback._model_connect_counter != ClearMLCallback._train_run_counter:
configuration_object_description += " " + ClearMLCallback._model_config_description_note
if self._clearml.Task.running_locally():
self._clearml_task.set_parameter(
name=ignore_model_config_section,
value=True,
value_type=bool,
description=(
"If True, ignore Transformers model configuration overrides done in the UI/backend "
+ "when running remotely. Otherwise, the overrides will be applied when running remotely"
),
)
self._clearml_task.set_configuration_object(
name=ClearMLCallback._model_config_section + ClearMLCallback.log_suffix,
config_dict=model.config.to_dict(),
description=configuration_object_description,
)
elif not self._clearml_task.get_parameter(ignore_model_config_section, default=True, cast=True):
model.config = model.config.from_dict(
self._clearml_task.get_configuration_object_as_dict(
ClearMLCallback._model_config_section + ClearMLCallback.log_suffix
)
)
else:
self._clearml_task.set_configuration_object(
name=ClearMLCallback._model_config_section + ClearMLCallback.log_suffix,
config_dict=model.config.to_dict(),
description=configuration_object_description,
)
def on_train_begin(self, args, state, control, model=None, processing_class=None, **kwargs):
if self._clearml is None:
return
self._checkpoints_saved = []
if state.is_hyper_param_search:
self._initialized = False
if not self._initialized:
self.setup(args, state, model, processing_class, **kwargs)
def on_train_end(self, args, state, control, **kwargs):
if ClearMLCallback._should_close_on_train_end:
self._clearml_task.close()
ClearMLCallback._train_run_counter = 0
def on_log(self, args, state, control, model=None, processing_class=None, logs=None, **kwargs):
if self._clearml is None:
return
if not self._initialized:
self.setup(args, state, model, processing_class, **kwargs)
if state.is_world_process_zero:
eval_prefix = "eval_"
eval_prefix_len = len(eval_prefix)
test_prefix = "test_"
test_prefix_len = len(test_prefix)
single_value_scalars = [
"train_runtime",
"train_samples_per_second",
"train_steps_per_second",
"train_loss",
"total_flos",
"epoch",
]
for k, v in logs.items():
if isinstance(v, (int, float)):
if k in single_value_scalars:
self._clearml_task.get_logger().report_single_value(
name=k + ClearMLCallback.log_suffix, value=v
)
elif k.startswith(eval_prefix):
self._clearml_task.get_logger().report_scalar(
title="eval" + ClearMLCallback.log_suffix,
series=k[eval_prefix_len:],
value=v,
iteration=state.global_step,
)
elif k.startswith(test_prefix):
self._clearml_task.get_logger().report_scalar(
title="test" + ClearMLCallback.log_suffix,
series=k[test_prefix_len:],
value=v,
iteration=state.global_step,
)
else:
self._clearml_task.get_logger().report_scalar(
title="train" + ClearMLCallback.log_suffix,
series=k,
value=v,
iteration=state.global_step,
)
else:
logger.warning(
"Trainer is attempting to log a value of "
f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
"This invocation of ClearML logger's report_scalar() "
"is incorrect so we dropped this attribute."
)
def on_save(self, args, state, control, **kwargs):
if self._log_model and self._clearml_task and state.is_world_process_zero:
ckpt_dir = f"checkpoint-{state.global_step}"
artifact_path = os.path.join(args.output_dir, ckpt_dir)
name = ckpt_dir + ClearMLCallback.log_suffix
logger.info(f"Logging checkpoint artifact `{name}`. This may take some time.")
output_model = self._clearml.OutputModel(task=self._clearml_task, name=name)
output_model.connect(task=self._clearml_task, name=name)
output_model.update_weights_package(
weights_path=artifact_path,
target_filename=ckpt_dir,
iteration=state.global_step,
auto_delete_file=False,
)
self._checkpoints_saved.append(output_model)
while args.save_total_limit and args.save_total_limit < len(self._checkpoints_saved):
try:
self._clearml.model.Model.remove(
self._checkpoints_saved[0],
delete_weights_file=True,
force=True,
raise_on_errors=True,
)
except Exception as e:
logger.warning(
f"Could not remove checkpoint `{self._checkpoints_saved[0].name}` after going over the `save_total_limit`. Error is: {e}"
)
break
self._checkpoints_saved = self._checkpoints_saved[1:]
def _copy_training_args_as_hparams(self, training_args, prefix):
as_dict = {
field.name: getattr(training_args, field.name)
for field in fields(training_args)
if field.init and not field.name.endswith("_token")
}
flat_dict = {str(k): v for k, v in self._clearml.utilities.proxy_object.flatten_dictionary(as_dict).items()}
self._clearml_task._arguments.copy_from_dict(flat_dict, prefix=prefix)
| ClearMLCallback |
python | spack__spack | lib/spack/spack/util/timer.py | {
"start": 825,
"end": 1224
} | class ____:
def start(self, name=None):
pass
def stop(self, name=None):
pass
def duration(self, name=None):
return 0.0
@contextmanager
def measure(self, name):
yield self
@property
def phases(self):
return []
def write_json(self, out=sys.stdout):
pass
def write_tty(self, out=sys.stdout):
pass
| BaseTimer |
python | PyCQA__pylint | tests/functional/a/arguments_renamed.py | {
"start": 2685,
"end": 3161
} | class ____(FruitConditional):
fruit = "orange"
override_condiment = True
if fruit == "orange":
def brew(self, orange_name: str): # [arguments-renamed]
print(f"Brewing an orange named {orange_name}")
if override_condiment:
def eat_with_condiment(self, fruit_name: str, condiment: Condiment, error: str): # [arguments-differ]
print(f"Eating a fruit named {fruit_name} with {condiment}")
| FruitOverrideConditional |
python | run-llama__llama_index | llama-index-integrations/indices/llama-index-indices-managed-dashscope/llama_index/indices/managed/dashscope/transformations.py | {
"start": 3028,
"end": 3804
} | class ____(BaseModel, Generic[T]):
"""
A class containing metadata & implementation for a transformation in a dashscope pipeline.
"""
name: str
component: T = Field(description="Component that implements the transformation")
@classmethod
def from_component(cls, component: BaseComponent) -> "ConfiguredTransformation":
"""
Build a ConfiguredTransformation from a component in dashscope.
"""
return ConfigurableTransformations.from_component(
component
).build_configured_transformation(component)
@property
def configurable_transformation_type(self) -> ConfigurableTransformations:
return ConfigurableTransformations.from_component(self.component)
| DashScopeConfiguredTransformation |
python | pytorch__pytorch | torch/backends/_coreml/preprocess.py | {
"start": 490,
"end": 894
} | class ____:
Float = 0
Double = 1
Int = 2
Long = 3
Undefined = 4
# Supported Tensor types in coremltools:
# https://github.com/apple/coremltools/blob/main/coremltools/converters/mil/frontend/torch/converter.py#L28
torch_to_mil_types = {
ScalarType.Float: types.fp32,
ScalarType.Double: types.fp64,
ScalarType.Int: types.int32,
ScalarType.Long: types.int64,
}
| ScalarType |
python | ray-project__ray | python/ray/llm/_internal/serve/core/configs/openai_api_models.py | {
"start": 3370,
"end": 3829
} | class ____(vLLMTranscriptionRequest):
model_config = ConfigDict(arbitrary_types_allowed=True)
request_id: str = Field(
default_factory=lambda: f"{random_uuid()}",
description=(
"The request_id related to this request. If the caller does "
"not set it, a random_uuid will be generated. This id is used "
"through out the inference process and return in response."
),
)
| TranscriptionRequest |
python | dagster-io__dagster | python_modules/libraries/dagster-mysql/dagster_mysql/run_storage/run_storage.py | {
"start": 1197,
"end": 7456
} | class ____(SqlRunStorage, ConfigurableClass):
"""MySQL-backed run storage.
Users should not directly instantiate this class; it is instantiated by internal machinery when
``dagster-webserver`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.
.. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-mysql-legacy.yaml
:caption: dagster.yaml
:start-after: start_marker_runs
:end-before: end_marker_runs
:language: YAML
Note that the fields in this config are :py:class:`~dagster.StringSource` and
:py:class:`~dagster.IntSource` and can be configured from environment variables.
"""
def __init__(self, mysql_url: str, inst_data: Optional[ConfigurableClassData] = None):
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
self.mysql_url = mysql_url
# Default to not holding any connections open to prevent accumulating connections per DagsterInstance
self._engine = create_engine(
self.mysql_url,
isolation_level=mysql_isolation_level(),
poolclass=db_pool.NullPool,
)
self._index_migration_cache = {}
table_names = retry_mysql_connection_fn(db.inspect(self._engine).get_table_names)
# Stamp and create tables if the main table does not exist (we can't check alembic
# revision because alembic config may be shared with other storage classes)
if "runs" not in table_names:
retry_mysql_creation_fn(self._init_db)
self.migrate()
self.optimize()
elif "instance_info" not in table_names:
InstanceInfo.create(self._engine)
self._mysql_version = self.get_server_version()
super().__init__()
def _init_db(self) -> None:
with self.connect() as conn:
RunStorageSqlMetadata.create_all(conn)
stamp_alembic_rev(mysql_alembic_config(__file__), conn)
def optimize_for_webserver(
self, statement_timeout: int, pool_recycle: int, max_overflow: int
) -> None:
# When running in dagster-webserver, hold 1 open connection
# https://github.com/dagster-io/dagster/issues/3719
self._engine = create_engine(
self.mysql_url,
isolation_level=mysql_isolation_level(),
pool_size=1,
pool_recycle=pool_recycle,
max_overflow=max_overflow,
)
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@classmethod
def config_type(cls) -> UserConfigSchema:
return mysql_config()
def get_server_version(self) -> Optional[str]:
with self.connect() as conn:
row = conn.execute(db.text("select version()")).fetchone()
if not row:
return None
return cast("str", row[0])
@classmethod
def from_config_value( # pyright: ignore[reportIncompatibleMethodOverride]
cls, inst_data: Optional[ConfigurableClassData], config_value: MySqlStorageConfig
) -> "MySQLRunStorage":
return MySQLRunStorage(inst_data=inst_data, mysql_url=mysql_url_from_config(config_value))
@staticmethod
def wipe_storage(mysql_url: str) -> None:
engine = create_engine(
mysql_url, isolation_level=mysql_isolation_level(), poolclass=db_pool.NullPool
)
try:
RunStorageSqlMetadata.drop_all(engine)
finally:
engine.dispose()
@staticmethod
def create_clean_storage(mysql_url: str) -> "MySQLRunStorage":
MySQLRunStorage.wipe_storage(mysql_url)
return MySQLRunStorage(mysql_url)
def connect(self, run_id: Optional[str] = None) -> ContextManager[Connection]:
return create_mysql_connection(self._engine, __file__, "run")
def upgrade(self) -> None:
alembic_config = mysql_alembic_config(__file__)
with self.connect() as conn:
run_alembic_upgrade(alembic_config, conn)
def has_built_index(self, migration_name: str) -> None: # pyright: ignore[reportIncompatibleMethodOverride]
if migration_name not in self._index_migration_cache:
self._index_migration_cache[migration_name] = super().has_built_index(migration_name)
return self._index_migration_cache[migration_name]
def mark_index_built(self, migration_name: str) -> None:
super().mark_index_built(migration_name)
if migration_name in self._index_migration_cache:
del self._index_migration_cache[migration_name]
def add_daemon_heartbeat(self, daemon_heartbeat: DaemonHeartbeat) -> None:
with self.connect() as conn:
conn.execute(
db_dialects.mysql.insert(DaemonHeartbeatsTable)
.values(
timestamp=datetime_from_timestamp(daemon_heartbeat.timestamp),
daemon_type=daemon_heartbeat.daemon_type,
daemon_id=daemon_heartbeat.daemon_id,
body=serialize_value(daemon_heartbeat),
)
.on_duplicate_key_update(
timestamp=datetime_from_timestamp(daemon_heartbeat.timestamp),
daemon_id=daemon_heartbeat.daemon_id,
body=serialize_value(daemon_heartbeat),
)
)
def set_cursor_values(self, pairs: Mapping[str, str]) -> None:
check.mapping_param(pairs, "pairs", key_type=str, value_type=str)
db_values = [{"key": k, "value": v} for k, v in pairs.items()]
with self.connect() as conn:
insert_stmt = db_dialects.mysql.insert(KeyValueStoreTable).values(db_values)
conn.execute(
insert_stmt.on_duplicate_key_update(
value=insert_stmt.inserted.value,
)
)
def alembic_version(self) -> AlembicVersion:
alembic_config = mysql_alembic_config(__file__)
with self.connect() as conn:
return check_alembic_revision(alembic_config, conn)
| MySQLRunStorage |
python | google__jax | jax/_src/source_info_util.py | {
"start": 982,
"end": 2359
} | class ____(NamedTuple):
file_name: str
function_name: str
start_line: int
start_column: int
end_line: int
end_column: int
_exclude_paths: list[str] = [
# Attach the separator to make sure that .../jax does not end up matching
# .../jax_triton and other packages that might have a jax prefix.
os.path.dirname(os.path.dirname(__file__)) + os.sep,
# Also exclude stdlib as user frames. In a non-standard Python runtime,
# the following may be different.
sysconfig.get_path('stdlib'),
os.path.dirname(contextlib.__file__),
]
@functools.cache
def _exclude_path_regex() -> re.Pattern[str]:
# The regex below would not handle an empty set of exclusions correctly.
assert len(_exclude_paths) > 0
return re.compile('|'.join(f'^{re.escape(path)}' for path in _exclude_paths))
def register_exclusion(path: str):
_exclude_paths.append(path)
_exclude_path_regex.cache_clear()
is_user_filename.cache_clear()
# Explicit inclusions take priority over exclude paths.
_include_paths: list[str] = []
@functools.cache
def _include_path_regex() -> re.Pattern[str]:
patterns = [f'^{re.escape(path)}' for path in _include_paths]
patterns.append('_test.py$')
return re.compile('|'.join(patterns))
def register_inclusion(path: str):
_include_paths.append(path)
_include_path_regex.cache_clear()
is_user_filename.cache_clear()
| Frame |
python | google__jax | tests/debugging_primitives_test.py | {
"start": 1262,
"end": 1365
} | class ____:
def __init__(self, platform, id):
self.platform = platform
self.id = id
| DummyDevice |
python | cherrypy__cherrypy | cherrypy/test/test_plugins.py | {
"start": 62,
"end": 341
} | class ____:
def test_file_for_file_module_when_None(self):
"""No error when ``module.__file__`` is :py:data:`None`."""
class test_module:
__file__ = None
assert plugins.Autoreloader._file_for_file_module(test_module) is None
| TestAutoreloader |
python | ray-project__ray | python/ray/data/tests/unit/test_expressions.py | {
"start": 6183,
"end": 7785
} | class ____:
"""Test enhanced binary expression functionality."""
@pytest.mark.parametrize(
"expr, expected_op",
[
(col("age") != lit(25), Operation.NE),
(col("status").is_in(["active", "pending"]), Operation.IN),
(col("status").not_in(["inactive", "deleted"]), Operation.NOT_IN),
(col("a").is_in(col("b")), Operation.IN),
],
ids=["not_equal", "is_in", "not_in", "is_in_amongst_cols"],
)
def test_new_binary_operations(self, expr, expected_op):
"""Test new binary operations."""
assert isinstance(expr, BinaryExpr)
assert expr.op == expected_op
def test_is_in_with_list(self):
"""Test is_in with list of values."""
expr = col("status").is_in(["active", "pending", "completed"])
assert isinstance(expr, BinaryExpr)
assert expr.op == Operation.IN
# The right operand should be a LiteralExpr containing the list
assert expr.right.value == ["active", "pending", "completed"]
def test_is_in_with_expr(self):
"""Test is_in with expression."""
values_expr = lit(["a", "b", "c"])
expr = col("category").is_in(values_expr)
assert isinstance(expr, BinaryExpr)
assert expr.op == Operation.IN
assert expr.right == values_expr
def test_is_in_amongst_cols(self):
"""Test is_in with expression."""
expr = col("a").is_in(col("b"))
assert isinstance(expr, BinaryExpr)
assert expr.op == Operation.IN
assert expr.right == col("b")
| TestBinaryExpressions |
python | realpython__materials | python-enum/disk_player.py | {
"start": 30,
"end": 132
} | class ____(Enum):
EMPTY = auto()
STOPPED = auto()
PAUSED = auto()
PLAYING = auto()
| State |
python | apache__airflow | providers/amazon/tests/system/amazon/aws/utils/__init__.py | {
"start": 4665,
"end": 6650
} | class ____:
"""
Stores metadata about a variable to be fetched for AWS System Tests.
:param name: The name of the variable to be fetched.
:param to_split: If True, the input is a string-formatted List and needs to be split. Defaults to False.
:param delimiter: If to_split is true, this will be used to split the string. Defaults to ','.
:param test_name: The name of the system test that the variable is associated with.
"""
def __init__(
self,
name: str,
test_name: str,
to_split: bool = False,
delimiter: str | None = None,
optional: bool = False,
):
self.name = name
self.test_name = test_name
self.to_split = to_split
if to_split:
self.delimiter = delimiter or ","
elif delimiter:
raise ValueError(f"Variable {name} has a delimiter but split_string is set to False.")
self.optional = optional
def get_value(self):
if hasattr(self, "default_value"):
return self._format_value(
fetch_variable(
key=self.name,
default_value=self.default_value,
test_name=self.test_name,
optional=self.optional,
)
)
return self._format_value(
fetch_variable(key=self.name, test_name=self.test_name, optional=self.optional)
)
def set_default(self, default):
# Since 'None' is a potentially valid "default" value, we are only creating this
# field when a default is provided, and in get_value we check if the field exists.
self.default_value = default
def _format_value(self, value):
if self.to_split:
if not isinstance(value, str):
raise TypeError(f"{self.name} is type {type(value)} and can not be split as requested.")
return value.split(self.delimiter)
return value
| Variable |
python | getsentry__sentry | src/sentry/relay/projectconfig_debounce_cache/base.py | {
"start": 44,
"end": 1841
} | class ____(Service):
"""A cache for debouncing updates for the relay projectconfig cache.
Whenever a project or organization option changes, we schedule a task
that updates the relay configuration in the projectconfig cache.
However, at the same time we want to debounce this task in case multiple
option updates have been scheduled at the same time.
This cache is allowed to randomly lose data but `mark_task_done` should be
visible immediately, everywhere, consistently. Memcached is probably not
going to cut it.
The constructor takes an optional ``key_prefix`` option, which can be used to create
multiple instances of this debounce cache with different keys.
"""
__all__ = ("is_debounced", "debounce", "mark_task_done")
def __init__(self, **options):
pass
def is_debounced(self, *, public_key, project_id, organization_id):
"""Checks if the given project/organization should be debounced.
If this is called this with multiple arguments each scope is checked, so that even
if you only need to check a single key an org-level debounce will be respected. You
must make sure that the several arguments relate to each other.
"""
return False
def debounce(self, *, public_key, project_id, organization_id):
"""Debounces the given project/organization, without performing any checks.
The highest-scoped argument passed in will be debounced.
"""
def mark_task_done(self, *, public_key, project_id, organization_id):
"""
Mark a task done such that `is_debounced` starts emitting False
for the given parameters.
Returns 1 if the task was removed, 0 if it wasn't.
"""
return 1
| ProjectConfigDebounceCache |
python | optuna__optuna | optuna/storages/_in_memory.py | {
"start": 15186,
"end": 15632
} | class ____:
def __init__(self, name: str, directions: list[StudyDirection]) -> None:
self.trials: list[FrozenTrial] = []
self.param_distribution: dict[str, distributions.BaseDistribution] = {}
self.user_attrs: dict[str, Any] = {}
self.system_attrs: dict[str, Any] = {}
self.name: str = name
self.directions: list[StudyDirection] = directions
self.best_trial_id: int | None = None
| _StudyInfo |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/utils/misc.py | {
"start": 19194,
"end": 23745
} | class ____(BuildBackendHookCaller):
def __init__(
self,
config_holder: Any,
source_dir: str,
build_backend: str,
backend_path: Optional[str] = None,
runner: Optional[Callable[..., None]] = None,
python_executable: Optional[str] = None,
):
super().__init__(
source_dir, build_backend, backend_path, runner, python_executable
)
self.config_holder = config_holder
def build_wheel(
self,
wheel_directory: str,
config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
metadata_directory: Optional[str] = None,
) -> str:
cs = self.config_holder.config_settings
return super().build_wheel(
wheel_directory, config_settings=cs, metadata_directory=metadata_directory
)
def build_sdist(
self,
sdist_directory: str,
config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
) -> str:
cs = self.config_holder.config_settings
return super().build_sdist(sdist_directory, config_settings=cs)
def build_editable(
self,
wheel_directory: str,
config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
metadata_directory: Optional[str] = None,
) -> str:
cs = self.config_holder.config_settings
return super().build_editable(
wheel_directory, config_settings=cs, metadata_directory=metadata_directory
)
def get_requires_for_build_wheel(
self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None
) -> List[str]:
cs = self.config_holder.config_settings
return super().get_requires_for_build_wheel(config_settings=cs)
def get_requires_for_build_sdist(
self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None
) -> List[str]:
cs = self.config_holder.config_settings
return super().get_requires_for_build_sdist(config_settings=cs)
def get_requires_for_build_editable(
self, config_settings: Optional[Dict[str, Union[str, List[str]]]] = None
) -> List[str]:
cs = self.config_holder.config_settings
return super().get_requires_for_build_editable(config_settings=cs)
def prepare_metadata_for_build_wheel(
self,
metadata_directory: str,
config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
_allow_fallback: bool = True,
) -> str:
cs = self.config_holder.config_settings
return super().prepare_metadata_for_build_wheel(
metadata_directory=metadata_directory,
config_settings=cs,
_allow_fallback=_allow_fallback,
)
def prepare_metadata_for_build_editable(
self,
metadata_directory: str,
config_settings: Optional[Dict[str, Union[str, List[str]]]] = None,
_allow_fallback: bool = True,
) -> str:
cs = self.config_holder.config_settings
return super().prepare_metadata_for_build_editable(
metadata_directory=metadata_directory,
config_settings=cs,
_allow_fallback=_allow_fallback,
)
def warn_if_run_as_root() -> None:
"""Output a warning for sudo users on Unix.
In a virtual environment, sudo pip still writes to virtualenv.
On Windows, users may run pip as Administrator without issues.
This warning only applies to Unix root users outside of virtualenv.
"""
if running_under_virtualenv():
return
if not hasattr(os, "getuid"):
return
# On Windows, there are no "system managed" Python packages. Installing as
# Administrator via pip is the correct way of updating system environments.
#
# We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform
# checks: https://mypy.readthedocs.io/en/stable/common_issues.html
if sys.platform == "win32" or sys.platform == "cygwin":
return
if os.getuid() != 0:
return
logger.warning(
"Running pip as the 'root' user can result in broken permissions and "
"conflicting behaviour with the system package manager, possibly "
"rendering your system unusable."
"It is recommended to use a virtual environment instead: "
"https://pip.pypa.io/warnings/venv. "
"Use the --root-user-action option if you know what you are doing and "
"want to suppress this warning."
)
| ConfiguredBuildBackendHookCaller |
python | tensorflow__tensorflow | tensorflow/compiler/tests/clustering_test.py | {
"start": 1152,
"end": 3694
} | class ____(xla_test.XLATestCase):
def testAdd(self):
val1 = np.array([4, 3, 2, 1], dtype=np.float32)
val2 = np.array([5, 6, 7, 8], dtype=np.float32)
expected = val1 + val2
with self.session():
with self.test_scope():
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
output = math_ops.add(input1, input2)
result = self.evaluate(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testAddFromCpuMultiple(self):
val1 = np.array([4, 3, 2, 1]).astype(np.float32)
val2 = np.array([5, 6, 7, 8]).astype(np.float32)
expected = val1 + val2
with self.session():
with ops.device(CPU_DEVICE):
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
with self.test_scope():
output = math_ops.add(input1, input2)
for _ in range(10):
result = self.evaluate(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testDeadlock(self):
# Builds a graph of the form:
# x -> y
# | \
# z -> w
# where x and z are placed on the CPU and y and w are placed on the XLA
# device. If y and w are clustered for compilation, then the graph will
# deadlock since the clustered graph will contain a self-loop.
with self.session() as sess:
with ops.device(CPU_DEVICE):
x = array_ops.placeholder(dtypes.float32, [2])
with self.test_scope():
y = x * 2
with ops.device(CPU_DEVICE):
z = y * y
with self.test_scope():
w = y + z
result = sess.run(w, {x: [1.5, 0.5]})
self.assertAllClose(result, [12., 2.], rtol=1e-3)
def testHostMemory(self):
with self.session() as sess:
x = array_ops.placeholder(dtypes.int32)
with self.test_scope():
y = x + 1
with ops.device(CPU_DEVICE):
# Place a computation on the CPU, so y and w cannot be merged into the
# same JIT compilation.
z = y * 2
with self.test_scope():
# Argument 'y' is a non-constant output of a previous cluster. Make sure
# it is properly copied to host memory so it can be used as a
# compile-time constant input for this cluster.
w = array_ops.reshape(z, y)
result = sess.run(w, {x: [1, 0]})
expected = np.array([[4], [2]], dtype=np.int32)
self.assertAllClose(expected, result, rtol=1e-3)
if __name__ == "__main__":
googletest.main()
| ClusteringTest |
python | pytorch__pytorch | test/test_utils.py | {
"start": 26994,
"end": 28732
} | class ____(TestCase):
def test_load_standalone(self):
build_dir = tempfile.mkdtemp()
try:
src_path = os.path.join(build_dir, "main.cpp")
src = textwrap.dedent(
"""\
#include <iostream>
#include <torch/torch.h>
int main() {
auto x = torch::eye(3);
std::cout << x << std::endl;
}
"""
)
with open(src_path, "w") as f:
f.write(src)
exec_path = torch.utils.cpp_extension.load(
"standalone_load_test",
src_path,
build_directory=build_dir,
is_python_module=False,
is_standalone=True,
)
ext = ".exe" if IS_WINDOWS else ""
self.assertEqual(
exec_path, os.path.join(build_dir, f"standalone_load_test{ext}")
)
for shell in [True, False]:
r = subprocess.run(
[exec_path],
shell=shell,
stdout=subprocess.PIPE,
)
self.assertEqual(r.returncode, 0)
self.assertEqual(
# Windows prints "\r\n" for newlines.
textwrap.dedent(r.stdout.decode("utf-8")).replace("\r\n", "\n"),
textwrap.dedent(
"""\
1 0 0
0 1 0
0 0 1
[ CPUFloatType{3,3} ]
"""
),
)
finally:
shutil.rmtree(build_dir)
| TestStandaloneCPPJIT |
python | lepture__authlib | authlib/jose/rfc7516/models.py | {
"start": 827,
"end": 1552
} | class ____(JWEAlgorithmBase, metaclass=ABCMeta):
"""Interface for JWE algorithm with tag-aware key agreement (in key agreement
with key wrapping mode).
ECDH-1PU is an example of such an algorithm.
"""
def generate_keys_and_prepare_headers(self, enc_alg, key, sender_key, preset=None):
raise NotImplementedError
def agree_upon_key_and_wrap_cek(
self, enc_alg, headers, key, sender_key, epk, cek, tag
):
raise NotImplementedError
def wrap(self, enc_alg, headers, key, sender_key, preset=None):
raise NotImplementedError
def unwrap(self, enc_alg, ek, headers, key, sender_key, tag=None):
raise NotImplementedError
| JWEAlgorithmWithTagAwareKeyAgreement |
python | django-compressor__django-compressor | compressor/tests/test_offline.py | {
"start": 16623,
"end": 16891
} | class ____(OfflineTestCaseMixin, TestCase):
templates_dir = "test_with_context"
expected_hash = "c6bf81bca7ad"
additional_test_settings = {
"COMPRESS_OFFLINE_CONTEXT": {
"content": "OK!",
}
}
| OfflineCompressTestCaseWithContext |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 348626,
"end": 349503
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of
UpdateEnterpriseTwoFactorAuthenticationRequiredSetting
"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "setting_value", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The ID of the enterprise on which to set the two factor
authentication required setting.
"""
setting_value = sgqlc.types.Field(sgqlc.types.non_null(EnterpriseEnabledSettingValue), graphql_name="settingValue")
"""The value for the two factor authentication required setting on
the enterprise.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateEnterpriseTwoFactorAuthenticationRequiredSettingInput |
python | jmcnamara__XlsxWriter | xlsxwriter/test/vml/test_write_path.py | {
"start": 289,
"end": 1362
} | class ____(unittest.TestCase):
"""
Test the Vml _write_path() method.
"""
def setUp(self):
self.fh = StringIO()
self.vml = Vml()
self.vml._set_filehandle(self.fh)
def test_write_comment_path_1(self):
"""Test the _write_comment_path() method"""
self.vml._write_comment_path("t", "rect")
exp = """<v:path gradientshapeok="t" o:connecttype="rect"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_comment_path_2(self):
"""Test the _write_comment_path() method"""
self.vml._write_comment_path(None, "none")
exp = """<v:path o:connecttype="none"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
def test_write_button_path(self):
"""Test the _write_button_path() method"""
self.vml._write_button_path()
exp = """<v:path shadowok="f" o:extrusionok="f" strokeok="f" fillok="f" o:connecttype="rect"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteVpath |
python | readthedocs__readthedocs.org | readthedocs/proxito/views/serve.py | {
"start": 35749,
"end": 35839
} | class ____(SettingsOverrideObject):
_default_class = ServeSitemapXMLBase
| ServeSitemapXML |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 142168,
"end": 145227
} | class ____(DialectKWArgs):
"""Defines options for a named database sequence or an identity column.
.. seealso::
:class:`.Sequence`
"""
def __init__(
self,
start: Optional[int] = None,
increment: Optional[int] = None,
minvalue: Optional[int] = None,
maxvalue: Optional[int] = None,
nominvalue: Optional[bool] = None,
nomaxvalue: Optional[bool] = None,
cycle: Optional[bool] = None,
cache: Optional[int] = None,
order: Optional[bool] = None,
**dialect_kw: Any,
) -> None:
"""Construct a :class:`.IdentityOptions` object.
See the :class:`.Sequence` documentation for a complete description
of the parameters.
:param start: the starting index of the sequence.
:param increment: the increment value of the sequence.
:param minvalue: the minimum value of the sequence.
:param maxvalue: the maximum value of the sequence.
:param nominvalue: no minimum value of the sequence.
:param nomaxvalue: no maximum value of the sequence.
:param cycle: allows the sequence to wrap around when the maxvalue
or minvalue has been reached.
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance.
:param order: optional boolean value; if ``True``, renders the
ORDER keyword.
.. deprecated:: 2.1 Use ``oracle_order`` instead.
"""
self.start = start
self.increment = increment
self.minvalue = minvalue
self.maxvalue = maxvalue
self.nominvalue = nominvalue
self.nomaxvalue = nomaxvalue
self.cycle = cycle
self.cache = cache
if order is not None:
if "oracle_order" in dialect_kw:
raise exc.ArgumentError(
"Cannot specify both 'order' and 'oracle_order'. "
"Plese use only 'oracle_order'."
)
dialect_kw["oracle_order"] = order
self._validate_dialect_kwargs(dialect_kw)
@property
def _increment_is_negative(self) -> bool:
return self.increment is not None and self.increment < 0
@property
def order(self) -> Optional[bool]:
"""Alias of the ``dialect_kwargs`` ``'oracle_order'``.
.. deprecated:: 2.1 The 'order' attribute is deprecated.
"""
value: Optional[bool] = self.dialect_kwargs.get("oracle_order")
return value
def _as_dict(self) -> Dict[str, Any]:
return {
k: v
for k, v in {
"start": self.start,
"increment": self.increment,
"minvalue": self.minvalue,
"maxvalue": self.maxvalue,
"nominvalue": self.nominvalue,
"nomaxvalue": self.nomaxvalue,
"cycle": self.cycle,
"cache": self.cache,
}.items()
if v != None
}
| IdentityOptions |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_webagg.py | {
"start": 1132,
"end": 1807
} | class ____(core.FigureManagerWebAgg):
_toolbar2_class = core.NavigationToolbar2WebAgg
@classmethod
def pyplot_show(cls, *, block=None):
WebAggApplication.initialize()
url = "http://{address}:{port}{prefix}".format(
address=WebAggApplication.address,
port=WebAggApplication.port,
prefix=WebAggApplication.url_prefix)
if mpl.rcParams['webagg.open_in_browser']:
import webbrowser
if not webbrowser.open(url):
print(f"To view figure, visit {url}")
else:
print(f"To view figure, visit {url}")
WebAggApplication.start()
| FigureManagerWebAgg |
python | kamyu104__LeetCode-Solutions | Python/greatest-sum-divisible-by-three.py | {
"start": 29,
"end": 316
} | class ____(object):
def maxSumDivThree(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
dp = [0, 0, 0]
for num in nums:
for i in [num+x for x in dp]:
dp[i%3] = max(dp[i%3], i)
return dp[0]
| Solution |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 59426,
"end": 59783
} | class ____(BaseModel):
usage: Optional["Usage"] = Field(default=None, description="")
time: Optional[float] = Field(default=None, description="Time spent to process this request")
status: Optional[str] = Field(default=None, description="")
result: Optional["SearchMatrixPairsResponse"] = Field(default=None, description="")
| InlineResponse20023 |
python | astropy__astropy | astropy/constants/constant.py | {
"start": 8842,
"end": 9418
} | class ____(Constant):
"""An electromagnetic constant."""
@property
def cgs(self):
"""Overridden for EMConstant to raise a `TypeError`
emphasizing that there are multiple EM extensions to CGS.
"""
raise TypeError(
"Cannot convert EM constants to cgs because there "
"are different systems for E.M constants within the "
"c.g.s system (ESU, Gaussian, etc.). Instead, "
"directly use the constant with the appropriate "
"suffix (e.g. e.esu, e.gauss, etc.)."
)
| EMConstant |
python | encode__django-rest-framework | tests/test_versioning.py | {
"start": 4933,
"end": 8095
} | class ____(URLPatternsTestCase, APITestCase):
included = [
path('namespaced/', dummy_view, name='another'),
path('example/<int:pk>/', dummy_pk_view, name='example-detail')
]
urlpatterns = [
path('v1/', include((included, 'v1'), namespace='v1')),
path('another/', dummy_view, name='another'),
re_path(r'^(?P<version>[v1|v2]+)/another/$', dummy_view, name='another'),
re_path(r'^(?P<foo>.+)/unversioned/$', dummy_view, name='unversioned'),
]
def test_reverse_unversioned(self):
view = ReverseView.as_view()
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'url': 'http://testserver/another/'}
def test_reverse_query_param_versioning(self):
scheme = versioning.QueryParameterVersioning
view = ReverseView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/?version=v1')
response = view(request)
assert response.data == {'url': 'http://testserver/another/?version=v1'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'url': 'http://testserver/another/'}
@override_settings(ALLOWED_HOSTS=['*'])
def test_reverse_host_name_versioning(self):
scheme = versioning.HostNameVersioning
view = ReverseView.as_view(versioning_class=scheme)
request = factory.get('/endpoint/', HTTP_HOST='v1.example.org')
response = view(request)
assert response.data == {'url': 'http://v1.example.org/another/'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'url': 'http://testserver/another/'}
def test_reverse_url_path_versioning(self):
scheme = versioning.URLPathVersioning
view = ReverseView.as_view(versioning_class=scheme)
request = factory.get('/v1/endpoint/')
response = view(request, version='v1')
assert response.data == {'url': 'http://testserver/v1/another/'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'url': 'http://testserver/another/'}
# Test fallback when kwargs is not None
request = factory.get('/v1/endpoint/')
request.versioning_scheme = scheme()
request.version = 'v1'
reversed_url = reverse('unversioned', request=request, kwargs={'foo': 'bar'})
assert reversed_url == 'http://testserver/bar/unversioned/'
def test_reverse_namespace_versioning(self):
class FakeResolverMatch(ResolverMatch):
namespace = 'v1'
scheme = versioning.NamespaceVersioning
view = ReverseView.as_view(versioning_class=scheme)
request = factory.get('/v1/endpoint/')
request.resolver_match = FakeResolverMatch
response = view(request, version='v1')
assert response.data == {'url': 'http://testserver/v1/namespaced/'}
request = factory.get('/endpoint/')
response = view(request)
assert response.data == {'url': 'http://testserver/another/'}
| TestURLReversing |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 9543,
"end": 12433
} | class ____(ReturnsRows):
"""Mark a class as being selectable."""
__visit_name__ = "selectable"
is_selectable = True
def _refresh_for_new_column(self, column: ColumnElement[Any]) -> None:
raise NotImplementedError()
def lateral(self, name: Optional[str] = None) -> LateralFromClause:
"""Return a LATERAL alias of this :class:`_expression.Selectable`.
The return value is the :class:`_expression.Lateral` construct also
provided by the top-level :func:`_expression.lateral` function.
.. seealso::
:ref:`tutorial_lateral_correlation` - overview of usage.
"""
return Lateral._construct(self, name=name)
@util.deprecated(
"1.4",
message="The :meth:`.Selectable.replace_selectable` method is "
"deprecated, and will be removed in a future release. Similar "
"functionality is available via the sqlalchemy.sql.visitors module.",
)
@util.preload_module("sqlalchemy.sql.util")
def replace_selectable(self, old: FromClause, alias: Alias) -> Self:
"""Replace all occurrences of :class:`_expression.FromClause`
'old' with the given :class:`_expression.Alias`
object, returning a copy of this :class:`_expression.FromClause`.
"""
return util.preloaded.sql_util.ClauseAdapter(alias).traverse(self)
def corresponding_column(
self, column: KeyedColumnElement[Any], require_embedded: bool = False
) -> Optional[KeyedColumnElement[Any]]:
"""Given a :class:`_expression.ColumnElement`, return the exported
:class:`_expression.ColumnElement` object from the
:attr:`_expression.Selectable.exported_columns`
collection of this :class:`_expression.Selectable`
which corresponds to that
original :class:`_expression.ColumnElement` via a common ancestor
column.
:param column: the target :class:`_expression.ColumnElement`
to be matched.
:param require_embedded: only return corresponding columns for
the given :class:`_expression.ColumnElement`, if the given
:class:`_expression.ColumnElement`
is actually present within a sub-element
of this :class:`_expression.Selectable`.
Normally the column will match if
it merely shares a common ancestor with one of the exported
columns of this :class:`_expression.Selectable`.
.. seealso::
:attr:`_expression.Selectable.exported_columns` - the
:class:`_expression.ColumnCollection`
that is used for the operation.
:meth:`_expression.ColumnCollection.corresponding_column`
- implementation
method.
"""
return self.exported_columns.corresponding_column(
column, require_embedded
)
| Selectable |
python | getsentry__sentry | tests/sentry/issues/test_issue_search.py | {
"start": 16279,
"end": 17228
} | class ____(TestCase):
def test_user(self) -> None:
assert convert_actor_or_none_value(
["me"], [self.project], self.user, None
) == convert_user_value(["me"], [self.project], self.user, None)
def test_my_team(self) -> None:
assert convert_actor_or_none_value(
["my_teams"], [self.project], self.user, None
) == get_teams_for_users([self.project], [self.user])
def test_none(self) -> None:
assert convert_actor_or_none_value(["none"], [self.project], self.user, None) == [None]
def test_team(self) -> None:
assert convert_actor_or_none_value(
[f"#{self.team.slug}"], [self.project], self.user, None
) == [self.team]
def test_invalid_team(self) -> None:
ret = convert_actor_or_none_value(["#never_upgrade"], [self.project], self.user, None)[0]
assert ret is not None
assert ret.id == 0
| ConvertActorOrNoneValueTest |
python | openai__openai-python | src/openai/types/beta/realtime/session_update_event.py | {
"start": 883,
"end": 1051
} | class ____(BaseModel):
expires_after: Optional[SessionClientSecretExpiresAfter] = None
"""Configuration for the ephemeral token expiration."""
| SessionClientSecret |
python | ansible__ansible | test/integration/targets/protomatter/lookup_plugins/synthetic_plugin_info.py | {
"start": 137,
"end": 359
} | class ____(LookupBase):
def run(self, terms, variables=None, **kwargs):
return [_messages.PluginInfo(
resolved_name='ns.col.module',
type=_messages.PluginType.MODULE,
)]
| LookupModule |
python | pypa__warehouse | warehouse/oidc/models/github.py | {
"start": 5913,
"end": 11874
} | class ____:
"""
Common functionality for both pending and concrete GitHub OIDC publishers.
"""
repository_name: Mapped[str] = mapped_column(String, nullable=False)
repository_owner: Mapped[str] = mapped_column(String, nullable=False)
repository_owner_id: Mapped[str] = mapped_column(String, nullable=False)
workflow_filename: Mapped[str] = mapped_column(String, nullable=False)
environment: Mapped[str] = mapped_column(String, nullable=False)
__required_verifiable_claims__: dict[str, CheckClaimCallable[Any]] = {
"sub": _check_sub,
"repository": _check_repository,
"repository_owner": check_claim_binary(str.__eq__),
"repository_owner_id": check_claim_binary(str.__eq__),
"job_workflow_ref": _check_job_workflow_ref,
"jti": check_existing_jti,
"event_name": _check_event_name,
}
__required_unverifiable_claims__: set[str] = {"ref", "sha"}
__optional_verifiable_claims__: dict[str, CheckClaimCallable[Any]] = {
"environment": _check_environment,
}
__unchecked_claims__ = {
"actor",
"actor_id",
"run_id",
"run_number",
"run_attempt",
"head_ref",
"base_ref",
"ref_type",
"repository_id",
"workflow",
"repository_visibility",
"workflow_sha",
"job_workflow_sha",
"workflow_ref",
"runner_environment",
"environment_node_id",
"enterprise",
"enterprise_id",
"ref_protected",
"check_run_id",
}
# Get the most specific publisher from a list of publishers,
# where publishers constrained with an environment are more
# specific than publishers not constrained on environment.
@classmethod
def _get_publisher_for_environment(
cls, publishers: list[Self], environment: str | None
) -> Self | None:
if environment:
if specific_publisher := first_true(
publishers, pred=lambda p: p.environment == environment.lower()
):
return specific_publisher
if general_publisher := first_true(
publishers, pred=lambda p: p.environment == ""
):
return general_publisher
return None
@classmethod
def lookup_by_claims(cls, session: Session, signed_claims: SignedClaims) -> Self:
repository = signed_claims["repository"]
repository_owner, repository_name = repository.split("/", 1)
job_workflow_ref = signed_claims["job_workflow_ref"]
environment = signed_claims.get("environment")
if not (job_workflow_filename := _extract_workflow_filename(job_workflow_ref)):
raise InvalidPublisherError(
"Could not job extract workflow filename from OIDC claims"
)
query: Query = Query(cls).filter_by(
repository_name=repository_name,
repository_owner=repository_owner,
repository_owner_id=signed_claims["repository_owner_id"],
workflow_filename=job_workflow_filename,
)
publishers = query.with_session(session).all()
if publisher := cls._get_publisher_for_environment(publishers, environment):
return publisher
else:
raise InvalidPublisherError("Publisher with matching claims was not found")
@property
def _workflow_slug(self) -> str:
return f".github/workflows/{self.workflow_filename}"
@property
def publisher_name(self) -> str:
return "GitHub"
@property
def repository(self) -> str:
return f"{self.repository_owner}/{self.repository_name}"
@property
def job_workflow_ref(self) -> str:
return f"{self.repository}/{self._workflow_slug}"
@property
def sub(self) -> str:
return f"repo:{self.repository}"
@property
def publisher_base_url(self) -> str:
return f"https://github.com/{self.repository}"
@property
def jti(self) -> str:
"""Placeholder value for JTI."""
return "placeholder"
@property
def event_name(self) -> str:
"""Placeholder value for event_name (not used)"""
return "placeholder"
def publisher_url(self, claims: SignedClaims | None = None) -> str:
base = self.publisher_base_url
sha = claims.get("sha") if claims else None
if sha:
return f"{base}/commit/{sha}"
return base
@property
def attestation_identity(self) -> Publisher | None:
return GitHubIdentity(
repository=self.repository,
workflow=self.workflow_filename,
environment=self.environment if self.environment else None,
)
def stored_claims(self, claims: SignedClaims | None = None) -> dict:
claims_obj = claims if claims else {}
return {"ref": claims_obj.get("ref"), "sha": claims_obj.get("sha")}
def __str__(self) -> str:
return self.workflow_filename
def exists(self, session: Session) -> bool:
return session.query(
exists().where(
and_(
self.__class__.repository_name == self.repository_name,
self.__class__.repository_owner == self.repository_owner,
self.__class__.workflow_filename == self.workflow_filename,
self.__class__.environment == self.environment,
)
)
).scalar()
@property
def admin_details(self) -> list[tuple[str, str]]:
"""Returns GitHub publisher configuration details for admin display."""
details = [
("Repository", self.repository),
("Workflow", self.workflow_filename),
("Owner ID", self.repository_owner_id),
]
if self.environment:
details.append(("Environment", self.environment))
return details
| GitHubPublisherMixin |
python | Textualize__textual | src/textual/widgets/_placeholder.py | {
"start": 1613,
"end": 1720
} | class ____(Exception):
"""Raised when an invalid Placeholder variant is set."""
| InvalidPlaceholderVariant |
python | Pylons__pyramid | src/pyramid/static.py | {
"start": 531,
"end": 11395
} | class ____:
"""An instance of this class is a callable which can act as a
:app:`Pyramid` :term:`view callable`; this view will serve
static files from a directory on disk based on the ``root_dir``
you provide to its constructor.
The directory may contain subdirectories (recursively); the static
view implementation will descend into these directories as
necessary based on the components of the URL in order to resolve a
path into a response.
You may pass an absolute or relative filesystem path or a
:term:`asset specification` representing the directory
containing static files as the ``root_dir`` argument to this
class' constructor.
If the ``root_dir`` path is relative, and the ``package_name``
argument is ``None``, ``root_dir`` will be considered relative to
the directory in which the Python file which *calls* ``static``
resides. If the ``package_name`` name argument is provided, and a
relative ``root_dir`` is provided, the ``root_dir`` will be
considered relative to the Python :term:`package` specified by
``package_name`` (a dotted path to a Python package).
``cache_max_age`` influences the ``Expires`` and ``Max-Age``
response headers returned by the view (default is 3600 seconds or
one hour).
``use_subpath`` influences whether ``request.subpath`` will be used as
``PATH_INFO`` when calling the underlying WSGI application which actually
serves the static files. If it is ``True``, the static application will
consider ``request.subpath`` as ``PATH_INFO`` input. If it is ``False``,
the static application will consider request.environ[``PATH_INFO``] as
``PATH_INFO`` input. By default, this is ``False``.
``reload`` controls whether a cache of files is maintained or the asset
subsystem is queried per-request to determine what files are available.
By default, this is ``False`` and new files added while the process is
running are not recognized.
``content_encodings`` is a list of alternative file encodings supported
in the ``Accept-Encoding`` HTTP Header. Alternative files are found using
file extensions defined in :attr:`mimetypes.encodings_map`. An encoded
asset will be returned with the ``Content-Encoding`` header set to the
selected encoding. If the asset contains alternative encodings then the
``Accept-Encoding`` value will be added to the response's ``Vary`` header.
By default, the list is empty and no alternatives will be supported.
.. note::
If the ``root_dir`` is relative to a :term:`package`, or is a
:term:`asset specification` the :app:`Pyramid`
:class:`pyramid.config.Configurator` method can be used to override
assets within the named ``root_dir`` package-relative directory.
However, if the ``root_dir`` is absolute, configuration will not be able
to override the assets it contains.
.. versionchanged:: 2.0
Added ``reload`` and ``content_encodings`` options.
"""
def __init__(
self,
root_dir,
cache_max_age=3600,
package_name=None,
use_subpath=False,
index='index.html',
reload=False,
content_encodings=(),
):
# package_name is for bw compat; it is preferred to pass in a
# package-relative path as root_dir
# (e.g. ``anotherpackage:foo/static``).
self.cache_max_age = cache_max_age
if package_name is None:
package_name = caller_package().__name__
package_name, docroot = resolve_asset_spec(root_dir, package_name)
if package_name:
try:
__import__(package_name)
except ImportError:
warnings.warn(
f'A "pyramid.static.static_view" is being created with an'
f' asset spec referencing a package "{package_name}" that'
f' does not exist. This will break in the future.'
f' If this is done to override an asset, you must adjust'
f' this to override a location inside a real package.',
DeprecationWarning,
stacklevel=2,
)
self.use_subpath = use_subpath
self.package_name = package_name
self.docroot = docroot
self.norm_docroot = normcase(normpath(docroot))
self.index = index
self.reload = reload
self.content_encodings = _compile_content_encodings(content_encodings)
self.filemap = {}
def __call__(self, context, request):
resource_name = self.get_resource_name(request)
files = self.get_possible_files(resource_name)
filepath, content_encoding = self.find_best_match(request, files)
if filepath is None:
raise HTTPNotFound(request.url)
content_type, _ = _guess_type(resource_name)
response = FileResponse(
filepath,
request,
self.cache_max_age,
content_type,
content_encoding,
)
if len(files) > 1:
_add_vary(response, 'Accept-Encoding')
return response
def get_resource_name(self, request):
"""
Return the computed name of the requested resource.
The returned file is not guaranteed to exist.
"""
if self.use_subpath:
path_tuple = request.subpath
else:
path_tuple = traversal_path_info(request.path_info)
path = _secure_path(path_tuple)
if path is None:
raise HTTPNotFound('Out of bounds: %s' % request.url)
# normalize asset spec or fs path into resource_path
if self.package_name: # package resource
resource_path = '{}/{}'.format(self.docroot.rstrip('/'), path)
if resource_isdir(self.package_name, resource_path):
if not request.path_url.endswith('/'):
raise self.add_slash_redirect(request)
resource_path = '{}/{}'.format(
resource_path.rstrip('/'),
self.index,
)
else: # filesystem file
# os.path.normpath converts / to \ on windows
resource_path = normcase(normpath(join(self.norm_docroot, path)))
if isdir(resource_path):
if not request.path_url.endswith('/'):
raise self.add_slash_redirect(request)
resource_path = join(resource_path, self.index)
return resource_path
def find_resource_path(self, name):
"""
Return the absolute path to the resource or ``None`` if it doesn't
exist.
"""
if self.package_name:
if resource_exists(self.package_name, name):
return resource_filename(self.package_name, name)
elif exists(name):
return name
def get_possible_files(self, resource_name):
"""Return a sorted list of ``(size, encoding, path)`` entries."""
result = self.filemap.get(resource_name)
if result is not None:
return result
# XXX we could put a lock around this work but worst case scenario a
# couple requests scan the disk for files at the same time and then
# the cache is set going forward so do not bother
result = []
# add the identity
path = self.find_resource_path(resource_name)
if path:
result.append((path, None))
# add each file we find for the supported encodings
# we don't mind adding multiple files for the same encoding if there
# are copies with different extensions because we sort by size so the
# smallest is always found first and the rest ignored
for encoding, extensions in self.content_encodings.items():
for ext in extensions:
encoded_name = resource_name + ext
path = self.find_resource_path(encoded_name)
if path:
result.append((path, encoding))
# sort the files by size, smallest first
result.sort(key=lambda x: getsize(x[0]))
# only cache the results if reload is disabled
if not self.reload:
self.filemap[resource_name] = result
return result
def find_best_match(self, request, files):
"""Return ``(path | None, encoding)``."""
# if the client did not specify encodings then assume only the
# identity is acceptable
if not request.accept_encoding:
identity_path = next(
(path for path, encoding in files if encoding is None),
None,
)
return identity_path, None
# find encodings the client will accept
acceptable_encodings = {
x[0]
for x in request.accept_encoding.acceptable_offers(
[encoding for path, encoding in files if encoding is not None]
)
}
acceptable_encodings.add(None)
# return the smallest file from the acceptable encodings
# we know that files is sorted by size, smallest first
for path, encoding in files:
if encoding in acceptable_encodings:
return path, encoding
return None, None
def add_slash_redirect(self, request):
url = request.path_url + '/'
qs = request.query_string
if qs:
url = url + '?' + qs
return HTTPMovedPermanently(url)
def _compile_content_encodings(encodings):
"""
Convert mimetypes.encodings_map into a dict of
``(encoding) -> [file extensions]``.
"""
result = {}
for ext, encoding in mimetypes.encodings_map.items():
if encoding in encodings:
result.setdefault(encoding, []).append(ext)
return result
def _add_vary(response, option):
vary = response.vary or []
if not any(x.lower() == option.lower() for x in vary):
vary.append(option)
response.vary = vary
_invalid_element_chars = {'/', os.sep, '\x00'}
def _contains_invalid_element_char(item):
for invalid_element_char in _invalid_element_chars:
if invalid_element_char in item:
return True
_has_insecure_pathelement = {'..', '.', ''}.intersection
@lru_cache(1000)
def _secure_path(path_tuple):
if _has_insecure_pathelement(path_tuple):
# belt-and-suspenders security; this should never be true
# unless someone screws up the traversal_path code
# (request.subpath is computed via traversal_path too)
return None
if any([_contains_invalid_element_char(item) for item in path_tuple]):
return None
encoded = '/'.join(path_tuple) # will be unicode
return encoded
| static_view |
python | huggingface__transformers | src/transformers/models/lxmert/modeling_lxmert.py | {
"start": 38064,
"end": 51892
} | class ____(LxmertPreTrainedModel):
# help saving them
_tied_weights_keys = {
"cls.predictions.decoder.weight": "lxmert.embeddings.word_embeddings.weight",
}
def __init__(self, config):
super().__init__(config)
# Configuration
self.config = config
self.num_qa_labels = config.num_qa_labels
self.visual_loss_normalizer = config.visual_loss_normalizer
# Use of pretraining tasks
self.task_mask_lm = config.task_mask_lm
self.task_obj_predict = config.task_obj_predict
self.task_matched = config.task_matched
self.task_qa = config.task_qa
# Lxmert backbone
self.lxmert = LxmertModel(config)
# Pre-training heads
self.cls = LxmertPreTrainingHeads(config)
if self.task_obj_predict:
self.obj_predict_head = LxmertVisualObjHead(config)
if self.task_qa:
self.answer_head = LxmertVisualAnswerHead(config, self.num_qa_labels)
# Weight initialization
# Initialize weights and apply final processing
self.post_init()
# Loss functions
self.loss_fcts = {
"l2": SmoothL1Loss(reduction="none"),
"visual_ce": CrossEntropyLoss(reduction="none"),
"ce": CrossEntropyLoss(),
}
visual_losses = {}
if config.visual_obj_loss:
visual_losses["obj"] = {
"shape": (-1,),
"num": config.num_object_labels,
"loss": "visual_ce",
}
if config.visual_attr_loss:
visual_losses["attr"] = {
"shape": (-1,),
"num": config.num_attr_labels,
"loss": "visual_ce",
}
if config.visual_feat_loss:
visual_losses["feat"] = {
"shape": (-1, config.visual_feat_dim),
"num": config.visual_feat_dim,
"loss": "l2",
}
self.visual_losses = visual_losses
def resize_token_embeddings(
self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True
) -> nn.Embedding:
# Adding the following steps to resize bias to match the shape of resized embeddings
new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
self.cls.predictions.bias = self._resize_bias(self.cls.predictions.bias, new_num_tokens)
return new_embeddings
def _resize_bias(self, bias, new_num_tokens: int):
old_num_tokens = bias.shape[0]
if new_num_tokens <= old_num_tokens:
new_bias = bias[:new_num_tokens]
else:
extra_bias = torch.zeros(new_num_tokens - old_num_tokens, device=bias.device)
new_bias = torch.cat([bias, extra_bias])
new_bias = nn.Parameter(new_bias)
return new_bias
def resize_num_qa_labels(self, num_labels):
"""
Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
will add newly initialized weights. Reducing the size will remove weights from the end
Args:
num_labels (`int`, *optional*):
New number of labels in the linear layer weight matrix. Increasing the size will add newly initialized
weights at the end. Reducing the size will remove weights from the end. If not provided or `None`, just
returns a pointer to the qa labels ``torch.nn.Linear``` module of the model without doing anything.
Return:
`torch.nn.Linear`: Pointer to the resized Linear layer or the old Linear layer
"""
cur_qa_logit_layer = self.get_qa_logit_layer()
if num_labels is None or cur_qa_logit_layer is None:
return
new_qa_logit_layer = self._resize_qa_labels(num_labels)
self.config.num_qa_labels = num_labels
self.num_qa_labels = num_labels
return new_qa_logit_layer
def _resize_qa_labels(self, num_labels):
cur_qa_logit_layer = self.get_qa_logit_layer()
new_qa_logit_layer = self._get_resized_qa_labels(cur_qa_logit_layer, num_labels)
self._set_qa_logit_layer(new_qa_logit_layer)
return self.get_qa_logit_layer()
def get_qa_logit_layer(self) -> nn.Module:
"""
Returns the linear layer that produces question answering logits.
Returns:
`nn.Module`: A torch module mapping the question answering prediction hidden states or `None` if LXMERT
does not have a visual answering head.
"""
if hasattr(self, "answer_head"):
return self.answer_head.logit_fc[-1]
def _set_qa_logit_layer(self, qa_logit_layer):
self.answer_head.logit_fc[-1] = qa_logit_layer
def _get_resized_qa_labels(self, cur_qa_logit_layer, num_labels):
if num_labels is None:
return cur_qa_logit_layer
cur_qa_labels, hidden_dim = cur_qa_logit_layer.weight.size()
if cur_qa_labels == num_labels:
return cur_qa_logit_layer
# Build new linear output
if getattr(cur_qa_logit_layer, "bias", None) is not None:
new_qa_logit_layer = nn.Linear(hidden_dim, num_labels)
else:
new_qa_logit_layer = nn.Linear(hidden_dim, num_labels, bias=False)
new_qa_logit_layer.to(cur_qa_logit_layer.weight.device)
# initialize all new labels
self._init_weights(new_qa_logit_layer)
# Copy labels from the previous weights
num_labels_to_copy = min(cur_qa_labels, num_labels)
new_qa_logit_layer.weight.data[:num_labels_to_copy, :] = cur_qa_logit_layer.weight.data[:num_labels_to_copy, :]
if getattr(cur_qa_logit_layer, "bias", None) is not None:
new_qa_logit_layer.bias.data[:num_labels_to_copy] = cur_qa_logit_layer.bias.data[:num_labels_to_copy]
return new_qa_logit_layer
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
visual_feats: Optional[torch.FloatTensor] = None,
visual_pos: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
visual_attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
obj_labels: Optional[dict[str, tuple[torch.FloatTensor, torch.FloatTensor]]] = None,
matched_label: Optional[torch.LongTensor] = None,
ans: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[LxmertForPreTrainingOutput, tuple[torch.FloatTensor]]:
r"""
visual_feats (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`):
This input represents visual features. They ROI pooled object features from bounding boxes using a
faster-RCNN model)
These are currently not provided by the transformers library.
visual_pos (`torch.FloatTensor` of shape `(batch_size, num_visual_features, visual_pos_dim)`):
This input represents spatial features corresponding to their relative (via index) visual features. The
pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to
1.
These are currently not provided by the transformers library.
visual_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
obj_labels (`dict[Str: tuple[Torch.FloatTensor, Torch.FloatTensor]]`, *optional*):
each key is named after each one of the visual losses and each element of the tuple is of the shape
`(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for each the label id and
the label score respectively
matched_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the whether or not the text input matches the image (classification) loss. Input
should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
- 0 indicates that the sentence does not match the image,
- 1 indicates that the sentence does match the image.
ans (`Torch.Tensor` of shape `(batch_size)`, *optional*):
a one hot representation hof the correct answer *optional*
"""
if "masked_lm_labels" in kwargs:
warnings.warn(
"The `masked_lm_labels` argument is deprecated and will be removed in a future version, use `labels`"
" instead.",
FutureWarning,
)
labels = kwargs.pop("masked_lm_labels")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
device = input_ids.device if input_ids is not None else inputs_embeds.device
lxmert_output = self.lxmert(
input_ids=input_ids,
visual_feats=visual_feats,
visual_pos=visual_pos,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
visual_attention_mask=visual_attention_mask,
inputs_embeds=inputs_embeds,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
lang_output, visual_output, pooled_output = (
lxmert_output[0],
lxmert_output[1],
lxmert_output[2],
)
lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output)
if self.task_qa:
answer_score = self.answer_head(pooled_output)
else:
answer_score = pooled_output[0][0]
total_loss = (
None
if (labels is None and matched_label is None and obj_labels is None and ans is None)
else torch.tensor(0.0, device=device)
)
if labels is not None and self.task_mask_lm:
masked_lm_loss = self.loss_fcts["ce"](
lang_prediction_scores.view(-1, self.config.vocab_size),
labels.view(-1),
)
total_loss += masked_lm_loss
if matched_label is not None and self.task_matched:
matched_loss = self.loss_fcts["ce"](cross_relationship_score.view(-1, 2), matched_label.view(-1))
total_loss += matched_loss
if obj_labels is not None and self.task_obj_predict:
total_visual_loss = torch.tensor(0.0, device=input_ids.device)
visual_prediction_scores_dict = self.obj_predict_head(visual_output)
for key, key_info in self.visual_losses.items():
label, mask_conf = obj_labels[key]
output_dim = key_info["num"]
loss_fct_name = key_info["loss"]
label_shape = key_info["shape"]
weight = self.visual_loss_normalizer
visual_loss_fct = self.loss_fcts[loss_fct_name]
visual_prediction_scores = visual_prediction_scores_dict[key]
visual_loss = visual_loss_fct(
visual_prediction_scores.view(-1, output_dim),
label.view(label_shape),
)
if visual_loss.dim() > 1: # Regression Losses
visual_loss = visual_loss.mean(1)
visual_loss = (visual_loss * mask_conf.view(-1)).mean() * weight
total_visual_loss += visual_loss
total_loss += total_visual_loss
if ans is not None and self.task_qa:
answer_loss = self.loss_fcts["ce"](answer_score.view(-1, self.num_qa_labels), ans.view(-1))
total_loss += answer_loss
if not return_dict:
output = (
lang_prediction_scores,
cross_relationship_score,
answer_score,
) + lxmert_output[3:]
return ((total_loss,) + output) if total_loss is not None else output
return LxmertForPreTrainingOutput(
loss=total_loss,
prediction_logits=lang_prediction_scores,
cross_relationship_score=cross_relationship_score,
question_answering_score=answer_score,
language_hidden_states=lxmert_output.language_hidden_states,
vision_hidden_states=lxmert_output.vision_hidden_states,
language_attentions=lxmert_output.language_attentions,
vision_attentions=lxmert_output.vision_attentions,
cross_encoder_attentions=lxmert_output.cross_encoder_attentions,
)
@auto_docstring(
custom_intro="""
Lxmert Model with a visual-answering head on top for downstream QA tasks
"""
)
| LxmertForPreTraining |
python | tensorflow__tensorflow | tensorflow/tools/compatibility/ast_edits_test.py | {
"start": 1643,
"end": 1891
} | class ____(ast_edits.NoUpdateSpec):
"""A specification which deprecates 'a.b'."""
def __init__(self):
ast_edits.NoUpdateSpec.__init__(self)
self.module_deprecations.update({"a.b": (ast_edits.ERROR, "a.b is evil.")})
| ModuleDeprecationSpec |
python | pytorch__pytorch | torch/distributions/relaxed_bernoulli.py | {
"start": 590,
"end": 4255
} | class ____(Distribution):
r"""
Creates a LogitRelaxedBernoulli distribution parameterized by :attr:`probs`
or :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli
distribution.
Samples are logits of values in (0, 1). See [1] for more details.
Args:
temperature (Tensor): relaxation temperature
probs (Number, Tensor): the probability of sampling `1`
logits (Number, Tensor): the log-odds of sampling `1`
[1] The Concrete Distribution: A Continuous Relaxation of Discrete Random
Variables (Maddison et al., 2017)
[2] Categorical Reparametrization with Gumbel-Softmax
(Jang et al., 2017)
"""
# pyrefly: ignore [bad-override]
arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real}
support = constraints.real
def __init__(
self,
temperature: Tensor,
probs: Optional[Union[Tensor, Number]] = None,
logits: Optional[Union[Tensor, Number]] = None,
validate_args: Optional[bool] = None,
) -> None:
self.temperature = temperature
if (probs is None) == (logits is None):
raise ValueError(
"Either `probs` or `logits` must be specified, but not both."
)
if probs is not None:
is_scalar = isinstance(probs, _Number)
# pyrefly: ignore [read-only]
(self.probs,) = broadcast_all(probs)
else:
assert logits is not None # helps mypy
is_scalar = isinstance(logits, _Number)
# pyrefly: ignore [read-only]
(self.logits,) = broadcast_all(logits)
self._param = self.probs if probs is not None else self.logits
if is_scalar:
batch_shape = torch.Size()
else:
batch_shape = self._param.size()
super().__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(LogitRelaxedBernoulli, _instance)
batch_shape = torch.Size(batch_shape)
new.temperature = self.temperature
if "probs" in self.__dict__:
new.probs = self.probs.expand(batch_shape)
new._param = new.probs
if "logits" in self.__dict__:
new.logits = self.logits.expand(batch_shape)
new._param = new.logits
super(LogitRelaxedBernoulli, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._param.new(*args, **kwargs)
@lazy_property
def logits(self) -> Tensor:
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self) -> Tensor:
return logits_to_probs(self.logits, is_binary=True)
@property
def param_shape(self) -> torch.Size:
return self._param.size()
def rsample(self, sample_shape: _size = torch.Size()) -> Tensor:
shape = self._extended_shape(sample_shape)
probs = clamp_probs(self.probs.expand(shape))
uniforms = clamp_probs(
torch.rand(shape, dtype=probs.dtype, device=probs.device)
)
return (
uniforms.log() - (-uniforms).log1p() + probs.log() - (-probs).log1p()
) / self.temperature
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits, value)
diff = logits - value.mul(self.temperature)
return self.temperature.log() + diff - 2 * diff.exp().log1p()
| LogitRelaxedBernoulli |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc8628/errors.py | {
"start": 950,
"end": 1235
} | class ____(OAuth2Error):
"""
A variant of "authorization_pending", the authorization request is
still pending and polling should continue, but the interval MUST
be increased by 5 seconds for this and all subsequent requests.
"""
error = "slow_down"
| SlowDownError |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/polymorphic_function_xla_test.py | {
"start": 1000,
"end": 1663
} | class ____(xla_test.XLATestCase):
def testVarInitializedInFunction(self):
with self.test_scope():
v_holder = []
@polymorphic_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
self.assertAllClose([13., 14.], add_var(constant_op.constant(2.)))
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
| FunctionTests |
python | huggingface__transformers | src/transformers/models/tapas/modeling_tapas.py | {
"start": 1588,
"end": 2648
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)):
Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the
semi-supervised regression loss and (optionally) supervised loss for aggregations.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Prediction scores of the cell selection head, for every token.
logits_aggregation (`torch.FloatTensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`):
Prediction scores of the aggregation head, for every aggregation operator.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
logits_aggregation: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
| TableQuestionAnsweringOutput |
python | getsentry__sentry | src/sentry/api/serializers/models/actor.py | {
"start": 100,
"end": 232
} | class ____(TypedDict):
type: Literal["user", "team"]
id: str
name: str
email: NotRequired[str]
| ActorSerializerResponse |
python | mlflow__mlflow | tests/helper_functions.py | {
"start": 19458,
"end": 27085
} | class ____(str):
def __eq__(self, other):
return self in other
def assert_array_almost_equal(actual_array, desired_array, rtol=1e-6):
import numpy as np
elem0 = actual_array[0]
if isinstance(elem0, numbers.Number) or (
isinstance(elem0, (list, np.ndarray)) and isinstance(elem0[0], numbers.Number)
):
np.testing.assert_allclose(actual_array, desired_array, rtol=rtol)
else:
np.testing.assert_array_equal(actual_array, desired_array)
def _mlflow_major_version_string():
from mlflow.utils.environment import _generate_mlflow_version_pinning
return _generate_mlflow_version_pinning()
@contextmanager
def mock_http_request_200():
with mock.patch(
"mlflow.utils.rest_utils.http_request",
return_value=mock.MagicMock(status_code=200, text="{}"),
) as m:
yield m
def mock_http_200(f):
@functools.wraps(f)
@mock.patch(
"mlflow.utils.rest_utils.http_request",
return_value=mock.MagicMock(status_code=200, text="{}"),
)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
@contextmanager
def mock_http_request_403_200():
with mock.patch(
"mlflow.utils.rest_utils.http_request",
side_effect=[
mock.MagicMock(status_code=403, text='{"error_code": "ENDPOINT_NOT_FOUND"}'),
mock.MagicMock(status_code=200, text="{}"),
],
) as m:
yield m
def clear_hub_cache():
"""
Frees up disk space for cached huggingface transformers models and components.
This function will remove all files within the cache if the total size of objects exceeds
1 GB on disk. It is used only in CI testing to alleviate the disk burden on the runners as
they have limited allocated space and will terminate if the available disk space drops too low.
"""
try:
from huggingface_hub import scan_cache_dir
full_cache = scan_cache_dir()
cache_size_in_gb = full_cache.size_on_disk / 1000**3
if cache_size_in_gb > 1:
commits_to_purge = [
rev.commit_hash for repo in full_cache.repos for rev in repo.revisions
]
delete_strategy = full_cache.delete_revisions(*commits_to_purge)
delete_strategy.execute()
except ImportError:
# Local import check for mlflow-skinny not including huggingface_hub
pass
except Exception as e:
_logger.warning(f"Failed to clear cache: {e}", exc_info=True)
def flaky(max_tries=3):
"""
Annotation decorator for retrying flaky functions up to max_tries times, and raise the Exception
if it fails after max_tries attempts.
Args:
max_tries: Maximum number of times to retry the function.
Returns:
Decorated function.
"""
def flaky_test_func(test_func):
@wraps(test_func)
def decorated_func(*args, **kwargs):
for i in range(max_tries):
try:
return test_func(*args, **kwargs)
except Exception as e:
_logger.warning(f"Attempt {i + 1} failed with error: {e}")
if i == max_tries - 1:
raise
time.sleep(3)
return decorated_func
return flaky_test_func
@contextmanager
def start_mock_openai_server():
"""
Start a fake service that mimics the OpenAI endpoints such as /chat/completions.
Yields:
The base URL of the mock OpenAI server.
"""
port = get_safe_port()
script_path = Path(__file__).parent / "openai" / "mock_openai.py"
with subprocess.Popen(
[sys.executable, script_path, "--host", "localhost", "--port", str(port)]
) as proc:
try:
base_url = f"http://localhost:{port}"
for _ in range(10):
try:
resp = requests.get(f"{base_url}/health")
except requests.ConnectionError:
time.sleep(2)
continue
if resp.ok:
break
else:
proc.kill()
proc.wait()
raise RuntimeError("Failed to start mock OpenAI server")
yield base_url
finally:
proc.kill()
def _is_hf_hub_healthy() -> bool:
"""
Check if the Hugging Face Hub is healthy by attempting to load a small dataset.
"""
try:
import datasets
from huggingface_hub import HfApi
except ImportError:
# Cannot import datasets or huggingface_hub, so we assume the hub is healthy.
return True
try:
for dataset in HfApi().list_datasets(filter="size_categories:n<1K", limit=10):
# Gated datasets (e.g., https://huggingface.co/datasets/PatronusAI/TRAIL) require
# authentication to access.
if not dataset.gated:
datasets.load_dataset(dataset.id)
return True
return True
except requests.exceptions.RequestException:
return False
except Exception as e:
_logger.warning(f"Unexpected error while checking Hugging Face Hub health: {e}. ")
# For any other exceptions, we assume the hub is healthy.
return True
def _iter_pr_files() -> Iterator[str]:
if "GITHUB_ACTIONS" not in os.environ:
return
if os.environ.get("GITHUB_EVENT_NAME") != "pull_request":
return
with open(os.environ["GITHUB_EVENT_PATH"]) as f:
pr_data = json.load(f)
pull_number = pr_data["pull_request"]["number"]
repo = pr_data["repository"]["full_name"]
page = 1
per_page = 100
headers = {"Authorization": token} if (token := os.environ.get("GITHUB_TOKEN")) else None
while True:
resp = requests.get(
f"https://api.github.com/repos/{repo}/pulls/{pull_number}/files",
params={"per_page": per_page, "page": page},
headers=headers,
)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
_logger.warning(
f"Failed to fetch PR files: {e}. Skipping the check for Hugging Face Hub health."
)
return
files = [f["filename"] for f in resp.json()]
yield from files
if len(files) < per_page:
break
page += 1
@functools.lru_cache(maxsize=1)
def _should_skip_hf_test() -> bool:
if "CI" not in os.environ:
# This is not a CI run. Do not skip tests.
return False
if any(("huggingface" in f or "transformers" in f) for f in _iter_pr_files()):
# This PR modifies huggingface-related files. Do not skip tests.
return False
# Skip tests if the Hugging Face Hub is unhealthy.
return not _is_hf_hub_healthy()
def skip_if_hf_hub_unhealthy():
    """Return a pytest ``skipif`` marker that fires when the HF Hub is down."""
    reason = (
        "Skipping test because Hugging Face Hub is unhealthy. "
        "See https://status.huggingface.co/ for more information."
    )
    return pytest.mark.skipif(_should_skip_hf_test(), reason=reason)
def get_logged_model_by_name(name: str) -> LoggedModel | None:
    """
    Get a logged model by name. If multiple logged models with
    the same name exist, get the latest one.
    Args:
        name: The name of the logged model.
    Returns:
        The logged model, or None when no model with that name exists.
    """
    matches = mlflow.search_logged_models(
        filter_string=f"name='{name}'", output_format="list", max_results=1
    )
    return next(iter(matches), None)
| AnyStringWith |
python | plotly__plotly.py | plotly/graph_objs/isosurface/legendgrouptitle/_font.py | {
"start": 233,
"end": 9942
} | class ____(_BaseTraceHierarchyType):
    # NOTE(review): this looks like generated plotly property-class
    # boilerplate for the isosurface legend-group-title font.
    # Dotted paths locating this object within a figure.
    _parent_path_str = "isosurface.legendgrouptitle"
    _path_str = "isosurface.legendgrouptitle.font"
    # Property names accepted by this object; anything else is invalid
    # (silently dropped when skip_invalid is passed to the constructor).
    _valid_props = {
        "color",
        "family",
        "lineposition",
        "shadow",
        "size",
        "style",
        "textcase",
        "variant",
        "weight",
    }
    # Each property below reads/writes the underlying plotly property via
    # item access on the base class.
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color: see https://plotly.com/python/css-colors/ for a list
        Returns
        -------
        str
        """
        return self["color"]
    @color.setter
    def color(self, val):
        self["color"] = val
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser can only apply a font if it is
        available on the system where it runs. Provide multiple font
        families, separated by commas, to indicate the order in which
        to apply fonts if they aren't available.
        The 'family' property is a string and must be specified as:
          - A non-empty string
        Returns
        -------
        str
        """
        return self["family"]
    @family.setter
    def family(self, val):
        self["family"] = val
    @property
    def lineposition(self):
        """
        Sets the kind of decoration line(s) with text, such as an
        "under", "over" or "through" as well as combinations e.g.
        "under+over", etc.
        The 'lineposition' property is a flaglist and may be specified
        as a string containing:
          - Any combination of ['under', 'over', 'through'] joined with '+' characters
            (e.g. 'under+over')
            OR exactly one of ['none'] (e.g. 'none')
        Returns
        -------
        Any
        """
        return self["lineposition"]
    @lineposition.setter
    def lineposition(self, val):
        self["lineposition"] = val
    @property
    def shadow(self):
        """
        Sets the shape and color of the shadow behind text. "auto"
        places minimal shadow and applies contrast text font color. See
        https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
        for additional options.
        The 'shadow' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
        Returns
        -------
        str
        """
        return self["shadow"]
    @shadow.setter
    def shadow(self, val):
        self["shadow"] = val
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
        Returns
        -------
        int|float
        """
        return self["size"]
    @size.setter
    def size(self, val):
        self["size"] = val
    @property
    def style(self):
        """
        Sets whether a font should be styled with a normal or italic
        face from its family.
        The 'style' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'italic']
        Returns
        -------
        Any
        """
        return self["style"]
    @style.setter
    def style(self, val):
        self["style"] = val
    @property
    def textcase(self):
        """
        Sets capitalization of text. It can be used to make text appear
        in all-uppercase or all-lowercase, or with each word
        capitalized.
        The 'textcase' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'word caps', 'upper', 'lower']
        Returns
        -------
        Any
        """
        return self["textcase"]
    @textcase.setter
    def textcase(self, val):
        self["textcase"] = val
    @property
    def variant(self):
        """
        Sets the variant of the font.
        The 'variant' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'small-caps', 'all-small-caps',
                'all-petite-caps', 'petite-caps', 'unicase']
        Returns
        -------
        Any
        """
        return self["variant"]
    @variant.setter
    def variant(self, val):
        self["variant"] = val
    @property
    def weight(self):
        """
        Sets the weight (or boldness) of the font.
        The 'weight' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [1, 1000]
            OR exactly one of ['normal', 'bold'] (e.g. 'bold')
        Returns
        -------
        int
        """
        return self["weight"]
    @weight.setter
    def weight(self, val):
        self["weight"] = val
    @property
    def _prop_descriptions(self):
        # Runtime help text reused by the constructor docstring machinery.
        return """\
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser can only apply a font
            if it is available on the system where it runs. Provide
            multiple font families, separated by commas, to
            indicate the order in which to apply fonts if they
            aren't available.
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """
    def __init__(
        self,
        arg=None,
        color=None,
        family=None,
        lineposition=None,
        shadow=None,
        size=None,
        style=None,
        textcase=None,
        variant=None,
        weight=None,
        **kwargs,
    ):
        """
        Construct a new Font object
        Sets this legend group's title font.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.isosurface.leg
            endgrouptitle.Font`
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser can only apply a font
            if it is available on the system where it runs. Provide
            multiple font families, separated by commas, to
            indicate the order in which to apply fonts if they
            aren't available.
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        Returns
        -------
        Font
        """
        super().__init__("font")
        # Internal construction path: adopt the provided parent and return
        # without any property processing.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Normalize ``arg`` into a plain dict of property values.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.legendgrouptitle.Font`""")
        # Validation flags consumed during property assignment below.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Explicit keyword arguments take precedence over values in ``arg``.
        self._set_property("color", arg, color)
        self._set_property("family", arg, family)
        self._set_property("lineposition", arg, lineposition)
        self._set_property("shadow", arg, shadow)
        self._set_property("size", arg, size)
        self._set_property("style", arg, style)
        self._set_property("textcase", arg, textcase)
        self._set_property("variant", arg, variant)
        self._set_property("weight", arg, weight)
        # Any remaining entries (unknown keys) are handled by the base class.
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
| Font |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/base.py | {
"start": 26584,
"end": 31561
} | class ____(metaclass=_MetaOptions):
    """A cacheable option dictionary with defaults."""
    __slots__ = ()
    # Sorted tuple of the option attribute names declared on each subclass;
    # populated in __init_subclass__ and used for merging / comparison.
    _cache_attrs: Tuple[str, ...]
    def __init_subclass__(cls) -> None:
        # Record the subclass's option names once at class-creation time.
        # Dunder names and the cache-key traversal hook are excluded.
        dict_ = cls.__dict__
        cls._cache_attrs = tuple(
            sorted(
                d
                for d in dict_
                if not d.startswith("__")
                and d not in ("_cache_key_traversal",)
            )
        )
        super().__init_subclass__()
    def __init__(self, **kw: Any) -> None:
        self.__dict__.update(kw)
    def __add__(self, other):
        # "options + mapping" produces a new instance with the mapping's
        # values overriding; keys not declared on the class are rejected so
        # cache-key coverage stays complete.
        o1 = self.__class__.__new__(self.__class__)
        o1.__dict__.update(self.__dict__)
        if set(other).difference(self._cache_attrs):
            raise TypeError(
                "dictionary contains attributes not covered by "
                "Options class %s: %r"
                % (self, set(other).difference(self._cache_attrs))
            )
        o1.__dict__.update(other)
        return o1
    def __eq__(self, other):
        # TODO: very inefficient. This is used only in test suites
        # right now.
        for a, b in zip_longest(self._cache_attrs, other._cache_attrs):
            if getattr(self, a) != getattr(other, b):
                return False
        return True
    def __repr__(self) -> str:
        # TODO: fairly inefficient, used only in debugging right now.
        return "%s(%s)" % (
            self.__class__.__name__,
            ", ".join(
                "%s=%r" % (k, self.__dict__[k])
                for k in self._cache_attrs
                if k in self.__dict__
            ),
        )
    @classmethod
    def isinstance(cls, klass: Type[Any]) -> bool:
        return issubclass(cls, klass)
    @hybridmethod
    def add_to_element(self, name: str, value: str) -> Any:
        # Append ``value`` to the current value of option ``name``.
        return self + {name: getattr(self, name) + value}
    @hybridmethod
    def _state_dict_inst(self) -> Mapping[str, Any]:
        return self.__dict__
    _state_dict_const: util.immutabledict[str, Any] = util.EMPTY_DICT
    @_state_dict_inst.classlevel
    def _state_dict(cls) -> Mapping[str, Any]:
        # Class-level variant: the uninstantiated class acts as an empty
        # (all-defaults) state.
        return cls._state_dict_const
    @classmethod
    def safe_merge(cls, other: "Options") -> Any:
        d = other._state_dict()
        # only support a merge with another object of our class
        # and which does not have attrs that we don't. otherwise
        # we risk having state that might not be part of our cache
        # key strategy
        if (
            cls is not other.__class__
            and other._cache_attrs
            and set(other._cache_attrs).difference(cls._cache_attrs)
        ):
            raise TypeError(
                "other element %r is not empty, is not of type %s, "
                "and contains attributes not covered here %r"
                % (
                    other,
                    cls,
                    set(other._cache_attrs).difference(cls._cache_attrs),
                )
            )
        return cls + d
    @classmethod
    def from_execution_options(
        cls,
        key: str,
        attrs: set[str],
        exec_options: Mapping[str, Any],
        statement_exec_options: Mapping[str, Any],
    ) -> Tuple["Options", Mapping[str, Any]]:
        """process Options argument in terms of execution options.
        e.g.::
            (
                load_options,
                execution_options,
            ) = QueryContext.default_load_options.from_execution_options(
                "_sa_orm_load_options",
                {"populate_existing", "autoflush", "yield_per"},
                execution_options,
                statement._execution_options,
            )
        get back the Options and refresh "_sa_orm_load_options" in the
        exec options dict w/ the Options as well
        """
        # common case is that no options we are looking for are
        # in either dictionary, so cancel for that first
        check_argnames = attrs.intersection(
            set(exec_options).union(statement_exec_options)
        )
        existing_options = exec_options.get(key, cls)
        if check_argnames:
            result = {}
            for argname in check_argnames:
                local = "_" + argname
                # per-execution options win over statement-level options.
                if argname in exec_options:
                    result[local] = exec_options[argname]
                elif argname in statement_exec_options:
                    result[local] = statement_exec_options[argname]
            new_options = existing_options + result
            exec_options = util.immutabledict().merge_with(
                exec_options, {key: new_options}
            )
            return new_options, exec_options
        else:
            return existing_options, exec_options
    if TYPE_CHECKING:
        # Options attributes are dynamic; these stubs keep type checkers happy.
        def __getattr__(self, key: str) -> Any: ...
        def __setattr__(self, key: str, value: Any) -> None: ...
        def __delattr__(self, key: str) -> None: ...
| Options |
python | allegroai__clearml | clearml/utilities/plotlympl/mplexporter/renderers/fake_renderer.py | {
"start": 2132,
"end": 3343
} | class ____(FakeRenderer):
    """
    Renderer with the full complement of methods.
    When the following are left undefined, they will be implemented via
    other methods in the class. They can be defined explicitly for
    more efficient or specialized use within the renderer implementation.
    """
    def draw_line(
        self,
        data: numpy.ndarray,
        coordinates: str,
        style: str,
        label: str,
        mplobj: Optional[Any] = None,
    ) -> None:
        # Append a human-readable trace of the call; data.shape[0] is the
        # number of points in the line.
        self.output += "    draw line with {0} points\n".format(data.shape[0])
    def draw_markers(
        self,
        data: numpy.ndarray,
        coordinates: str,
        style: str,
        label: str,
        mplobj: Optional[Any] = None,
    ) -> None:
        # One marker per row of ``data``.
        self.output += "    draw {0} markers\n".format(data.shape[0])
    def draw_path_collection(
        self,
        paths: Any,
        path_coordinates: Any,
        path_transforms: Any,
        offsets: Any,
        offset_coordinates: str,
        offset_order: Any,
        styles: Any,
        mplobj: Optional[Any] = None,
    ) -> None:
        # Only the offset count is recorded; path contents are ignored.
        self.output += "    draw path collection with {0} offsets\n".format(offsets.shape[0])
| FullFakeRenderer |
python | dagster-io__dagster | .buildkite/dagster-buildkite/dagster_buildkite/steps/packages.py | {
"start": 2584,
"end": 38558
} | class ____:
    """Main spec for testing Dagster Python packages using tox.
    Args:
        directory (str): Python directory to test, relative to the repository root. Should contain a
            tox.ini file.
        name (str, optional): Used in the buildkite label. Defaults to None
            (uses the package name as the label).
        package_type (str, optional): Used to determine the emoji attached to the buildkite label.
            Possible values are "core", "example", "extension", and "infrastructure". By default it
            is inferred from the location of the passed directory.
        unsupported_python_versions (list[AvailablePythonVersion], optional): Python versions that
            are not supported by this package. The versions for which pytest will be run are
            the versions determined for the commit minus this list. If this result is empty, then
            the lowest supported version will be tested. Defaults to None (all versions are supported).
        pytest_extra_cmds (Callable[str, list[str]], optional): Optional specification of
            commands to run before the main pytest invocation through tox. Can be either a list of
            commands or a function. Function form takes two arguments, the python version being
            tested and the tox factor (if any), and returns a list of shell commands to execute.
            Defaults to None (no additional commands).
        pytest_step_dependencies (Callable[str, list[str]], optional): Optional specification of
            Buildkite dependencies (e.g. on test image build step) for pytest steps. Can be either a
            list of commands or a function. Function form takes two arguments, the python version
            being tested and the tox factor (if any), and returns a list of Buildkite step names.
            Defaults to None (no additional commands).
        pytest_tox_factors: (list[ToxFactor], optional): list of additional tox environment factors to
            use when iterating pytest tox environments. A separate pytest step is generated for each
            element of the product of versions tested and these factors. For example, if we are
            testing Python 3.7 and 3.8 and pass factors `[ToxFactor("pytest"), ToxFactor("integration")]`,
            then four steps will be generated corresponding to environments "py37-pytest", "py37-integration",
            "py38-pytest", "py38-integration". Defaults to None.
        env_vars (list[str], optional): Additional environment variables to pass through to each
            test environment. These must also be listed in the target toxfile under `passenv`.
            Defaults to None.
        tox_file (str, optional): The tox file to use. Defaults to {directory}/tox.ini.
        retries (int, optional): Whether to retry these tests on failure
            for packages of type "core" or "library", disabled for other packages.
        timeout_in_minutes (int, optional): Fail after this many minutes.
        queue (BuildkiteQueue, optional): Schedule steps to this queue.
        run_pytest (bool, optional): Whether to run pytest. Enabled by default.
        splits (int, optional): Number of splits to use when no tox factors are defined.
            This allows parallelizing tests even when no specific tox factors are specified. Defaults to 1.
    """
    directory: str
    name: Optional[str] = None
    package_type: Optional[str] = None
    unsupported_python_versions: Optional[
        Union[list[AvailablePythonVersion], UnsupportedVersionsFunction]
    ] = None
    pytest_extra_cmds: Optional[Union[list[str], PytestExtraCommandsFunction]] = None
    pytest_step_dependencies: Optional[Union[list[str], PytestDependenciesFunction]] = None
    pytest_tox_factors: Optional[list[ToxFactor]] = None
    env_vars: Optional[list[str]] = None
    tox_file: Optional[str] = None
    retries: Optional[int] = None
    timeout_in_minutes: Optional[int] = None
    queue: Optional[BuildkiteQueue] = None
    run_pytest: bool = True
    splits: int = 1
    always_run_if: Optional[Callable[[], bool]] = None
    skip_if: Optional[Callable[[], Optional[str]]] = None
    def __post_init__(self):
        # Derive display name and package type when not provided explicitly.
        if not self.name:
            self.name = os.path.basename(self.directory)
        if not self.package_type:
            self.package_type = _infer_package_type(self.directory)
        # Manual cache backing the ``skip_reason`` property (see its docstring
        # for why functools caching is not used here).
        self._should_skip = None
        self._skip_reason = None
    def build_steps(self) -> list[TopLevelStepConfiguration]:
        """Expand this spec into Buildkite steps (one per version x factor x split)."""
        base_name = self.name or os.path.basename(self.directory)
        steps: list[GroupLeafStepConfiguration] = []
        if self.run_pytest:
            default_python_versions = AvailablePythonVersion.get_pytest_defaults()
            # A ``None`` factor means "just the python-version tox env".
            tox_factors: Sequence[Optional[ToxFactor]] = (
                self.pytest_tox_factors if self.pytest_tox_factors else [None]
            )
            for other_factor in tox_factors:
                # Unsupported versions may depend on the factor being built.
                if callable(self.unsupported_python_versions):
                    unsupported_python_versions = self.unsupported_python_versions(other_factor)
                else:
                    unsupported_python_versions = self.unsupported_python_versions or []
                supported_python_versions = [
                    v
                    for v in AvailablePythonVersion.get_all()
                    if v not in unsupported_python_versions
                ]
                pytest_python_versions = [
                    AvailablePythonVersion(v)
                    for v in sorted(
                        set(e.value for e in default_python_versions)
                        - set(e.value for e in unsupported_python_versions)
                    )
                ]
                # Use highest supported python version if no defaults_match
                if len(pytest_python_versions) == 0:
                    pytest_python_versions = [supported_python_versions[-1]]
                for py_version in pytest_python_versions:
                    version_factor = AvailablePythonVersion.to_tox_factor(py_version)
                    if other_factor is None:
                        tox_env = version_factor
                        splits = self.splits
                    else:
                        tox_env = f"{version_factor}-{other_factor.factor}"
                        splits = other_factor.splits
                    if isinstance(self.pytest_extra_cmds, list):
                        base_extra_commands_pre = self.pytest_extra_cmds
                    elif callable(self.pytest_extra_cmds):
                        base_extra_commands_pre = self.pytest_extra_cmds(py_version, other_factor)
                    else:
                        base_extra_commands_pre = []
                    dependencies = []
                    if not self.skip_reason:
                        if isinstance(self.pytest_step_dependencies, list):
                            dependencies = self.pytest_step_dependencies
                        elif callable(self.pytest_step_dependencies):
                            dependencies = self.pytest_step_dependencies(py_version, other_factor)
                    # Generate multiple steps if splits > 1
                    for split_index in range(1, splits + 1):
                        if splits > 1:
                            split_label = f"{base_name} ({split_index}/{splits})"
                            pytest_args = [f"--split {split_index}/{splits}"]
                            extra_commands_pre = base_extra_commands_pre
                        else:
                            split_label = base_name
                            pytest_args = None
                            extra_commands_pre = base_extra_commands_pre
                        steps.append(
                            build_tox_step(
                                self.directory,
                                tox_env,
                                base_label=split_label,
                                command_type="pytest",
                                python_version=py_version,
                                env_vars=self.env_vars,
                                extra_commands_pre=extra_commands_pre,
                                dependencies=dependencies,
                                tox_file=self.tox_file,
                                timeout_in_minutes=self.timeout_in_minutes,
                                queue=self.queue,
                                retries=self.retries,
                                skip_reason=self.skip_reason,
                                pytest_args=pytest_args,
                            )
                        )
        emoji = _PACKAGE_TYPE_TO_EMOJI_MAP[self.package_type]  # type: ignore[index]
        # Multiple steps are wrapped in a Buildkite group for readability.
        if len(steps) >= 2:
            return [
                GroupStepBuilder(
                    name=f"{emoji} {base_name}",
                    key=base_name,
                    steps=steps,
                ).build()
            ]
        elif len(steps) == 1:
            only_step = steps[0]
            if not is_command_step(only_step):
                raise ValueError("Expected only step to be a CommandStep")
            return [only_step]
        else:
            return []
    @property
    def skip_reason(self) -> Optional[str]:
        """Provides a message if this package's steps should be skipped on this run, and no message if the package's steps should be run.
        We actually use this to determine whether or not to run the package.
        Because we use an archaic version of python to build our images, we can't use `cached_property`, and so we reinvent the wheel here with
        self._should_skip and self._skip_reason. When we determine definitively that a package should or shouldn't be skipped, we cache the result on self._should_skip
        as a boolean (it starts out as None), and cache the skip reason (or lack thereof) on self._skip_reason.
        """
        # If self._should_skip is not None, then the result is cached on self._skip_reason and we can return it.
        if self._should_skip is not None:
            if self._should_skip is True:
                assert self._skip_reason is not None, (
                    "Expected skip reason to be set if self._should_skip is True."
                )
            return self._skip_reason
        self._skip_reason = skip_reason(self.directory, self.name, self.always_run_if, self.skip_if)
        self._should_skip = self._skip_reason is not None
        return self._skip_reason
def build_example_packages_steps() -> list[StepConfiguration]:
    """Build Buildkite steps for every example package.

    Custom-configured examples come first; every other tox-enabled package
    under ``examples/`` and ``examples/experimental`` gets a default
    PackageSpec, except for the explicitly excluded directories.
    """
    customized_roots = [spec.directory for spec in EXAMPLE_PACKAGES_WITH_CUSTOM_CONFIG]
    excluded_roots = ("examples/deploy_ecs", "examples/starlift-demo")
    discovered_roots = _get_uncustomized_pkg_roots("examples", customized_roots)
    discovered_roots += _get_uncustomized_pkg_roots("examples/experimental", customized_roots)
    standard_specs = [
        PackageSpec(root) for root in discovered_roots if root not in excluded_roots
    ]
    return build_steps_from_package_specs(
        EXAMPLE_PACKAGES_WITH_CUSTOM_CONFIG + standard_specs
    )
def build_library_packages_steps() -> list[StepConfiguration]:
    """Build Buildkite steps for all library packages (custom config first)."""
    customized_roots = [spec.directory for spec in LIBRARY_PACKAGES_WITH_CUSTOM_CONFIG]
    # Default PackageSpecs for everything under python_modules/ and
    # python_modules/libraries/ that is not custom-configured.
    standard_specs = [
        PackageSpec(root)
        for parent_dir in ("python_modules", "python_modules/libraries")
        for root in _get_uncustomized_pkg_roots(parent_dir, customized_roots)
    ]
    return build_steps_from_package_specs(
        LIBRARY_PACKAGES_WITH_CUSTOM_CONFIG + standard_specs
    )
def build_steps_from_package_specs(
    package_specs: list[PackageSpec],
) -> list[StepConfiguration]:
    """Expand PackageSpecs into Buildkite steps, ordered by package type then name."""
    def _sort_key(p: PackageSpec) -> str:
        return f"{_PACKAGE_TYPE_ORDER.index(p.package_type)} {p.name}"  # type: ignore[arg-type]
    steps: list[StepConfiguration] = []
    for pkg in sorted(package_specs, key=_sort_key):
        steps.extend(pkg.build_steps())
    return steps
_PACKAGE_TYPE_ORDER = ["core", "extension", "example", "infrastructure", "unknown"]
def _get_uncustomized_pkg_roots(root: str, custom_pkg_roots: list[str]) -> list[str]:
    """Find packages under a root subdirectory that are not configured above.

    Returns repo-relative paths that contain a tox.ini and are not already
    covered by a custom PackageSpec.
    """
    candidates = (
        os.path.relpath(p, GIT_REPO_ROOT)
        for p in glob(os.path.join(GIT_REPO_ROOT, root, "*"))
    )
    return [
        p
        for p in candidates
        if p not in custom_pkg_roots and os.path.exists(f"{p}/tox.ini")
    ]
# ########################
# ##### PACKAGES WITH CUSTOM STEPS
# ########################
def airflow_extra_cmds(version: AvailablePythonVersion, _) -> list[str]:
    """Pre-test shell commands for airflow tests: point AIRFLOW_HOME at /airflow."""
    home_export = 'export AIRFLOW_HOME="/airflow"'
    home_mkdir = "mkdir -p $${AIRFLOW_HOME}"
    return [home_export, home_mkdir]
# Pre-test commands for the airline_demo example: start its sibling postgres
# container and expose the db host to the test environment.
airline_demo_extra_cmds = [
    "pushd examples/airline_demo",
    # Run the postgres db. We are in docker running docker
    # so this will be a sibling container.
    "docker-compose up -d --remove-orphans",  # clean up in hooks/pre-exit
    # Can't use host networking on buildkite and communicate via localhost
    # between these sibling containers, so pass along the ip.
    *network_buildkite_container("postgres"),
    *connect_sibling_docker_container(
        "postgres", "test-postgres-db-airline", "POSTGRES_TEST_DB_HOST"
    ),
    "popd",
]
def dagster_graphql_extra_cmds(_, tox_factor: Optional[ToxFactor]) -> list[str]:
    """Pre-test commands for dagster-graphql.

    Postgres-backed tox factors need a sibling postgres container; all other
    factors need no setup.
    """
    if not (tox_factor and tox_factor.factor.startswith("postgres")):
        return []
    return [
        "pushd python_modules/dagster-graphql/dagster_graphql_tests/graphql/",
        "docker-compose up -d --remove-orphans",  # clean up in hooks/pre-exit,
        # Can't use host networking on buildkite and communicate via localhost
        # between these sibling containers, so pass along the ip.
        *network_buildkite_container("postgres"),
        *connect_sibling_docker_container(
            "postgres", "test-postgres-db-graphql", "POSTGRES_TEST_DB_HOST"
        ),
        "popd",
    ]
# Pre-test commands for the deploy_docker example: build and start its compose
# stack and expose the webserver host to the test environment.
deploy_docker_example_extra_cmds = [
    "pushd examples/deploy_docker/from_source",
    "./build.sh",
    "docker-compose up -d --remove-orphans",  # clean up in hooks/pre-exit
    *network_buildkite_container("docker_example_network"),
    *connect_sibling_docker_container(
        "docker_example_network",
        "docker_example_webserver",
        "DEPLOY_DOCKER_WEBSERVER_HOST",
    ),
    "popd",
]
def celery_extra_cmds(version: AvailablePythonVersion, _) -> list[str]:
    """Pre-test commands for dagster-celery: export docker image env vars and
    start a sibling rabbitmq broker container."""
    env_exports = [
        "export DAGSTER_DOCKER_IMAGE_TAG=$${BUILDKITE_BUILD_ID}-" + version.value,
        'export DAGSTER_DOCKER_REPOSITORY="$${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com"',
    ]
    # Run the rabbitmq broker as a sibling container (docker-in-docker); since
    # host networking is unavailable on buildkite, pass along its ip.
    rabbitmq_setup = [
        "pushd python_modules/libraries/dagster-celery",
        "docker-compose up -d --remove-orphans",  # clean up in hooks/pre-exit,
        *network_buildkite_container("rabbitmq"),
        *connect_sibling_docker_container(
            "rabbitmq", "test-rabbitmq", "DAGSTER_CELERY_BROKER_HOST"
        ),
        "popd",
    ]
    return env_exports + rabbitmq_setup
def docker_extra_cmds(version: AvailablePythonVersion, _) -> list[str]:
    """Export the docker image tag/repository env vars used by docker-based tests."""
    tag_export = "export DAGSTER_DOCKER_IMAGE_TAG=$${BUILDKITE_BUILD_ID}-" + version.value
    repo_export = 'export DAGSTER_DOCKER_REPOSITORY="$${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com"'
    return [tag_export, repo_export]
ui_extra_cmds = ["make rebuild_ui"]
def has_dg_or_component_integration_changes() -> bool:
    """Check for changes in dagster-dg-cli or in integrations that implement components.

    True when either check reports changes; evaluation short-circuits on the
    first truthy result.
    """
    return has_dg_changes() or has_component_integration_changes()
# Pre-test commands for dagster-mysql: start sibling MySQL containers
# (current, pinned, and pinned-backcompat) and export their hostnames to the
# test environment.
mysql_extra_cmds = [
    "pushd python_modules/libraries/dagster-mysql/dagster_mysql_tests/",
    "docker-compose up -d --remove-orphans",  # clean up in hooks/pre-exit,
    *network_buildkite_container("mysql"),
    *network_buildkite_container("mysql_pinned"),
    *network_buildkite_container("mysql_pinned_backcompat"),
    *connect_sibling_docker_container("mysql", "test-mysql-db", "MYSQL_TEST_DB_HOST"),
    *connect_sibling_docker_container(
        "mysql_pinned", "test-mysql-db-pinned", "MYSQL_TEST_PINNED_DB_HOST"
    ),
    *connect_sibling_docker_container(
        "mysql_pinned_backcompat",
        "test-mysql-db-pinned-backcompat",
        "MYSQL_TEST_PINNED_BACKCOMPAT_DB_HOST",
    ),
    "popd",
]
def k8s_extra_cmds(version: AvailablePythonVersion, _) -> list[str]:
    """Export the docker image tag/repository env vars used by k8s-based tests."""
    tag_export = "export DAGSTER_DOCKER_IMAGE_TAG=$${BUILDKITE_BUILD_ID}-" + version.value
    repo_export = 'export DAGSTER_DOCKER_REPOSITORY="$${AWS_ACCOUNT_ID}.dkr.ecr.us-west-2.amazonaws.com"'
    return [tag_export, repo_export]
# Fetch GCP service-account credentials from the Buildkite secrets bucket and
# point GOOGLE_APPLICATION_CREDENTIALS at the local copy. Skipped entirely
# when integration tests are disabled.
gcp_creds_extra_cmds = (
    [
        rf"aws s3 cp s3://\${{BUILDKITE_SECRETS_BUCKET}}/{GCP_CREDS_FILENAME} "
        + GCP_CREDS_LOCAL_FILE,
        "export GOOGLE_APPLICATION_CREDENTIALS=" + GCP_CREDS_LOCAL_FILE,
    ]
    if not os.getenv("CI_DISABLE_INTEGRATION_TESTS")
    else []
)
# Some Dagster packages have more involved test configs or support only certain Python version;
# special-case those here
# (see the PackageSpec docstring for the meaning of each keyword).
EXAMPLE_PACKAGES_WITH_CUSTOM_CONFIG: list[PackageSpec] = [
    PackageSpec(
        "examples/assets_smoke_test",
    ),
    PackageSpec(
        "examples/deploy_docker",
        pytest_extra_cmds=deploy_docker_example_extra_cmds,
    ),
    PackageSpec(
        "examples/docs_snippets",
        # The docs_snippets test suite also installs a ton of packages in the same environment,
        # which is liable to cause dependency collisions. It's not necessary to test all these
        # snippets in all python versions since we are testing the core code exercised by the
        # snippets against all supported python versions.
        unsupported_python_versions=AvailablePythonVersion.get_all_except_default(),
        pytest_tox_factors=[
            ToxFactor("all"),
            ToxFactor("integrations"),
            ToxFactor("docs_snapshot_test", splits=3),
        ],
        always_run_if=has_dg_changes,
    ),
    PackageSpec(
        "examples/project_fully_featured",
        unsupported_python_versions=[
            AvailablePythonVersion.V3_12,  # duckdb
            AvailablePythonVersion.V3_13,  # duckdb
        ],
    ),
    PackageSpec(
        "examples/with_great_expectations",
    ),
    PackageSpec(
        "examples/with_pyspark",
    ),
    PackageSpec(
        "examples/with_pyspark_emr",
    ),
    PackageSpec(
        "examples/with_wandb",
        unsupported_python_versions=[
            # dagster-wandb dep
            AvailablePythonVersion.V3_12,
            AvailablePythonVersion.V3_13,
        ],
    ),
    # The 6 tutorials referenced in cloud onboarding cant test "source" due to dagster-cloud dep
    PackageSpec(
        "examples/assets_modern_data_stack",
        pytest_tox_factors=[ToxFactor("pypi")],
    ),
    PackageSpec(
        "examples/assets_dbt_python",
        pytest_tox_factors=[ToxFactor("pypi")],
        unsupported_python_versions=[
            AvailablePythonVersion.V3_12,  # duckdb
            AvailablePythonVersion.V3_13,  # duckdb
        ],
    ),
    PackageSpec(
        "examples/assets_dynamic_partitions",
        unsupported_python_versions=[
            AvailablePythonVersion.V3_12,  # duckdb
            AvailablePythonVersion.V3_13,  # duckdb
        ],
    ),
    PackageSpec(
        "examples/quickstart_etl",
        pytest_tox_factors=[ToxFactor("pypi")],
    ),
    PackageSpec(
        "examples/use_case_repository",
        pytest_tox_factors=[ToxFactor("source")],
    ),
    # Federation tutorial spins up multiple airflow instances, slow to run - use docker queue to ensure
    # beefier instance
    PackageSpec(
        "examples/airlift-federation-tutorial",
        always_run_if=has_dagster_airlift_changes,
        timeout_in_minutes=30,
        queue=BuildkiteQueue.DOCKER,
        unsupported_python_versions=[
            # airflow
            AvailablePythonVersion.V3_12,
            AvailablePythonVersion.V3_13,
        ],
    ),
    PackageSpec(
        "examples/airlift-migration-tutorial",
        always_run_if=has_dagster_airlift_changes,
        unsupported_python_versions=[
            # airflow
            AvailablePythonVersion.V3_12,
            AvailablePythonVersion.V3_13,
        ],
    ),
]
def _unsupported_dagster_python_versions(
    tox_factor: Optional[ToxFactor],
) -> list[AvailablePythonVersion]:
    """Per-factor python-version exclusions for the core dagster package."""
    factor = tox_factor.factor if tox_factor else None
    if factor == "general_tests_old_protobuf":
        return [
            AvailablePythonVersion.V3_11,
            AvailablePythonVersion.V3_12,
            AvailablePythonVersion.V3_13,
        ]
    if factor == "type_signature_tests":
        return [AvailablePythonVersion.V3_12]
    return []
def test_subfolders(tests_folder_name: str) -> Iterable[str]:
    """Yield the names of the parallel-test subfolders of a dagster_tests folder.

    Raises when a python file (other than __init__) sits at the folder root.
    """
    tests_path = (
        Path(__file__).parent
        / Path("../../../../python_modules/dagster/dagster_tests/")
        / Path(tests_folder_name)
    )
    for subfolder in tests_path.iterdir():
        stray_python_file = subfolder.suffix == ".py" and subfolder.stem != "__init__"
        if stray_python_file:
            raise Exception(
                f"If you are splitting a test folder into parallel subfolders "
                f"there should be no python files in the root of the folder. Found {subfolder}."
            )
        if subfolder.is_dir():
            yield subfolder.name
def tox_factors_for_folder(tests_folder_name: str) -> list[ToxFactor]:
return [
ToxFactor(f"{tests_folder_name}__{subfolder_name}")
for subfolder_name in test_subfolders(tests_folder_name)
]
LIBRARY_PACKAGES_WITH_CUSTOM_CONFIG: list[PackageSpec] = [
PackageSpec(
"python_modules/automation",
# automation is internal code that doesn't need to be tested in every python version. The
# test suite also installs a ton of packages in the same environment, which is liable to
# cause dependency collisions.
unsupported_python_versions=AvailablePythonVersion.get_all_except_default(),
retries=0,
),
PackageSpec("python_modules/dagster-webserver", pytest_extra_cmds=ui_extra_cmds),
PackageSpec(
"python_modules/dagster",
env_vars=["AWS_ACCOUNT_ID"],
pytest_tox_factors=[
ToxFactor("api_tests"),
ToxFactor("asset_defs_tests"),
ToxFactor("cli_tests", splits=2),
ToxFactor("components_tests"),
ToxFactor("core_tests"),
ToxFactor("daemon_sensor_tests", splits=2),
ToxFactor("daemon_tests", splits=2),
ToxFactor("declarative_automation_tests", splits=2),
ToxFactor("definitions_tests"),
ToxFactor("general_tests"),
ToxFactor("general_tests_old_protobuf"),
ToxFactor("launcher_tests"),
ToxFactor("logging_tests"),
ToxFactor("model_tests"),
ToxFactor("scheduler_tests"),
ToxFactor("storage_tests", splits=2),
ToxFactor("storage_tests_sqlalchemy_1_3", splits=2),
ToxFactor("storage_tests_sqlalchemy_1_4", splits=2),
ToxFactor("utils_tests"),
ToxFactor("type_signature_tests"),
]
+ tox_factors_for_folder("execution_tests"),
unsupported_python_versions=_unsupported_dagster_python_versions,
),
PackageSpec(
"python_modules/dagster-graphql",
pytest_extra_cmds=dagster_graphql_extra_cmds,
pytest_tox_factors=[
ToxFactor("not_graphql_context_test_suite", splits=2),
ToxFactor("sqlite_instance_multi_location"),
ToxFactor("sqlite_instance_managed_grpc_env", splits=2),
ToxFactor("sqlite_instance_deployed_grpc_env", splits=2),
ToxFactor("sqlite_instance_code_server_cli_grpc_env", splits=2),
ToxFactor("graphql_python_client"),
ToxFactor("postgres-graphql_context_variants"),
ToxFactor("postgres-instance_multi_location"),
ToxFactor("postgres-instance_managed_grpc_env", splits=2),
ToxFactor("postgres-instance_deployed_grpc_env", splits=2),
],
unsupported_python_versions=(
lambda tox_factor: (
[AvailablePythonVersion.V3_11]
if (
tox_factor
and tox_factor.factor
in {
# test suites particularly likely to crash and/or hang
# due to https://github.com/grpc/grpc/issues/31885
"sqlite_instance_managed_grpc_env",
"sqlite_instance_deployed_grpc_env",
"sqlite_instance_code_server_cli_grpc_env",
"sqlite_instance_multi_location",
"postgres-instance_multi_location",
"postgres-instance_managed_grpc_env",
"postgres-instance_deployed_grpc_env",
}
)
else []
)
),
timeout_in_minutes=30,
),
PackageSpec(
"python_modules/dagster-test",
unsupported_python_versions=[
# dagster-airflow
AvailablePythonVersion.V3_12,
AvailablePythonVersion.V3_13,
],
),
PackageSpec(
"python_modules/libraries/dagster-dbt",
pytest_tox_factors=[
ToxFactor(f"{deps_factor}-{command_factor}", splits=3)
for deps_factor in ["dbt17", "dbt18", "dbt19", "dbt110"]
for command_factor in ["cloud", "core-main", "core-derived-metadata"]
],
# dbt-core 1.7's protobuf<5 constraint conflicts with the grpc requirement for Python 3.13
unsupported_python_versions=(
lambda tox_factor: [AvailablePythonVersion.V3_13]
if tox_factor and tox_factor.factor.startswith("dbt17")
else []
),
),
PackageSpec(
"python_modules/libraries/dagster-dbt/",
skip_if=skip_if_not_dagster_dbt_commit,
name="dagster-dbt-fusion",
pytest_tox_factors=[ToxFactor("dbtfusion-snowflake")],
env_vars=[
"SNOWFLAKE_ACCOUNT",
"SNOWFLAKE_USER",
"SNOWFLAKE_BUILDKITE_PASSWORD",
],
),
PackageSpec(
"python_modules/libraries/dagster-snowflake",
env_vars=[
"SNOWFLAKE_ACCOUNT",
"SNOWFLAKE_USER",
"SNOWFLAKE_BUILDKITE_PASSWORD",
],
),
PackageSpec(
"python_modules/libraries/dagster-airlift",
unsupported_python_versions=[
# airflow
AvailablePythonVersion.V3_12,
AvailablePythonVersion.V3_13,
],
env_vars=[
"AIRLIFT_MWAA_TEST_ENV_NAME",
"AIRLIFT_MWAA_TEST_PROFILE",
"AIRLIFT_MWAA_TEST_REGION",
],
),
PackageSpec(
"python_modules/libraries/dagster-airbyte",
pytest_tox_factors=[ToxFactor("unit"), ToxFactor("integration")],
),
# PackageSpec(
# "python_modules/libraries/dagster-airflow",
# # omit python 3.10 until we add support
# unsupported_python_versions=[
# AvailablePythonVersion.V3_10,
# AvailablePythonVersion.V3_11,
# AvailablePythonVersion.V3_12,
# AvailablePythonVersion.V3_13,
# ],
# env_vars=[
# "AIRFLOW_HOME",
# "AWS_ACCOUNT_ID",
# "AWS_ACCESS_KEY_ID",
# "AWS_SECRET_ACCESS_KEY",
# "BUILDKITE_SECRETS_BUCKET",
# "GOOGLE_APPLICATION_CREDENTIALS",
# ],
# pytest_extra_cmds=airflow_extra_cmds,
# pytest_tox_factors=[
# ToxFactor("default-airflow2"),
# ToxFactor("localdb-airflow2"),
# ToxFactor("persistentdb-airflow2"),
# ],
# ),
PackageSpec(
"python_modules/libraries/dagster-dg-cli",
pytest_tox_factors=[
ToxFactor("general", splits=3),
ToxFactor("docs"),
ToxFactor("plus"),
],
env_vars=["SHELL"],
always_run_if=has_dg_or_component_integration_changes,
),
PackageSpec(
"python_modules/libraries/dagster-dg-cli",
name="dagster-dg-cli-mcp",
pytest_tox_factors=[ToxFactor("mcp")],
always_run_if=has_dg_or_component_integration_changes,
),
PackageSpec(
"python_modules/libraries/dagster-aws",
env_vars=["AWS_DEFAULT_REGION", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"],
),
PackageSpec(
"python_modules/libraries/dagster-azure",
env_vars=["AZURE_STORAGE_ACCOUNT_KEY"],
),
PackageSpec(
"python_modules/libraries/dagster-celery",
env_vars=["AWS_ACCOUNT_ID", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"],
pytest_extra_cmds=celery_extra_cmds,
),
PackageSpec(
"python_modules/libraries/dagster-celery-docker",
env_vars=["AWS_ACCOUNT_ID", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"],
pytest_extra_cmds=celery_extra_cmds,
pytest_step_dependencies=test_project_depends_fn,
),
PackageSpec(
"python_modules/libraries/dagster-dask",
env_vars=["AWS_SECRET_ACCESS_KEY", "AWS_ACCESS_KEY_ID", "AWS_DEFAULT_REGION"],
),
PackageSpec(
"python_modules/libraries/dagster-databricks",
),
PackageSpec(
"python_modules/libraries/dagster-docker",
env_vars=["AWS_ACCOUNT_ID", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"],
pytest_extra_cmds=docker_extra_cmds,
pytest_step_dependencies=test_project_depends_fn,
),
PackageSpec(
"python_modules/libraries/dagster-duckdb",
unsupported_python_versions=[
# duckdb
AvailablePythonVersion.V3_12,
],
),
PackageSpec(
"python_modules/libraries/dagster-duckdb-pandas",
unsupported_python_versions=[
# duckdb
AvailablePythonVersion.V3_12,
],
),
PackageSpec(
"python_modules/libraries/dagster-duckdb-polars",
unsupported_python_versions=[
# duckdb
AvailablePythonVersion.V3_12,
],
),
PackageSpec(
"python_modules/libraries/dagster-duckdb-pyspark",
unsupported_python_versions=[
# duckdb
AvailablePythonVersion.V3_12,
],
),
PackageSpec(
"python_modules/libraries/dagster-pandas",
unsupported_python_versions=[
AvailablePythonVersion.V3_12,
],
),
PackageSpec(
"python_modules/libraries/dagster-gcp",
env_vars=[
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"BUILDKITE_SECRETS_BUCKET",
"GCP_PROJECT_ID",
],
pytest_extra_cmds=gcp_creds_extra_cmds,
# Remove once https://github.com/dagster-io/dagster/issues/2511 is resolved
retries=2,
),
PackageSpec(
"python_modules/libraries/dagster-gcp-pandas",
env_vars=[
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"BUILDKITE_SECRETS_BUCKET",
"GCP_PROJECT_ID",
],
pytest_extra_cmds=gcp_creds_extra_cmds,
retries=2,
),
PackageSpec(
"python_modules/libraries/dagster-gcp-pyspark",
env_vars=[
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"BUILDKITE_SECRETS_BUCKET",
"GCP_PROJECT_ID",
],
pytest_extra_cmds=gcp_creds_extra_cmds,
),
PackageSpec(
"python_modules/libraries/dagster-ge",
),
PackageSpec(
"python_modules/libraries/dagster-k8s",
env_vars=[
"AWS_ACCOUNT_ID",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"BUILDKITE_SECRETS_BUCKET",
],
pytest_tox_factors=[
ToxFactor("default"),
ToxFactor("old_kubernetes"),
],
pytest_extra_cmds=k8s_extra_cmds,
),
PackageSpec(
"python_modules/libraries/dagster-mlflow",
),
PackageSpec(
"python_modules/libraries/dagster-mysql",
pytest_extra_cmds=mysql_extra_cmds,
pytest_tox_factors=[
ToxFactor("storage_tests", splits=2),
ToxFactor("storage_tests_sqlalchemy_1_3", splits=2),
],
always_run_if=has_storage_test_fixture_changes,
),
PackageSpec(
"python_modules/libraries/dagster-snowflake-pandas",
env_vars=["SNOWFLAKE_ACCOUNT", "SNOWFLAKE_BUILDKITE_PASSWORD"],
),
PackageSpec(
"python_modules/libraries/dagster-snowflake-pyspark",
env_vars=["SNOWFLAKE_ACCOUNT", "SNOWFLAKE_BUILDKITE_PASSWORD"],
),
PackageSpec(
"python_modules/libraries/dagster-snowflake-polars",
env_vars=["SNOWFLAKE_ACCOUNT", "SNOWFLAKE_BUILDKITE_PASSWORD"],
),
PackageSpec(
"python_modules/libraries/dagster-postgres",
pytest_tox_factors=[
ToxFactor("storage_tests"),
ToxFactor("storage_tests_sqlalchemy_1_3"),
],
always_run_if=has_storage_test_fixture_changes,
),
PackageSpec(
"python_modules/libraries/dagster-twilio",
env_vars=["TWILIO_TEST_ACCOUNT_SID", "TWILIO_TEST_AUTH_TOKEN"],
# Remove once https://github.com/dagster-io/dagster/issues/2511 is resolved
retries=2,
),
PackageSpec(
"python_modules/libraries/dagster-wandb",
unsupported_python_versions=[
AvailablePythonVersion.V3_12,
AvailablePythonVersion.V3_13,
],
),
PackageSpec(
"python_modules/libraries/dagstermill",
pytest_tox_factors=[ToxFactor("papermill1", splits=2), ToxFactor("papermill2", splits=2)],
retries=2, # Workaround for flaky kernel issues
unsupported_python_versions=(
lambda tox_factor: (
[AvailablePythonVersion.V3_12, AvailablePythonVersion.V3_13]
if (tox_factor and tox_factor.factor == "papermill1")
else []
)
),
),
PackageSpec(
"python_modules/libraries/dagster-airlift/perf-harness",
always_run_if=has_dagster_airlift_changes,
unsupported_python_versions=[
# airflow
AvailablePythonVersion.V3_12,
AvailablePythonVersion.V3_13,
],
),
PackageSpec(
"python_modules/libraries/dagster-airlift/kitchen-sink",
always_run_if=has_dagster_airlift_changes,
unsupported_python_versions=[
# airflow
AvailablePythonVersion.V3_12,
AvailablePythonVersion.V3_13,
],
queue=BuildkiteQueue.DOCKER,
splits=2,
),
# Runs against live dbt cloud instance, we only want to run on commits and on the
# nightly build
PackageSpec(
"python_modules/libraries/dagster-dbt/kitchen-sink",
skip_if=skip_if_not_dagster_dbt_cloud_commit,
name="dagster-dbt-cloud-live",
env_vars=[
"KS_DBT_CLOUD_ACCOUNT_ID",
"KS_DBT_CLOUD_ACCESS_URL",
"KS_DBT_CLOUD_TOKEN",
"KS_DBT_CLOUD_PROJECT_ID",
"KS_DBT_CLOUD_ENVIRONMENT_ID",
],
),
PackageSpec(
".buildkite/dagster-buildkite",
run_pytest=False,
),
]
| PackageSpec |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_memorystore.py | {
"start": 49902,
"end": 53574
} | class ____(GoogleCloudBaseOperator):
"""
Creates a Memcached instance based on the specified tier and memory size.
By default, the instance is accessible from the project's `default network
<https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreMemcachedCreateInstanceOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: Required. The logical name of the Memcached instance in the customer project with the
following restrictions:
- Must contain only lowercase letters, numbers, and hyphens.
- Must start with a letter.
- Must be between 1-40 characters.
- Must end with a number or a letter.
- Must be unique within the customer project / location
:param instance: Required. A Memcached [Instance] resource
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.memcache_v1beta2.types.cloud_memcache.Instance`
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
"""
template_fields: Sequence[str] = (
"location",
"instance_id",
"instance",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
)
operator_extra_links = (MemcachedInstanceDetailsLink(),)
def __init__(
self,
location: str,
instance_id: str,
instance: dict | cloud_memcache.Instance,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.location = location
self.instance_id = instance_id
self.instance = instance
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"instance_id": self.instance_id,
"location_id": self.location,
}
def execute(self, context: Context):
hook = CloudMemorystoreMemcachedHook(gcp_conn_id=self.gcp_conn_id)
result = hook.create_instance(
location=self.location,
instance_id=self.instance_id,
instance=self.instance,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
MemcachedInstanceDetailsLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
return cloud_memcache.Instance.to_dict(result)
| CloudMemorystoreMemcachedCreateInstanceOperator |
python | readthedocs__readthedocs.org | readthedocs/core/forms.py | {
"start": 6916,
"end": 7440
} | class ____(forms.Select):
"""
Rich content dropdown field widget type used for complex content.
This class is mostly used for special casing in Crispy form templates, it
doesn't do anything special. This widget type requires use of the
:py:class:`RichChoice` data class. Usage might look something comparable to:
choice = RichChoice(...)
field = forms.ChoiceField(
...,
widget=RichSelect(),
choices=[(choice.value, choice)]
)
"""
| RichSelect |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/dep_with_variants_if_develop_root/package.py | {
"start": 216,
"end": 441
} | class ____(Package):
"""Package that adds a dependency with many variants only at @develop"""
homepage = "https://dev.null"
version("1.0")
depends_on("dep-with-variants-if-develop")
| DepWithVariantsIfDevelopRoot |
python | tensorflow__tensorflow | tensorflow/python/framework/tensor_util_test.py | {
"start": 50788,
"end": 52542
} | class ____(test.TestCase):
@contextlib.contextmanager
def disableSetStaticShape(self):
flag_old = shape_util._ENABLE_MAYBE_SET_STATIC_SHAPE
shape_util._ENABLE_MAYBE_SET_STATIC_SHAPE = False
try:
yield
finally:
shape_util._ENABLE_MAYBE_SET_STATIC_SHAPE = flag_old
def testMaybeSetStaticShape(self):
shape = constant_op.constant([2, 5], dtype=dtypes.int32)
def reshape():
v = array_ops.zeros([10])
return array_ops.reshape(v, shape)
# This test needs a placeholder which means we need to construct a graph.
with ops.Graph().as_default():
with self.disableSetStaticShape():
graph_without_shape_propagation = func_graph.func_graph_from_py_func(
"without_shape_propagation", reshape, [], {})
graph_with_shape_propagation = func_graph.func_graph_from_py_func(
"with_shape_propagation", reshape, [], {})
self.assertCountEqual(
[op.type for op in graph_without_shape_propagation.get_operations()],
[op.type for op in graph_with_shape_propagation.get_operations()])
def testMaybeSetStaticShapeScalarShape(self):
def reshape():
v = array_ops.placeholder(dtypes.float32)
t = array_ops.reshape(v, [-1])
return t
with self.disableSetStaticShape():
graph_without_shape_propagation = func_graph.func_graph_from_py_func(
"without_shape_propagation", reshape, [], {})
graph_with_shape_propagation = func_graph.func_graph_from_py_func(
"with_shape_propagation", reshape, [], {})
self.assertCountEqual(
[op.type for op in graph_without_shape_propagation.get_operations()],
[op.type for op in graph_with_shape_propagation.get_operations()])
| MaybeSetStaticShapeTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/langchain_helpers/agents/tools.py | {
"start": 1114,
"end": 1383
} | class ____(BaseModel):
"""Configuration for LlamaIndex index tool."""
model_config = ConfigDict(arbitrary_types_allowed=True)
query_engine: BaseQueryEngine
name: str
description: str
tool_kwargs: Dict = Field(default_factory=dict)
| IndexToolConfig |
python | pytorch__pytorch | torch/_inductor/fx_passes/group_batch_fusion.py | {
"start": 4954,
"end": 5092
} | class ____(GroupBatchFusionBase):
"""
Fuse ops in a batch way, e.g, fuse mm/addmm of same input shapes with bmm.
"""
| BatchFusion |
python | mlflow__mlflow | mlflow/types/schema.py | {
"start": 30063,
"end": 32218
} | class ____:
"""
Representation of the shape and type of a Tensor.
"""
def __init__(self, dtype: np.dtype, shape: tuple[Any, ...] | list[Any]):
if not isinstance(dtype, np.dtype):
raise TypeError(
f"Expected `dtype` to be instance of `{np.dtype}`, received `{dtype.__class__}`"
)
# Throw if size information exists flexible numpy data types
if dtype.char in ["U", "S"] and not dtype.name.isalpha():
raise MlflowException(
"MLflow does not support size information in flexible numpy data types. Use"
f' np.dtype("{dtype.name.rstrip(string.digits)}") instead'
)
if not isinstance(shape, (tuple, list)):
raise TypeError(
"Expected `shape` to be instance of `{}` or `{}`, received `{}`".format(
tuple, list, shape.__class__
)
)
self._dtype = dtype
self._shape = tuple(shape)
@property
def dtype(self) -> np.dtype:
"""
A unique character code for each of the 21 different numpy built-in types.
See https://numpy.org/devdocs/reference/generated/numpy.dtype.html#numpy.dtype for details.
"""
return self._dtype
@property
def shape(self) -> tuple[int, ...]:
"""The tensor shape"""
return self._shape
def to_dict(self) -> dict[str, Any]:
return {"dtype": self._dtype.name, "shape": self._shape}
@classmethod
def from_json_dict(cls, **kwargs):
"""
Deserialize from a json loaded dictionary.
The dictionary is expected to contain `dtype` and `shape` keys.
"""
if not {"dtype", "shape"} <= set(kwargs.keys()):
raise MlflowException(
"Missing keys in TensorSpec JSON. Expected to find keys `dtype` and `shape`"
)
tensor_type = np.dtype(kwargs["dtype"])
tensor_shape = tuple(kwargs["shape"])
return cls(tensor_type, tensor_shape)
def __repr__(self) -> str:
return f"Tensor({self.dtype.name!r}, {self.shape!r})"
| TensorInfo |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_errorbars01.py | {
"start": 315,
"end": 1569
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_errorbars01.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with error bars."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [63386752, 63388288]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
"y_error_bars": {"type": "standard_error"},
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/deployments/steps.py | {
"start": 530,
"end": 7927
} | class ____(TypedDict):
"""
The output of the `pull_from_gcs` step.
"""
bucket: str
folder: str
directory: str
def push_to_gcs(
bucket: str,
folder: str,
project: Optional[str] = None,
credentials: Optional[Dict] = None,
ignore_file=".prefectignore",
) -> PushToGcsOutput:
"""
Pushes the contents of the current working directory to a GCS bucket,
excluding files and folders specified in the ignore_file.
Args:
bucket: The name of the GCS bucket where files will be uploaded.
folder: The folder in the GCS bucket where files will be uploaded.
project: The GCP project the bucket belongs to. If not provided, the project
will be inferred from the credentials or the local environment.
credentials: A dictionary containing the service account information and project
used for authentication. If not provided, the application default
credentials will be used.
ignore_file: The name of the file containing ignore patterns.
Returns:
A dictionary containing the bucket and folder where files were uploaded.
Examples:
Push to a GCS bucket:
```yaml
build:
- prefect_gcp.deployments.steps.push_to_gcs:
requires: prefect-gcp
bucket: my-bucket
folder: my-project
```
Push to a GCS bucket using credentials stored in a block:
```yaml
build:
- prefect_gcp.deployments.steps.push_to_gcs:
requires: prefect-gcp
bucket: my-bucket
folder: my-folder
credentials: "{{ prefect.blocks.gcp-credentials.dev-credentials }}"
```
Push to a GCS bucket using credentials stored in a service account
file:
```yaml
build:
- prefect_gcp.deployments.steps.push_to_gcs:
requires: prefect-gcp
bucket: my-bucket
folder: my-folder
credentials:
project: my-project
service_account_file: /path/to/service_account.json
```
"""
project = credentials.get("project") if credentials else None
gcp_creds = None
if credentials is not None:
if credentials.get("service_account_info") is not None:
gcp_creds = Credentials.from_service_account_info(
credentials.get("service_account_info"),
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
elif credentials.get("service_account_file") is not None:
gcp_creds = Credentials.from_service_account_file(
credentials.get("service_account_file"),
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
gcp_creds = gcp_creds or google.auth.default()[0]
storage_client = StorageClient(credentials=gcp_creds, project=project)
bucket_resource = storage_client.bucket(bucket)
local_path = Path.cwd()
included_files = None
if ignore_file and Path(ignore_file).exists():
with open(ignore_file, "r") as f:
ignore_patterns = f.readlines()
included_files = filter_files(str(local_path), ignore_patterns)
for local_file_path in local_path.expanduser().rglob("*"):
relative_local_file_path = local_file_path.relative_to(local_path)
if (
included_files is not None
and str(relative_local_file_path) not in included_files
):
continue
elif not local_file_path.is_dir():
remote_file_path = (folder / relative_local_file_path).as_posix()
blob_resource = bucket_resource.blob(remote_file_path)
blob_resource.upload_from_filename(local_file_path)
return {
"bucket": bucket,
"folder": folder,
}
def pull_from_gcs(
bucket: str,
folder: str,
project: Optional[str] = None,
credentials: Optional[Dict] = None,
) -> PullFromGcsOutput:
"""
Pulls the contents of a project from an GCS bucket to the current working directory.
Args:
bucket: The name of the GCS bucket where files are stored.
folder: The folder in the GCS bucket where files are stored.
project: The GCP project the bucket belongs to. If not provided, the project will be
inferred from the credentials or the local environment.
credentials: A dictionary containing the service account information and project
used for authentication. If not provided, the application default
credentials will be used.
Returns:
A dictionary containing the bucket, folder, and local directory where files were downloaded.
Examples:
Pull from GCS using the default environment credentials:
```yaml
build:
- prefect_gcp.deployments.steps.pull_from_gcs:
requires: prefect-gcp
bucket: my-bucket
folder: my-folder
```
Pull from GCS using credentials stored in a block:
```yaml
build:
- prefect_gcp.deployments.steps.pull_from_gcs:
requires: prefect-gcp
bucket: my-bucket
folder: my-folder
credentials: "{{ prefect.blocks.gcp-credentials.dev-credentials }}"
```
Pull from to an GCS bucket using credentials stored in a service account file:
```yaml
build:
- prefect_gcp.deployments.steps.pull_from_gcs:
requires: prefect-gcp
bucket: my-bucket
folder: my-folder
credentials:
project: my-project
service_account_file: /path/to/service_account.json
```
""" # noqa
local_path = Path.cwd()
project = credentials.get("project") if credentials else None
gcp_creds = None
if credentials is not None:
if credentials.get("service_account_info") is not None:
gcp_creds = Credentials.from_service_account_info(
credentials.get("service_account_info"),
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
elif credentials.get("service_account_file") is not None:
gcp_creds = Credentials.from_service_account_file(
credentials.get("service_account_file"),
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
gcp_creds = gcp_creds or google.auth.default()[0]
storage_client = StorageClient(credentials=gcp_creds, project=project)
blobs = storage_client.list_blobs(bucket, prefix=folder)
for blob in blobs:
if blob.name.endswith("/"):
# object is a folder and will be created if it contains any objects
continue
local_blob_download_path = PurePosixPath(
local_path
/ relative_path_to_current_platform(blob.name).relative_to(folder)
)
Path.mkdir(Path(local_blob_download_path.parent), parents=True, exist_ok=True)
blob.download_to_filename(local_blob_download_path)
return {
"bucket": bucket,
"folder": folder,
"directory": str(local_path),
}
| PullFromGcsOutput |
python | getlogbook__logbook | src/logbook/queues.py | {
"start": 12736,
"end": 16109
} | class ____(SubscriberBase):
"""A helper that acts as ZeroMQ subscriber and will dispatch received
log records to the active handler setup. There are multiple ways to
use this class.
It can be used to receive log records from a queue::
subscriber = ZeroMQSubscriber("tcp://127.0.0.1:5000")
record = subscriber.recv()
But it can also be used to receive and dispatch these in one go::
with target_handler:
subscriber = ZeroMQSubscriber("tcp://127.0.0.1:5000")
subscriber.dispatch_forever()
This will take all the log records from that queue and dispatch them
over to `target_handler`. If you want you can also do that in the
background::
subscriber = ZeroMQSubscriber("tcp://127.0.0.1:5000")
controller = subscriber.dispatch_in_background(target_handler)
The controller returned can be used to shut down the background
thread::
controller.stop()
If `multi` is set to `True`, the subscriber will use a `PULL` socket
and listen to records published by a `PUSH` socket (usually via a
:class:`ZeroMQHandler` with `multi` set to `True`). This allows a
single subscriber to dispatch multiple handlers.
"""
def __init__(self, uri=None, context=None, multi=False):
try:
import zmq
except ImportError:
raise RuntimeError(
"The pyzmq library is required for the ZeroMQSubscriber."
)
self._zmq = zmq
#: the zero mq context
self.context = context or zmq.Context()
if multi:
#: the zero mq socket.
self.socket = self.context.socket(zmq.PULL)
if uri is not None:
self.socket.bind(uri)
else:
#: the zero mq socket.
self.socket = self.context.socket(zmq.SUB)
if uri is not None:
self.socket.connect(uri)
self.socket.setsockopt_unicode(zmq.SUBSCRIBE, "")
def __del__(self):
try:
self.close()
except AttributeError:
# subscriber partially created
pass
def close(self):
"""Closes the zero mq socket."""
self.socket.close()
def recv(self, timeout=None):
"""Receives a single record from the socket. Timeout of 0 means
nonblocking, `None` means blocking and otherwise it's a timeout in
seconds after which the function just returns with `None`.
"""
if timeout is None:
rv = self.socket.recv()
elif not timeout:
rv = self.socket.recv(self._zmq.NOBLOCK)
if rv is None:
return
else:
if not self._zmq.select([self.socket], [], [], timeout)[0]:
return
rv = self.socket.recv(self._zmq.NOBLOCK)
rv = rv.decode("utf-8")
return LogRecord.from_dict(json.loads(rv))
def _fix_261_mplog():
"""necessary for older python's to disable a broken monkeypatch
in the logging module. See multiprocessing/util.py for the
hasattr() check. At least in Python 2.6.1 the multiprocessing
module is not imported by logging and as such the test in
the util fails.
"""
import logging
import multiprocessing
logging.multiprocessing = multiprocessing
| ZeroMQSubscriber |
python | getsentry__sentry | src/sentry/migrations/0912_make_organizationmemberteam_replica_is_active_true.py | {
"start": 155,
"end": 1488
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0911_increase_email_model_email_field_length"),
]
operations = [
migrations.AlterField(
model_name="organizationmemberteamreplica",
name="is_active",
field=models.BooleanField(db_default=True),
),
]
| Migration |
python | getsentry__sentry | src/sentry/utils/assets.py | {
"start": 201,
"end": 2526
} | class ____:
commit_sha: str
"""
The commit SHA of the currently deployed frontend version.
"""
entrypoints: dict[str, str]
"""
A mapping of unversioned entrypoint names to versioned entrypoints,
containing a content-hash suffix.
"""
@ttl_cache(ttl=60)
def _frontend_versions() -> FrontendVersions | None:
config = os.path.join(settings.CONF_DIR, "settings", "frontend", "frontend-versions.json")
try:
with open(config) as f:
return FrontendVersions(**json.load(f)) # getsentry path
except OSError:
return None # common case for self-hosted
def get_frontend_commit_sha() -> str | None:
"""
Returns the commit SHA of the currently configured frontend-versions.
"""
if versions := _frontend_versions():
return versions.commit_sha
return None
def get_frontend_app_asset_url(module: str, key: str) -> str:
"""
Returns an asset URL that is unversioned. These assets should have a
`Cache-Control: max-age=0, must-revalidate` so that clients must validate with the origin
server before using their locally cached asset.
Example:
{% frontend_app_asset_url 'sentry' 'entrypoints/sentry.css' %}
=> "/_static/dist/sentry/entrypoints/sentry.css"
"""
if not key.startswith("entrypoints/"):
raise AssertionError(f"unexpected key: {key}")
asset_path, key = key.split("/", 1)
versions = _frontend_versions()
# When a frontend entrypoint versions config is provided use to map the
# asset file to a hashed entrypoint
if versions:
asset_path = "entrypoints-hashed"
key = versions.entrypoints[key]
return "/".join(
(
settings.STATIC_FRONTEND_APP_URL.rstrip("/"),
module,
asset_path,
key,
)
)
def get_frontend_dist_prefix() -> str:
return f"{settings.STATIC_FRONTEND_APP_URL.rstrip('/')}/sentry/"
def get_asset_url(module: str, path: str) -> str:
"""
Returns a versioned asset URL (located within Sentry's static files).
Example:
{% asset_url 'sentry' 'images/sentry.png' %}
=> "/_static/74d127b78dc7daf2c51f/sentry/images/sentry.png"
"""
return "{}/{}/{}".format(settings.STATIC_URL.rstrip("/"), module, path.lstrip("/"))
| FrontendVersions |
python | walkccc__LeetCode | solutions/3013. Divide an Array Into Subarrays With Minimum Cost II/3013.py | {
"start": 42,
"end": 1559
} | class ____:
def minimumCost(self, nums: list[int], k: int, dist: int) -> int:
# Equivalently, the problem is to find nums[0] + the minimum sum of the top
# k - 1 numbers in nums[i..i + dist], where i > 0 and i + dist < n.
windowSum = sum(nums[i] for i in range(1, dist + 2))
selected = SortedList(nums[i] for i in range(1, dist + 2))
candidates = SortedList()
def balance() -> int:
"""
Returns the updated `windowSum` by balancing the multiset `selected` to
keep the top k - 1 numbers.
"""
nonlocal windowSum
while len(selected) < k - 1:
minCandidate = candidates[0]
windowSum += minCandidate
selected.add(minCandidate)
candidates.remove(minCandidate)
while len(selected) > k - 1:
maxSelected = selected[-1]
windowSum -= maxSelected
selected.remove(maxSelected)
candidates.add(maxSelected)
return windowSum
windowSum = balance()
minWindowSum = windowSum
for i in range(dist + 2, len(nums)):
outOfScope = nums[i - dist - 1]
if outOfScope in selected:
windowSum -= outOfScope
selected.remove(outOfScope)
else:
candidates.remove(outOfScope)
if nums[i] < selected[-1]: # nums[i] is a better number.
windowSum += nums[i]
selected.add(nums[i])
else:
candidates.add(nums[i])
windowSum = balance()
minWindowSum = min(minWindowSum, windowSum)
return nums[0] + minWindowSum
| Solution |
python | keras-team__keras | keras/src/regularizers/regularizers.py | {
"start": 8769,
"end": 11799
} | class ____(Regularizer):
"""Regularizer that encourages input vectors to be orthogonal to each other.
It can be applied to either the rows of a matrix (`mode="rows"`) or its
columns (`mode="columns"`). When applied to a `Dense` kernel of shape
`(input_dim, units)`, rows mode will seek to make the feature vectors
(i.e. the basis of the output space) orthogonal to each other.
Arguments:
factor: Float. The regularization factor. The regularization penalty
will be proportional to `factor` times the mean of the dot products
between the L2-normalized rows (if `mode="rows"`, or columns if
`mode="columns"`) of the inputs, excluding the product of each
row/column with itself. Defaults to `0.01`.
mode: String, one of `{"rows", "columns"}`. Defaults to `"rows"`. In
rows mode, the regularization effect seeks to make the rows of the
input orthogonal to each other. In columns mode, it seeks to make
the columns of the input orthogonal to each other.
Example:
>>> regularizer = OrthogonalRegularizer(factor=0.01)
>>> layer = Dense(units=4, kernel_regularizer=regularizer)
"""
def __init__(self, factor=0.01, mode="rows"):
validate_float_arg(factor, name="factor")
self.factor = ops.convert_to_tensor(factor)
if mode not in {"rows", "columns"}:
raise ValueError(
"Invalid value for argument `mode`. Expected one of "
f'{{"rows", "columns"}}. Received: mode={mode}'
)
self.mode = mode
def __call__(self, inputs):
if len(inputs.shape) != 2:
raise ValueError(
"Inputs to OrthogonalRegularizer must have rank 2. Received: "
f"inputs.shape={inputs.shape}"
)
if self.mode == "rows":
inputs = normalize(inputs, axis=1)
product = ops.matmul(inputs, ops.transpose(inputs))
size = inputs.shape[0]
else:
inputs = normalize(inputs, axis=0)
product = ops.matmul(ops.transpose(inputs), inputs)
size = inputs.shape[1]
product_no_diagonal = product * (
1.0 - ops.eye(size, dtype=inputs.dtype)
)
num_pairs = size * (size - 1.0) / 2.0
return (
self.factor
* 0.5
* ops.sum(ops.absolute(product_no_diagonal))
/ num_pairs
)
def get_config(self):
return {"factor": float(self.factor), "mode": self.mode}
def validate_float_arg(value, name):
"""check penalty number availability, raise ValueError if failed."""
if (
not isinstance(value, (float, int))
or (math.isinf(value) or math.isnan(value))
or value < 0
):
raise ValueError(
f"Invalid value for argument {name}: expected a non-negative float."
f"Received: {name}={value}"
)
return float(value)
| OrthogonalRegularizer |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/combinatory_ports.py | {
"start": 614,
"end": 671
} | class ____:
def method(self) -> None:
pass
| Base |
python | python__mypy | mypyc/annotate.py | {
"start": 4164,
"end": 9082
} | class ____:
"""Annotations for a single compiled source file."""
def __init__(self, path: str, annotations: dict[int, list[Annotation]]) -> None:
self.path = path
self.annotations = annotations
def generate_annotated_html(
html_fnam: str, result: BuildResult, modules: dict[str, ModuleIR], mapper: Mapper
) -> None:
annotations = []
for mod, mod_ir in modules.items():
path = result.graph[mod].path
tree = result.graph[mod].tree
assert tree is not None
annotations.append(
generate_annotations(path or "<source>", tree, mod_ir, result.types, mapper)
)
html = generate_html_report(annotations)
with open(html_fnam, "w") as f:
f.write(html)
formatter = FancyFormatter(sys.stdout, sys.stderr, False)
formatted = formatter.style(os.path.abspath(html_fnam), "none", underline=True, bold=True)
print(f"\nWrote {formatted} -- open in browser to view\n")
def generate_annotations(
path: str, tree: MypyFile, ir: ModuleIR, type_map: dict[Expression, Type], mapper: Mapper
) -> AnnotatedSource:
anns = {}
for func_ir in ir.functions:
anns.update(function_annotations(func_ir, tree))
visitor = ASTAnnotateVisitor(type_map, mapper)
for defn in tree.defs:
defn.accept(visitor)
anns.update(visitor.anns)
for line in visitor.ignored_lines:
if line in anns:
del anns[line]
return AnnotatedSource(path, anns)
def function_annotations(func_ir: FuncIR, tree: MypyFile) -> dict[int, list[Annotation]]:
"""Generate annotations based on mypyc IR."""
# TODO: check if func_ir.line is -1
anns: dict[int, list[Annotation]] = {}
for block in func_ir.blocks:
for op in block.ops:
if isinstance(op, CallC):
name = op.function_name
ann: str | Annotation | None = None
if name == "CPyObject_GetAttr":
attr_name = get_str_literal(op.args[1])
if attr_name in ("__prepare__", "GeneratorExit", "StopIteration"):
# These attributes are internal to mypyc/CPython, and/or accessed
# implicitly in generated code. The user has little control over
# them.
ann = None
elif attr_name:
ann = f'Get non-native attribute "{attr_name}".'
else:
ann = "Dynamic attribute lookup."
elif name == "PyObject_SetAttr":
attr_name = get_str_literal(op.args[1])
if attr_name == "__mypyc_attrs__":
# This is set implicitly and can't be avoided.
ann = None
elif attr_name:
ann = f'Set non-native attribute "{attr_name}".'
else:
ann = "Dynamic attribute set."
elif name == "PyObject_VectorcallMethod":
method_name = get_str_literal(op.args[0])
if method_name:
ann = f'Call non-native method "{method_name}" (it may be defined in a non-native class, or decorated).'
else:
ann = "Dynamic method call."
elif name in op_hints:
ann = op_hints[name]
elif name in ("CPyDict_GetItem", "CPyDict_SetItem"):
if (
isinstance(op.args[0], LoadStatic)
and isinstance(op.args[1], LoadLiteral)
and func_ir.name != "__top_level__"
):
load = op.args[0]
name = str(op.args[1].value)
sym = tree.names.get(name)
if (
sym
and sym.node
and load.namespace == "static"
and load.identifier == "globals"
):
if sym.node.fullname in stdlib_hints:
ann = stdlib_hints[sym.node.fullname]
elif isinstance(sym.node, Var):
ann = (
f'Access global "{name}" through namespace '
+ "dictionary (hint: access is faster if you can make it Final)."
)
else:
ann = f'Access "{name}" through global namespace dictionary.'
if ann:
if isinstance(ann, str):
ann = Annotation(ann)
anns.setdefault(op.line, []).append(ann)
return anns
| AnnotatedSource |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-weaviate/destination_weaviate/indexer.py | {
"start": 655,
"end": 744
} | class ____(Exception):
pass
CLOUD_DEPLOYMENT_MODE = "cloud"
| WeaviatePartialBatchError |
python | bokeh__bokeh | src/bokeh/core/property/instance.py | {
"start": 1783,
"end": 4156
} | class ____(Property[T]):
""" Accept values that are instances of any class.
.. note::
This is primarily useful for validation purpose. Non-serializable
objects will fail regardless during the serialization process.
"""
_instance_type: type[T] | Callable[[], type[T]] | str
def __init__(self, instance_type: type[T] | Callable[[], type[T]] | str, default: Init[T] = Undefined, help: str | None = None):
if not (isinstance(instance_type, (type, str)) or callable(instance_type)):
raise ValueError(f"expected a type, fn() -> type, or string, got {instance_type}")
if isinstance(instance_type, type):
self._assert_type(instance_type)
self._instance_type = instance_type
super().__init__(default=default, help=help)
@staticmethod
def _assert_type(instance_type: type[Any]) -> None:
pass
def __str__(self) -> str:
class_name = self.__class__.__name__
instance_type = self.instance_type.__name__
return f"{class_name}({instance_type})"
@property
def has_ref(self) -> bool:
return True
@property
def instance_type(self) -> type[T]:
instance_type: type[Serializable]
if isinstance(self._instance_type, type):
instance_type = self._instance_type
elif isinstance(self._instance_type, str):
module, name = self._instance_type.rsplit(".", 1)
instance_type = getattr(import_module(module, "bokeh"), name)
self._assert_type(instance_type)
self._instance_type = instance_type
else:
instance_type = self._instance_type()
self._assert_type(instance_type)
self._instance_type = instance_type
return instance_type
def validate(self, value: Any, detail: bool = True) -> None:
super().validate(value, detail)
if isinstance(value, self.instance_type):
return
instance_type = self.instance_type.__name__
value_type = type(value).__name__
msg = "" if not detail else f"expected an instance of type {instance_type}, got {value} of type {value_type}"
raise ValueError(msg)
def _may_have_unstable_default(self):
# because the instance value is mutable
return self._default is not Undefined
| Object |
python | Netflix__metaflow | metaflow/datastore/flow_datastore.py | {
"start": 289,
"end": 14552
} | class ____(object):
default_storage_impl = None
def __init__(
self,
flow_name,
environment=None,
metadata=None,
event_logger=None,
monitor=None,
storage_impl=None,
ds_root=None,
):
"""
Initialize a Flow level datastore.
This datastore can then be used to get TaskDataStore to store artifacts
and metadata about a task as well as a ContentAddressedStore to store
things like packages, etc.
Parameters
----------
flow_name : str
The name of the flow
environment : MetaflowEnvironment, optional
Environment this datastore is operating in
metadata : MetadataProvider, optional
The metadata provider to use and update if needed, by default None
event_logger : EventLogger, optional
EventLogger to use to report events, by default None
monitor : Monitor, optional
Monitor to use to measure/monitor events, by default None
storage_impl : type
Class for the backing DataStoreStorage to use; if not provided use
default_storage_impl, optional
ds_root : str
The optional root for this datastore; if not provided, use the
default for the DataStoreStorage, optional
"""
storage_impl = storage_impl if storage_impl else self.default_storage_impl
if storage_impl is None:
raise RuntimeError("No datastore storage implementation specified")
self._storage_impl = storage_impl(ds_root)
self.TYPE = self._storage_impl.TYPE
# Public attributes
self.flow_name = flow_name
self.environment = environment
self.metadata = metadata
self.logger = event_logger
self.monitor = monitor
self.ca_store = ContentAddressedStore(
self._storage_impl.path_join(self.flow_name, "data"), self._storage_impl
)
# Private
self._metadata_cache = None
@property
def datastore_root(self):
return self._storage_impl.datastore_root
def set_metadata_cache(self, cache):
self._metadata_cache = cache
def get_task_datastores(
self,
run_id=None,
steps=None,
pathspecs=None,
allow_not_done=False,
attempt=None,
include_prior=False,
mode="r",
join_type=None,
orig_flow_datastore=None,
spin_artifacts=None,
):
"""
Return a list of TaskDataStore for a subset of the tasks.
We filter the list based on `steps` if non-None.
Alternatively, `pathspecs` can contain the exact list of pathspec(s)
(run_id/step_name/task_id) that should be filtered.
Note: When `pathspecs` is specified, we expect strict consistency and
not eventual consistency in contrast to other modes.
Parameters
----------
run_id : str, optional
Run ID to get the tasks from. If not specified, use pathspecs,
by default None
steps : List[str] , optional
Steps to get the tasks from. If run_id is specified, this
must also be specified, by default None
pathspecs : List[str], optional
Full task specs (run_id/step_name/task_id[/attempt]). Can be used instead of
specifying run_id and steps, by default None
allow_not_done : bool, optional
If True, returns the latest attempt of a task even if that attempt
wasn't marked as done, by default False
attempt : int, optional
Attempt number of the tasks to return. If not provided, returns latest attempt.
include_prior : boolean, default False
If True, returns all attempts up to and including attempt.
mode : str, default "r"
Mode to initialize the returned TaskDataStores in.
join_type : str, optional, default None
If specified, the join type for the task. This is used to determine
the user specified artifacts for the task in case of a spin task.
orig_flow_datastore : MetadataProvider, optional, default None
The metadata provider in case of a spin task. If provided, the
returned TaskDataStore will be a SpinTaskDatastore instead of a
TaskDataStore.
spin_artifacts : Dict[str, Any], optional, default None
Artifacts provided by user that can override the artifacts fetched via the
spin pathspec.
Returns
-------
List[TaskDataStore]
Task datastores for all the tasks specified.
"""
task_urls = []
# Note: When `pathspecs` is specified, we avoid the potentially
# eventually consistent `list_content` operation, and directly construct
# the task_urls list.
if pathspecs:
task_urls = [
self._storage_impl.path_join(self.flow_name, pathspec)
for pathspec in pathspecs
]
else:
run_prefix = self._storage_impl.path_join(self.flow_name, run_id)
if steps:
step_urls = [
self._storage_impl.path_join(run_prefix, step) for step in steps
]
else:
step_urls = [
step.path
for step in self._storage_impl.list_content([run_prefix])
if step.is_file is False
]
task_urls = [
task.path
for task in self._storage_impl.list_content(step_urls)
if task.is_file is False
]
urls = []
# parse content urls for specific attempt only, or for all attempts in max range
attempt_range = range(metaflow_config.MAX_ATTEMPTS)
# we have no reason to check for attempts greater than MAX_ATTEMPTS, as they do not exist.
if attempt is not None and attempt <= metaflow_config.MAX_ATTEMPTS - 1:
attempt_range = range(attempt + 1) if include_prior else [attempt]
for task_url in task_urls:
# task_url can have a trailing slash, so strip this to avoid empty strings in the split
task_splits = task_url.rstrip("/").split("/")
# Usually it is flow, run, step, task (so 4 components) -- if we have a
# fifth one, there is a specific attempt number listed as well.
task_attempt_range = attempt_range
if len(task_splits) == 5:
task_attempt_range = [int(task_splits[4])]
for attempt in task_attempt_range:
for suffix in [
TaskDataStore.METADATA_DATA_SUFFIX,
TaskDataStore.METADATA_ATTEMPT_SUFFIX,
TaskDataStore.METADATA_DONE_SUFFIX,
]:
urls.append(
self._storage_impl.path_join(
task_url,
TaskDataStore.metadata_name_for_attempt(suffix, attempt),
)
)
latest_started_attempts = {}
done_attempts = set()
data_objs = {}
with self._storage_impl.load_bytes(urls) as get_results:
for key, path, meta in get_results:
if path is not None:
_, run, step, task, fname = self._storage_impl.path_split(key)
attempt, fname = TaskDataStore.parse_attempt_metadata(fname)
attempt = int(attempt)
if fname == TaskDataStore.METADATA_DONE_SUFFIX:
done_attempts.add((run, step, task, attempt))
elif fname == TaskDataStore.METADATA_ATTEMPT_SUFFIX:
latest_started_attempts[(run, step, task)] = max(
latest_started_attempts.get((run, step, task), 0), attempt
)
elif fname == TaskDataStore.METADATA_DATA_SUFFIX:
# This somewhat breaks the abstraction since we are using
# load_bytes directly instead of load_metadata
with open(path, encoding="utf-8") as f:
data_objs[(run, step, task, attempt)] = json.load(f)
# We now figure out the latest attempt that started *and* finished.
# Note that if an attempt started but didn't finish, we do *NOT* return
# the previous attempt
latest_started_attempts = set(
(run, step, task, attempt)
for (run, step, task), attempt in latest_started_attempts.items()
)
if allow_not_done:
latest_to_fetch = (
done_attempts.union(latest_started_attempts)
if include_prior
else latest_started_attempts
)
else:
latest_to_fetch = (
done_attempts
if include_prior
else (latest_started_attempts & done_attempts)
)
latest_to_fetch = [
(
v[0],
v[1],
v[2],
v[3],
data_objs.get(v),
mode,
allow_not_done,
join_type,
orig_flow_datastore,
spin_artifacts,
)
for v in latest_to_fetch
]
return list(itertools.starmap(self.get_task_datastore, latest_to_fetch))
def get_task_datastore(
self,
run_id,
step_name,
task_id,
attempt=None,
data_metadata=None,
mode="r",
allow_not_done=False,
join_type=None,
orig_flow_datastore=None,
spin_artifacts=None,
persist=True,
):
if orig_flow_datastore is not None:
# In spin step subprocess, use SpinTaskDatastore for accessing artifacts
if join_type is not None:
# If join_type is specified, we need to use the artifacts corresponding
# to that particular join index, specified by the parent task pathspec.
spin_artifacts = spin_artifacts.get(
f"{run_id}/{step_name}/{task_id}", {}
)
from_start(
"FlowDataStore: get_task_datastore for spin task for type %s %s metadata"
% (self.TYPE, "without" if data_metadata is None else "with")
)
# Get the task datastore for the spun task.
orig_datastore = orig_flow_datastore.get_task_datastore(
run_id,
step_name,
task_id,
attempt=attempt,
data_metadata=data_metadata,
mode=mode,
allow_not_done=allow_not_done,
persist=persist,
)
return SpinTaskDatastore(
self.flow_name,
run_id,
step_name,
task_id,
orig_datastore,
spin_artifacts,
)
cache_hit = False
if (
self._metadata_cache is not None
and data_metadata is None
and attempt is not None
and allow_not_done is False
):
# If we have a metadata cache, we can try to load the metadata
# from the cache if it is not provided.
data_metadata = self._metadata_cache.load_metadata(
run_id, step_name, task_id, attempt
)
cache_hit = data_metadata is not None
from_start(
"FlowDataStore: get_task_datastore for regular task for type %s %s metadata"
% (self.TYPE, "without" if data_metadata is None else "with")
)
task_datastore = TaskDataStore(
self,
run_id,
step_name,
task_id,
attempt=attempt,
data_metadata=data_metadata,
mode=mode,
allow_not_done=allow_not_done,
persist=persist,
)
# Only persist in cache if it is non-changing (so done only) and we have
# a non-None attempt
if (
not cache_hit
and self._metadata_cache is not None
and allow_not_done is False
and attempt is not None
):
self._metadata_cache.store_metadata(
run_id, step_name, task_id, attempt, task_datastore.ds_metadata
)
return task_datastore
def save_data(self, data_iter, len_hint=0):
"""Saves data to the underlying content-addressed store
Parameters
----------
data_iter : Iterator[bytes]
Iterator over blobs to save; each item in the list will be saved individually.
len_hint : int
Estimate of the number of items that will be produced by the iterator,
by default 0.
Returns
-------
(str, str)
Tuple containing the URI to access the saved resource as well as
the key needed to retrieve it using load_data. This is returned in
the same order as the input.
"""
save_results = self.ca_store.save_blobs(data_iter, raw=True, len_hint=len_hint)
return [(r.uri, r.key) for r in save_results]
def load_data(self, keys, force_raw=False):
"""Retrieves data from the underlying content-addressed store
Parameters
----------
keys : List[str]
Keys to retrieve
force_raw : bool, optional
Backward compatible mode. Raw data will be properly identified with
metadata information but older datastores did not do this. If you
know the data should be handled as raw data, set this to True,
by default False
Returns
-------
Iterator[bytes]
Iterator over (key, blob) tuples
"""
for key, blob in self.ca_store.load_blobs(keys, force_raw=force_raw):
yield key, blob
| FlowDataStore |
python | protocolbuffers__protobuf | python/google/protobuf/internal/descriptor_pool_test.py | {
"start": 1684,
"end": 31311
} | class ____(object):
@unittest.skipIf(not ALSO_RUN_BENCHMARKS, 'Benchmarks are disabled.')
def testDescriptorPoolBenchmark(self):
if ALSO_RUN_BENCHMARKS:
n_trials = 100
# FindFileByName
name = 'google/protobuf/internal/factory_test1.proto'
duration = timeit.timeit(
lambda: self.pool.FindFileByName(name),
number=n_trials,
)
print(f'FindFileByName: {duration / n_trials * 1000}ms')
# FindEnumTypeByName
name = 'google.protobuf.python.internal.Factory1Enum'
duration = timeit.timeit(
lambda: self.pool.FindEnumTypeByName(name),
number=n_trials,
)
print(f'FindEnumTypeByName: {duration / n_trials * 1000}ms')
# FindOneofByName
name = 'google.protobuf.python.internal.Factory2Message.oneof_field'
duration = timeit.timeit(
lambda: self.pool.FindOneofByName(name),
number=n_trials,
)
print(f'FindOneofByName: {duration / n_trials * 1000}ms')
# FindExtensionByName
name = 'google.protobuf.python.internal.another_field'
duration = timeit.timeit(
lambda: self.pool.FindExtensionByName(name),
number=n_trials,
)
print(f'FindExtensionByName: {duration / n_trials * 1000}ms')
else:
print('Skipping benchmark in non-benchmark mode.')
def testFindFileByName(self):
name1 = 'google/protobuf/internal/factory_test1.proto'
file_desc1 = self.pool.FindFileByName(name1)
self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
self.assertEqual(name1, file_desc1.name)
self.assertEqual('google.protobuf.python.internal', file_desc1.package)
self.assertIn('Factory1Message', file_desc1.message_types_by_name)
name2 = 'google/protobuf/internal/factory_test2.proto'
file_desc2 = self.pool.FindFileByName(name2)
self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
self.assertEqual(name2, file_desc2.name)
self.assertEqual('google.protobuf.python.internal', file_desc2.package)
self.assertIn('Factory2Message', file_desc2.message_types_by_name)
def testFindFileByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindFileByName('Does not exist')
def testFindFileContainingSymbol(self):
file_desc1 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory1Message')
self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test1.proto',
file_desc1.name)
self.assertEqual('google.protobuf.python.internal', file_desc1.package)
self.assertIn('Factory1Message', file_desc1.message_types_by_name)
file_desc2 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message')
self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test2.proto',
file_desc2.name)
self.assertEqual('google.protobuf.python.internal', file_desc2.package)
self.assertIn('Factory2Message', file_desc2.message_types_by_name)
# Tests top level extension.
file_desc3 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.another_field')
self.assertIsInstance(file_desc3, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test2.proto',
file_desc3.name)
# Tests nested extension inside a message.
file_desc4 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message.one_more_field')
self.assertIsInstance(file_desc4, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test2.proto',
file_desc4.name)
file_desc5 = self.pool.FindFileContainingSymbol(
'proto2_unittest.TestService')
self.assertIsInstance(file_desc5, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/unittest.proto',
file_desc5.name)
# Tests the generated pool.
assert descriptor_pool.Default().FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message.one_more_field')
assert descriptor_pool.Default().FindFileContainingSymbol(
'google.protobuf.python.internal.another_field')
assert descriptor_pool.Default().FindFileContainingSymbol(
'proto2_unittest.TestService')
# Can find field.
file_desc6 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory1Message.list_value')
self.assertIsInstance(file_desc6, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test1.proto',
file_desc6.name)
# Can find top level Enum value.
file_desc7 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.FACTORY_1_VALUE_0')
self.assertIsInstance(file_desc7, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test1.proto',
file_desc7.name)
# Can find nested Enum value.
file_desc8 = self.pool.FindFileContainingSymbol(
'proto2_unittest.TestAllTypes.FOO')
self.assertIsInstance(file_desc8, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/unittest.proto',
file_desc8.name)
# TODO: Add tests for no package when b/13860351 is fixed.
self.assertRaises(KeyError, self.pool.FindFileContainingSymbol,
'google.protobuf.python.internal.Factory1Message.none_field')
def testCrossFileMessageTypesByName(self):
self.assertIs(
descriptor_pool_test1_pb2.DescriptorPoolTest1.DESCRIPTOR,
descriptor_pool_test1_pb2.DESCRIPTOR.message_types_by_name[
'DescriptorPoolTest1'
],
)
with self.assertRaises(KeyError):
descriptor_pool_test2_pb2.DESCRIPTOR.message_types_by_name[
'DescriptorPoolTest1'
]
def testCrossFileEnumTypesByName(self):
self.assertIs(
descriptor_pool_test1_pb2.TopLevelEnumTest1.DESCRIPTOR,
descriptor_pool_test1_pb2.DESCRIPTOR.enum_types_by_name[
'TopLevelEnumTest1'
],
)
with self.assertRaises(KeyError):
descriptor_pool_test2_pb2.DESCRIPTOR.enum_types_by_name[
'TopLevelEnumTest1'
]
def testCrossFileExtensionsByName(self):
self.assertIs(
descriptor_pool_test1_pb2.top_level_extension_test1,
descriptor_pool_test1_pb2.DESCRIPTOR.extensions_by_name[
'top_level_extension_test1'
],
)
with self.assertRaises(KeyError):
descriptor_pool_test2_pb2.DESCRIPTOR.extensions_by_name[
'top_level_extension_test1'
]
def testCrossFileServicesByName(self):
descriptor_pool_test1_pb2.DESCRIPTOR.services_by_name[
'DescriptorPoolTestService'
],
with self.assertRaises(KeyError):
descriptor_pool_test2_pb2.DESCRIPTOR.services_by_name[
'DescriptorPoolTestService'
]
def testFindFileContainingSymbolFailure(self):
with self.assertRaises(KeyError):
self.pool.FindFileContainingSymbol('Does not exist')
def testFindMessageTypeByName(self):
msg1 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
self.assertIsInstance(msg1, descriptor.Descriptor)
self.assertEqual('Factory1Message', msg1.name)
self.assertEqual('google.protobuf.python.internal.Factory1Message',
msg1.full_name)
self.assertEqual(None, msg1.containing_type)
self.assertFalse(msg1.has_options)
nested_msg1 = msg1.nested_types[0]
self.assertEqual('NestedFactory1Message', nested_msg1.name)
self.assertEqual(msg1, nested_msg1.containing_type)
nested_enum1 = msg1.enum_types[0]
self.assertEqual('NestedFactory1Enum', nested_enum1.name)
self.assertEqual(msg1, nested_enum1.containing_type)
self.assertEqual(nested_msg1, msg1.fields_by_name[
'nested_factory_1_message'].message_type)
self.assertEqual(nested_enum1, msg1.fields_by_name[
'nested_factory_1_enum'].enum_type)
msg2 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
self.assertIsInstance(msg2, descriptor.Descriptor)
self.assertEqual('Factory2Message', msg2.name)
self.assertEqual('google.protobuf.python.internal.Factory2Message',
msg2.full_name)
self.assertIsNone(msg2.containing_type)
nested_msg2 = msg2.nested_types[0]
self.assertEqual('NestedFactory2Message', nested_msg2.name)
self.assertEqual(msg2, nested_msg2.containing_type)
nested_enum2 = msg2.enum_types[0]
self.assertEqual('NestedFactory2Enum', nested_enum2.name)
self.assertEqual(msg2, nested_enum2.containing_type)
self.assertEqual(nested_msg2, msg2.fields_by_name[
'nested_factory_2_message'].message_type)
self.assertEqual(nested_enum2, msg2.fields_by_name[
'nested_factory_2_enum'].enum_type)
self.assertTrue(msg2.fields_by_name['int_with_default'].has_default_value)
self.assertEqual(
1776, msg2.fields_by_name['int_with_default'].default_value)
self.assertTrue(
msg2.fields_by_name['double_with_default'].has_default_value)
self.assertEqual(
9.99, msg2.fields_by_name['double_with_default'].default_value)
self.assertTrue(
msg2.fields_by_name['string_with_default'].has_default_value)
self.assertEqual(
'hello world', msg2.fields_by_name['string_with_default'].default_value)
self.assertTrue(msg2.fields_by_name['bool_with_default'].has_default_value)
self.assertFalse(msg2.fields_by_name['bool_with_default'].default_value)
self.assertTrue(msg2.fields_by_name['enum_with_default'].has_default_value)
self.assertEqual(
1, msg2.fields_by_name['enum_with_default'].default_value)
msg3 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Message')
self.assertEqual(nested_msg2, msg3)
self.assertTrue(msg2.fields_by_name['bytes_with_default'].has_default_value)
self.assertEqual(
b'a\xfb\x00c',
msg2.fields_by_name['bytes_with_default'].default_value)
self.assertEqual(1, len(msg2.oneofs))
self.assertEqual(1, len(msg2.oneofs_by_name))
self.assertEqual(2, len(msg2.oneofs[0].fields))
for name in ['oneof_int', 'oneof_string']:
self.assertEqual(msg2.oneofs[0],
msg2.fields_by_name[name].containing_oneof)
self.assertIn(msg2.fields_by_name[name], msg2.oneofs[0].fields)
def testFindTypeErrors(self):
self.assertRaises(TypeError, self.pool.FindExtensionByNumber, '')
self.assertRaises(KeyError, self.pool.FindMethodByName, '')
# TODO: Fix python to raise correct errors.
if api_implementation.Type() == 'python':
error_type = AttributeError
else:
error_type = TypeError
self.assertRaises(error_type, self.pool.FindMessageTypeByName, 0)
self.assertRaises(error_type, self.pool.FindFieldByName, 0)
self.assertRaises(error_type, self.pool.FindExtensionByName, 0)
self.assertRaises(error_type, self.pool.FindEnumTypeByName, 0)
self.assertRaises(error_type, self.pool.FindOneofByName, 0)
self.assertRaises(error_type, self.pool.FindServiceByName, 0)
self.assertRaises(error_type, self.pool.FindMethodByName, 0)
self.assertRaises(error_type, self.pool.FindFileContainingSymbol, 0)
if api_implementation.Type() == 'python':
error_type = KeyError
self.assertRaises(error_type, self.pool.FindFileByName, 0)
def testFindMessageTypeByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindMessageTypeByName('Does not exist')
def testFindEnumTypeByName(self):
enum1 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory1Enum')
self.assertIsInstance(enum1, descriptor.EnumDescriptor)
self.assertEqual(0, enum1.values_by_name['FACTORY_1_VALUE_0'].number)
self.assertEqual(1, enum1.values_by_name['FACTORY_1_VALUE_1'].number)
self.assertFalse(enum1.has_options)
nested_enum1 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory1Message.NestedFactory1Enum')
self.assertIsInstance(nested_enum1, descriptor.EnumDescriptor)
self.assertEqual(
0, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_0'].number)
self.assertEqual(
1, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_1'].number)
enum2 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory2Enum')
self.assertIsInstance(enum2, descriptor.EnumDescriptor)
self.assertEqual(0, enum2.values_by_name['FACTORY_2_VALUE_0'].number)
self.assertEqual(1, enum2.values_by_name['FACTORY_2_VALUE_1'].number)
nested_enum2 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum')
self.assertIsInstance(nested_enum2, descriptor.EnumDescriptor)
self.assertEqual(
0, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_0'].number)
self.assertEqual(
1, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_1'].number)
def testFindEnumTypeByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindEnumTypeByName('Does not exist')
def testFindFieldByName(self):
field = self.pool.FindFieldByName(
'google.protobuf.python.internal.Factory1Message.list_value')
self.assertEqual(field.name, 'list_value')
self.assertTrue(field.is_repeated)
self.assertFalse(field.has_options)
with self.assertRaises(KeyError):
self.pool.FindFieldByName('Does not exist')
def testFindOneofByName(self):
oneof = self.pool.FindOneofByName(
'google.protobuf.python.internal.Factory2Message.oneof_field')
self.assertEqual(oneof.name, 'oneof_field')
with self.assertRaises(KeyError):
self.pool.FindOneofByName('Does not exist')
def testFindExtensionByName(self):
# An extension defined in a message.
extension = self.pool.FindExtensionByName(
'google.protobuf.python.internal.Factory2Message.one_more_field')
self.assertEqual(extension.name, 'one_more_field')
# An extension defined at file scope.
extension = self.pool.FindExtensionByName(
'google.protobuf.python.internal.another_field')
self.assertEqual(extension.name, 'another_field')
self.assertEqual(extension.number, 1002)
with self.assertRaises(KeyError):
self.pool.FindFieldByName('Does not exist')
def testFindAllExtensions(self):
factory1_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
factory2_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
# An extension defined in a message.
one_more_field = factory2_message.extensions_by_name['one_more_field']
# An extension defined at file scope.
factory_test2 = self.pool.FindFileByName(
'google/protobuf/internal/factory_test2.proto')
another_field = factory_test2.extensions_by_name['another_field']
message_field1 = factory_test2.extensions_by_name['message_field1']
message_field2 = factory_test2.extensions_by_name['message_field2']
extensions = self.pool.FindAllExtensions(factory1_message)
expected_extension_numbers = set(
[one_more_field, another_field, message_field1, message_field2]
)
self.assertEqual(expected_extension_numbers, set(extensions))
# Verify that mutating the returned list does not affect the pool.
extensions.append('unexpected_element')
# Get the extensions again, the returned value does not contain the
# 'unexpected_element'.
extensions = self.pool.FindAllExtensions(factory1_message)
self.assertEqual(expected_extension_numbers, set(extensions))
def testFindExtensionByNumber(self):
factory1_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
# Build factory_test2.proto which will put extensions to the pool
self.pool.FindFileByName(
'google/protobuf/internal/factory_test2.proto')
# An extension defined in a message.
extension = self.pool.FindExtensionByNumber(factory1_message, 1001)
self.assertEqual(extension.name, 'one_more_field')
# An extension defined at file scope.
extension = self.pool.FindExtensionByNumber(factory1_message, 1002)
self.assertEqual(extension.name, 'another_field')
with self.assertRaises(KeyError):
extension = self.pool.FindExtensionByNumber(factory1_message, 1234567)
def testExtensionsLenFromParsed(self):
factory1_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message'
)
# Build factory_test2.proto which will put extensions to the pool
self.pool.FindFileByName(
'google/protobuf/internal/factory_test2.proto'
)
message_class = message_factory.GetMessageClass(factory1_message)
message = message_class()
self.assertEqual(len(message.Extensions), 0)
message.ParseFromString(b'\xda\x3e\000\xe2\x3e\000')
self.assertEqual(len(message.Extensions), 2)
# Verify consistency with related methods.
self.assertEqual(len(list(message.Extensions)), 2)
self.assertEqual(len(message.ListFields()), 2)
def testExtensionsLenFromSet(self):
factory1_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message'
)
# Build factory_test2.proto which will put extensions to the pool
self.pool.FindFileByName(
'google/protobuf/internal/factory_test2.proto'
)
message_class = message_factory.GetMessageClass(factory1_message)
message = message_class()
self.assertEqual(len(message.Extensions), 0)
extension1 = self.pool.FindExtensionByNumber(factory1_message, 1003)
extension2 = self.pool.FindExtensionByNumber(factory1_message, 1004)
message.Extensions[extension1].a = 1
message.Extensions[extension2].a = 2
self.assertEqual(len(message.Extensions), 2)
# Verify consistency with related methods.
self.assertEqual(len(list(message.Extensions)), 2)
self.assertEqual(len(message.ListFields()), 2)
def testExtensionsAreNotFields(self):
with self.assertRaises(KeyError):
self.pool.FindFieldByName('google.protobuf.python.internal.another_field')
with self.assertRaises(KeyError):
self.pool.FindFieldByName(
'google.protobuf.python.internal.Factory2Message.one_more_field')
with self.assertRaises(KeyError):
self.pool.FindExtensionByName(
'google.protobuf.python.internal.Factory1Message.list_value')
def testFindService(self):
service = self.pool.FindServiceByName('proto2_unittest.TestService')
self.assertEqual(service.full_name, 'proto2_unittest.TestService')
with self.assertRaises(KeyError):
self.pool.FindServiceByName('Does not exist')
method = self.pool.FindMethodByName('proto2_unittest.TestService.Foo')
self.assertIs(method.containing_service, service)
with self.assertRaises(KeyError):
self.pool.FindMethodByName('proto2_unittest.TestService.Doesnotexist')
def testUserDefinedDB(self):
db = descriptor_database.DescriptorDatabase()
self.pool = descriptor_pool.DescriptorPool(db)
db.Add(self.factory_test1_fd)
db.Add(self.factory_test2_fd)
self.testFindMessageTypeByName()
def testAddSerializedFile(self):
if isinstance(self, SecondaryDescriptorFromDescriptorDB):
if api_implementation.Type() != 'python':
# Cpp extension cannot call Add on a DescriptorPool
# that uses a DescriptorDatabase.
# TODO: Fix python and cpp extension diff.
return
self.pool = descriptor_pool.DescriptorPool()
file1 = self.pool.AddSerializedFile(
self.factory_test1_fd.SerializeToString())
file2 = self.pool.AddSerializedFile(
self.factory_test2_fd.SerializeToString())
self.assertEqual(file1.name,
'google/protobuf/internal/factory_test1.proto')
self.assertEqual(file2.name,
'google/protobuf/internal/factory_test2.proto')
self.testFindMessageTypeByName()
self.pool.AddSerializedFile(timestamp_pb2.DESCRIPTOR.serialized_pb)
self.pool.AddSerializedFile(duration_pb2.DESCRIPTOR.serialized_pb)
self.pool.AddSerializedFile(struct_pb2.DESCRIPTOR.serialized_pb)
file_json = self.pool.AddSerializedFile(
more_messages_pb2.DESCRIPTOR.serialized_pb)
field = file_json.message_types_by_name['class'].fields_by_name['int_field']
self.assertEqual(field.json_name, 'json_int')
def testAddSerializedFileTwice(self):
if isinstance(self, SecondaryDescriptorFromDescriptorDB):
if api_implementation.Type() != 'python':
# Cpp extension cannot call Add on a DescriptorPool
# that uses a DescriptorDatabase.
# TODO: Fix python and cpp extension diff.
return
self.pool = descriptor_pool.DescriptorPool()
file1_first = self.pool.AddSerializedFile(
self.factory_test1_fd.SerializeToString())
file1_again = self.pool.AddSerializedFile(
self.factory_test1_fd.SerializeToString())
self.assertIs(file1_first, file1_again)
def testEnumDefaultValue(self):
"""Test the default value of enums which don't start at zero."""
def _CheckDefaultValue(file_descriptor):
default_value = (file_descriptor
.message_types_by_name['DescriptorPoolTest1']
.fields_by_name['nested_enum']
.default_value)
self.assertEqual(default_value,
descriptor_pool_test1_pb2.DescriptorPoolTest1.BETA)
# First check what the generated descriptor contains.
_CheckDefaultValue(descriptor_pool_test1_pb2.DESCRIPTOR)
# Then check the generated pool. Normally this is the same descriptor.
file_descriptor = symbol_database.Default().pool.FindFileByName(
'google/protobuf/internal/descriptor_pool_test1.proto')
self.assertIs(file_descriptor, descriptor_pool_test1_pb2.DESCRIPTOR)
_CheckDefaultValue(file_descriptor)
if isinstance(self, SecondaryDescriptorFromDescriptorDB):
if api_implementation.Type() != 'python':
# Cpp extension cannot call Add on a DescriptorPool
# that uses a DescriptorDatabase.
# TODO: Fix python and cpp extension diff.
return
# Then check the dynamic pool and its internal DescriptorDatabase.
descriptor_proto = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(descriptor_proto)
# And do the same check as above
file_descriptor = self.pool.FindFileByName(
'google/protobuf/internal/descriptor_pool_test1.proto')
_CheckDefaultValue(file_descriptor)
def testDefaultValueForCustomMessages(self):
"""Check the value returned by non-existent fields."""
def _CheckValueAndType(value, expected_value, expected_type):
self.assertEqual(value, expected_value)
self.assertIsInstance(value, expected_type)
def _CheckDefaultValues(msg):
try:
int64 = long
except NameError: # Python3
int64 = int
try:
unicode_type = unicode
except NameError: # Python3
unicode_type = str
_CheckValueAndType(msg.optional_int32, 0, int)
_CheckValueAndType(msg.optional_uint64, 0, (int64, int))
_CheckValueAndType(msg.optional_float, 0, (float, int))
_CheckValueAndType(msg.optional_double, 0, (float, int))
_CheckValueAndType(msg.optional_bool, False, bool)
_CheckValueAndType(msg.optional_string, u'', unicode_type)
_CheckValueAndType(msg.optional_bytes, b'', bytes)
_CheckValueAndType(msg.optional_nested_enum, msg.FOO, int)
# First for the generated message
_CheckDefaultValues(unittest_pb2.TestAllTypes())
# Then for a message built with from the DescriptorPool.
pool = descriptor_pool.DescriptorPool()
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_public_pb2.DESCRIPTOR.serialized_pb))
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_pb2.DESCRIPTOR.serialized_pb))
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_pb2.DESCRIPTOR.serialized_pb))
message_class = message_factory.GetMessageClass(
pool.FindMessageTypeByName(
unittest_pb2.TestAllTypes.DESCRIPTOR.full_name))
_CheckDefaultValues(message_class())
def testAddFileDescriptor(self):
if isinstance(self, SecondaryDescriptorFromDescriptorDB):
if api_implementation.Type() != 'python':
# Cpp extension cannot call Add on a DescriptorPool
# that uses a DescriptorDatabase.
# TODO: Fix python and cpp extension diff.
return
file_desc = descriptor_pb2.FileDescriptorProto(name='some/file.proto')
self.pool.Add(file_desc)
self.pool.AddSerializedFile(file_desc.SerializeToString())
def testComplexNesting(self):
if isinstance(self, SecondaryDescriptorFromDescriptorDB):
if api_implementation.Type() != 'python':
# Cpp extension cannot call Add on a DescriptorPool
# that uses a DescriptorDatabase.
# TODO: Fix python and cpp extension diff.
return
timestamp_desc = descriptor_pb2.FileDescriptorProto.FromString(
timestamp_pb2.DESCRIPTOR.serialized_pb)
duration_desc = descriptor_pb2.FileDescriptorProto.FromString(
duration_pb2.DESCRIPTOR.serialized_pb)
struct_desc = descriptor_pb2.FileDescriptorProto.FromString(
struct_pb2.DESCRIPTOR.serialized_pb
)
more_messages_desc = descriptor_pb2.FileDescriptorProto.FromString(
more_messages_pb2.DESCRIPTOR.serialized_pb)
test1_desc = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb)
test2_desc = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test2_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(timestamp_desc)
self.pool.Add(duration_desc)
self.pool.Add(struct_desc)
self.pool.Add(more_messages_desc)
self.pool.Add(test1_desc)
self.pool.Add(test2_desc)
TEST1_FILE.CheckFile(self, self.pool)
TEST2_FILE.CheckFile(self, self.pool)
def testConflictRegister(self):
if isinstance(self, SecondaryDescriptorFromDescriptorDB):
if api_implementation.Type() != 'python':
# Cpp extension cannot call Add on a DescriptorPool
# that uses a DescriptorDatabase.
# TODO: Fix python and cpp extension diff.
return
unittest_fd = descriptor_pb2.FileDescriptorProto.FromString(
unittest_pb2.DESCRIPTOR.serialized_pb)
conflict_fd = copy.deepcopy(unittest_fd)
conflict_fd.name = 'other_file'
if api_implementation.Type() != 'python':
pass
else:
pool = copy.deepcopy(self.pool)
file_descriptor = unittest_pb2.DESCRIPTOR
pool._AddDescriptor(
file_descriptor.message_types_by_name['TestAllTypes'])
pool._AddEnumDescriptor(
file_descriptor.enum_types_by_name['ForeignEnum'])
pool._AddServiceDescriptor(
file_descriptor.services_by_name['TestService'])
pool._AddExtensionDescriptor(
file_descriptor.extensions_by_name['optional_int32_extension'])
pool.Add(unittest_fd)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
pool.Add(conflict_fd)
self.assertTrue(len(w))
self.assertIs(w[0].category, RuntimeWarning)
self.assertIn('Conflict register for file "other_file": ',
str(w[0].message))
pool.FindFileByName(unittest_fd.name)
with self.assertRaises(TypeError):
pool.FindFileByName(conflict_fd.name)
def testTypeNotSet(self):
f = descriptor_pb2.FileDescriptorProto(
name='google/protobuf/internal/not_type.proto',
package='google.protobuf.python.internal',
syntax='proto3')
f.enum_type.add(name='TestEnum').value.add(name='DEFAULTVALUE',
number=0)
msg_proto = f.message_type.add(name='TestMessage')
msg_proto.nested_type.add(name='Nested')
# type may not set if type_name is set in FieldDescriptorProto
msg_proto.field.add(name='nested_field',
number=1,
label=descriptor.FieldDescriptor.LABEL_OPTIONAL,
type_name='Nested')
msg_proto.field.add(name='enum_field',
number=2,
label=descriptor.FieldDescriptor.LABEL_REPEATED,
type_name='TestEnum')
pool = descriptor_pool.DescriptorPool()
pool.Add(f)
file_des = pool.FindFileByName('google/protobuf/internal/not_type.proto')
msg = file_des.message_types_by_name['TestMessage']
nested_field = msg.fields_by_name['nested_field']
self.assertTrue(nested_field.has_presence)
# cpp extension and upb do not provide is_packed on FieldDescriptor
if api_implementation.Type() == 'python':
self.assertFalse(nested_field.is_packed)
enum_field = msg.fields_by_name['enum_field']
self.assertFalse(enum_field.has_presence)
if api_implementation.Type() == 'python':
self.assertTrue(enum_field.is_packed)
@testing_refleaks.TestCase
| DescriptorPoolTestBase |
python | doocs__leetcode | solution/0500-0599/0589.N-ary Tree Preorder Traversal/Solution.py | {
"start": 152,
"end": 449
} | class ____:
def preorder(self, root: "Node") -> List[int]:
def dfs(root):
if root is None:
return
ans.append(root.val)
for child in root.children:
dfs(child)
ans = []
dfs(root)
return ans
| Solution |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_events_timeseries_trace_metrics.py | {
"start": 390,
"end": 6090
} | class ____(OrganizationEventsEndpointTestBase):
endpoint = "sentry-api-0-organization-events-timeseries"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.start = self.day_ago = before_now(days=1).replace(
hour=10, minute=0, second=0, microsecond=0
)
self.end = self.start + timedelta(hours=6)
self.two_days_ago = self.day_ago - timedelta(days=1)
self.url = reverse(
self.endpoint,
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
def _do_request(self, data, url=None, features=None):
return self.client.get(self.url if url is None else url, data=data, format="json")
def test_simple(self) -> None:
metric_values = [6, 0, 6, 3, 0, 3]
trace_metrics = []
for hour, value in enumerate(metric_values):
trace_metrics.extend(
[
self.create_trace_metric(
"foo",
value,
"counter",
timestamp=self.start + timedelta(hours=hour),
)
]
)
self.store_trace_metrics(trace_metrics)
response = self._do_request(
data={
"start": self.start,
"end": self.end,
"interval": "1h",
"yAxis": "sum(value)",
"query": "metric.name:foo",
"project": self.project.id,
"dataset": "tracemetrics",
},
)
assert response.status_code == 200, response.content
assert response.data["meta"] == {
"dataset": "tracemetrics",
"start": self.start.timestamp() * 1000,
"end": self.end.timestamp() * 1000,
}
assert len(response.data["timeSeries"]) == 1
timeseries = response.data["timeSeries"][0]
assert len(timeseries["values"]) == 6
assert timeseries["yAxis"] == "sum(value)"
assert timeseries["values"] == build_expected_timeseries(
self.start, 3_600_000, metric_values, ignore_accuracy=True
)
assert timeseries["meta"] == {
"dataScanned": "full",
"valueType": "number",
"valueUnit": None,
"interval": 3_600_000,
}
def test_top_events(self) -> None:
self.store_trace_metrics(
[
self.create_trace_metric(
"foo",
1,
"counter",
timestamp=self.start + timedelta(minutes=1),
attributes={"environment": {"string_value": "prod"}},
),
self.create_trace_metric(
"foo",
1,
"counter",
timestamp=self.start + timedelta(minutes=1),
attributes={"environment": {"string_value": "dev"}},
),
self.create_trace_metric(
"foo",
1,
"counter",
timestamp=self.start + timedelta(minutes=1),
attributes={"environment": {"string_value": "prod"}},
),
self.create_trace_metric(
"foo",
1,
"counter",
timestamp=self.start + timedelta(minutes=1),
attributes={"environment": {"string_value": "dev"}},
),
]
)
self.end = self.start + timedelta(minutes=6)
response = self._do_request(
data={
"start": self.start,
"end": self.end,
"interval": "1m",
"yAxis": "sum(value)",
"groupBy": ["environment"],
"project": self.project.id,
"dataset": "tracemetrics",
"excludeOther": 0,
"topEvents": 2,
}
)
assert response.status_code == 200, response.content
assert response.data["meta"] == {
"dataset": "tracemetrics",
"start": self.start.timestamp() * 1000,
"end": self.end.timestamp() * 1000,
}
assert len(response.data["timeSeries"]) == 2
timeseries = response.data["timeSeries"][0]
assert len(timeseries["values"]) == 6
assert timeseries["yAxis"] == "sum(value)"
assert timeseries["values"] == build_expected_timeseries(
self.start, 60_000, [0, 2, 0, 0, 0, 0], ignore_accuracy=True
)
assert timeseries["groupBy"] == [{"key": "environment", "value": "prod"}]
assert timeseries["meta"] == {
"dataScanned": "full",
"valueType": "number",
"valueUnit": None,
"interval": 60_000,
"isOther": False,
"order": 0,
}
timeseries = response.data["timeSeries"][1]
assert len(timeseries["values"]) == 6
assert timeseries["yAxis"] == "sum(value)"
assert timeseries["values"] == build_expected_timeseries(
self.start, 60_000, [0, 2, 0, 0, 0, 0], ignore_accuracy=True
)
assert timeseries["groupBy"] == [{"key": "environment", "value": "dev"}]
assert timeseries["meta"] == {
"dataScanned": "full",
"valueType": "number",
"valueUnit": None,
"interval": 60_000,
"isOther": False,
"order": 1,
}
| OrganizationEventsStatsTraceMetricsEndpointTest |
python | scrapy__scrapy | tests/test_downloader_handlers_http_base.py | {
"start": 26281,
"end": 26669
} | class ____(TestSimpleHttpsBase):
# above tests use a server certificate for "localhost",
# client connection to "localhost" too.
# here we test that even if the server certificate is for another domain,
# "www.example.com" in this case,
# the tests still pass
keyfile = "keys/example-com.key.pem"
certfile = "keys/example-com.cert.pem"
| TestHttpsWrongHostnameBase |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/init.py | {
"start": 538,
"end": 5064
} | class ____:
"""The context object available as the argument to the initialization function of a :py:class:`dagster.ResourceDefinition`.
Users should not instantiate this object directly. To construct an `InitResourceContext` for testing purposes, use :py:func:`dagster.build_init_resource_context`.
Example:
.. code-block:: python
from dagster import resource, InitResourceContext
@resource
def the_resource(init_context: InitResourceContext):
init_context.log.info("Hello, world!")
"""
def __init__(
self,
resource_config: Any,
resources: Resources,
resource_def: Optional[ResourceDefinition],
all_resource_defs: Mapping[str, ResourceDefinition],
instance: Optional[DagsterInstance] = None,
dagster_run: Optional[DagsterRun] = None,
log_manager: Optional[DagsterLogManager] = None,
event_loop: Optional[asyncio.AbstractEventLoop] = None,
):
self._resource_config = resource_config
self._resource_def = resource_def
self._all_resource_defs = all_resource_defs
self._log_manager = log_manager
self._instance = instance
self._resources = resources
self._dagster_run = dagster_run
self._event_loop = event_loop
@public
@property
def resource_config(self) -> Any:
"""The configuration data provided by the run config. The schema
for this data is defined by the ``config_field`` argument to
:py:class:`ResourceDefinition`.
"""
return self._resource_config
@public
@property
def resource_def(self) -> ResourceDefinition:
"""The definition of the resource currently being constructed."""
return check.not_none(self._resource_def)
@public
@property
def resources(self) -> Resources:
"""The resources that are available to the resource that we are initializing."""
return self._resources
@public
@property
def instance(self) -> Optional[DagsterInstance]:
"""The Dagster instance configured for the current execution context."""
return self._instance
@public
@property
def run(self) -> Optional[DagsterRun]:
"""The dagster run to use. When initializing resources outside of execution context, this will be None."""
return self._dagster_run
@deprecated(
breaking_version="a future release",
subject="InitResourceContext.dagster_run",
additional_warn_text="You have called the deprecated method dagster_run on InitResourceContext. Use context.run instead.",
)
@property
def dagster_run(self) -> Optional[DagsterRun]:
"""The dagster run to use. When initializing resources outside of execution context, this will be None."""
return self._dagster_run
@public
@property
def log(self) -> Optional[DagsterLogManager]:
"""The Dagster log manager configured for the current execution context."""
return self._log_manager
# backcompat: keep around this property from when InitResourceContext used to be a NamedTuple
@public
@property
def log_manager(self) -> Optional[DagsterLogManager]:
"""The log manager for this run of the job."""
return self._log_manager
@deprecated(
breaking_version="a future release",
subject="InitResourceContext.run_id",
additional_warn_text="You have called the deprecated method run_id on InitResourceContext. Use context.run.run_id instead.",
)
@property
def run_id(self) -> Optional[str]:
"""The id for this run of the job or pipeline. When initializing resources outside of
execution context, this will be None.
"""
return self.dagster_run.run_id if self.dagster_run else None
@property
def all_resource_defs(self) -> Mapping[str, ResourceDefinition]:
return self._all_resource_defs
def replace_config(self, config: Any) -> "InitResourceContext":
return InitResourceContext(
resource_config=config,
resources=self.resources,
instance=self.instance,
resource_def=self.resource_def,
all_resource_defs=self.all_resource_defs,
dagster_run=self.dagster_run,
log_manager=self.log,
)
@property
def event_loop(self) -> Optional[asyncio.AbstractEventLoop]:
return self._event_loop
| InitResourceContext |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/commands/ci/state.py | {
"start": 2424,
"end": 4337
} | class ____(Store):
def __init__(self, statedir: str):
self.statedir = os.path.abspath(statedir)
self.location_file_prefix = "location-"
if not os.path.isdir(self.statedir):
os.makedirs(self.statedir)
def __repr__(self):
return f"<FileStore(statedir={self.statedir!r})>"
def _get_filepath(self, location_name) -> str:
return os.path.join(self.statedir, f"{self.location_file_prefix}{location_name}.json")
def list_locations(self) -> list[LocationState]:
return [
self._location_from_file(os.path.join(self.statedir, filename))
for filename in os.listdir(self.statedir)
if filename.startswith(self.location_file_prefix)
]
def load(self, location_name: str) -> LocationState:
filepath = self._get_filepath(location_name)
if not filepath:
raise KeyError(f"No saved state for {location_name} at {filepath}")
return self._location_from_file(self._get_filepath(location_name))
def _location_from_file(self, filepath: str) -> LocationState:
with open(filepath, encoding="utf-8") as f:
return LocationState.parse_obj(json.load(f))
def save(self, location_state: LocationState):
filepath = self._get_filepath(location_state.location_name)
with open(filepath, "w", encoding="utf-8") as f:
f.write(location_state.json())
def deselect(self, location_names: list[str]):
locations = [self.load(location_name) for location_name in location_names]
for location in locations:
location.selected = False
self.save(location)
def select(self, location_names: list[str]):
locations = [self.load(location_name) for location_name in location_names]
for location in locations:
location.selected = True
self.save(location)
| FileStore |
python | langchain-ai__langchain | libs/langchain/langchain_classic/indexes/vectorstore.py | {
"start": 7042,
"end": 9788
} | class ____(BaseModel):
"""Logic for creating indexes."""
vectorstore_cls: type[VectorStore] = Field(
default_factory=_get_in_memory_vectorstore,
)
embedding: Embeddings
text_splitter: TextSplitter = Field(default_factory=_get_default_text_splitter)
vectorstore_kwargs: dict = Field(default_factory=dict)
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
def from_loaders(self, loaders: list[BaseLoader]) -> VectorStoreIndexWrapper:
"""Create a `VectorStore` index from a list of loaders.
Args:
loaders: A list of `BaseLoader` instances to load documents.
Returns:
A `VectorStoreIndexWrapper` containing the constructed vectorstore.
"""
docs = []
for loader in loaders:
docs.extend(loader.load())
return self.from_documents(docs)
async def afrom_loaders(self, loaders: list[BaseLoader]) -> VectorStoreIndexWrapper:
"""Asynchronously create a `VectorStore` index from a list of loaders.
Args:
loaders: A list of `BaseLoader` instances to load documents.
Returns:
A `VectorStoreIndexWrapper` containing the constructed vectorstore.
"""
docs = []
for loader in loaders:
docs.extend([doc async for doc in loader.alazy_load()])
return await self.afrom_documents(docs)
def from_documents(self, documents: list[Document]) -> VectorStoreIndexWrapper:
"""Create a `VectorStore` index from a list of documents.
Args:
documents: A list of `Document` objects.
Returns:
A `VectorStoreIndexWrapper` containing the constructed vectorstore.
"""
sub_docs = self.text_splitter.split_documents(documents)
vectorstore = self.vectorstore_cls.from_documents(
sub_docs,
self.embedding,
**self.vectorstore_kwargs,
)
return VectorStoreIndexWrapper(vectorstore=vectorstore)
async def afrom_documents(
self,
documents: list[Document],
) -> VectorStoreIndexWrapper:
"""Asynchronously create a `VectorStore` index from a list of documents.
Args:
documents: A list of `Document` objects.
Returns:
A `VectorStoreIndexWrapper` containing the constructed vectorstore.
"""
sub_docs = self.text_splitter.split_documents(documents)
vectorstore = await self.vectorstore_cls.afrom_documents(
sub_docs,
self.embedding,
**self.vectorstore_kwargs,
)
return VectorStoreIndexWrapper(vectorstore=vectorstore)
| VectorstoreIndexCreator |
python | python-openxml__python-docx | tests/image/test_jpeg.py | {
"start": 16221,
"end": 17864
} | class ____:
def it_can_construct_from_a_stream(self, stream_, _MarkerFinder__init_):
marker_finder = _MarkerFinder.from_stream(stream_)
_MarkerFinder__init_.assert_called_once_with(ANY, stream_)
assert isinstance(marker_finder, _MarkerFinder)
def it_can_find_the_next_marker_after_a_given_offset(self, next_fixture):
marker_finder, start, expected_code_and_offset = next_fixture
marker_code, segment_offset = marker_finder.next(start)
assert (marker_code, segment_offset) == expected_code_and_offset
# fixtures -------------------------------------------------------
@pytest.fixture
def _MarkerFinder__init_(self, request):
return initializer_mock(request, _MarkerFinder)
@pytest.fixture(
params=[
(0, JPEG_MARKER_CODE.SOI, 2),
(1, JPEG_MARKER_CODE.APP0, 4),
(2, JPEG_MARKER_CODE.APP0, 4),
(3, JPEG_MARKER_CODE.EOI, 12),
(4, JPEG_MARKER_CODE.EOI, 12),
(6, JPEG_MARKER_CODE.EOI, 12),
(8, JPEG_MARKER_CODE.EOI, 12),
]
)
def next_fixture(self, request):
start, marker_code, segment_offset = request.param
bytes_ = b"\xff\xd8\xff\xe0\x00\x01\xff\x00\xff\xff\xff\xd9"
stream_reader = StreamReader(io.BytesIO(bytes_), BIG_ENDIAN)
marker_finder = _MarkerFinder(stream_reader)
expected_code_and_offset = (marker_code, segment_offset)
return marker_finder, start, expected_code_and_offset
@pytest.fixture
def stream_(self, request):
return instance_mock(request, io.BytesIO)
| Describe_MarkerFinder |
python | pypa__warehouse | warehouse/macaroons/caveats/__init__.py | {
"start": 2756,
"end": 3356
} | class ____(Caveat):
user_id: StrictStr
def verify(self, request: Request, context: Any, permission: str) -> Result:
if not isinstance(request.identity, UserContext):
return Failure("token with user restriction without a user")
if request.identity.macaroon is None:
return Failure("token with user restriction without a macaroon")
if str(request.identity.user.id) != self.user_id:
return Failure("current user does not match user restriction in token")
return Success()
@as_caveat(tag=4)
@dataclass(frozen=True)
| RequestUser |
python | huggingface__transformers | src/transformers/models/lfm2_vl/image_processing_lfm2_vl_fast.py | {
"start": 6053,
"end": 6578
} | class ____(ImagesKwargs, total=False):
"""
downsample_factor (`int`, *optional*, defaults to `2`):
The downsampling factor for images used when resizing the image.
"""
downsample_factor: int
do_image_splitting: bool
min_tiles: int
max_tiles: int
use_thumbnail: bool
min_image_tokens: int
max_image_tokens: int
encoder_patch_size: int
tile_size: int
max_pixels_tolerance: float
do_pad: bool
return_row_col_info: bool
@auto_docstring
| Lfm2VlImageProcessorKwargs |
python | xlwings__xlwings | xlwings/pro/utils.py | {
"start": 871,
"end": 7439
} | class ____:
# NOTE(review): dataset-dump row -- the leading "} |" is record-delimiter
# residue, not Python; the class name is masked, and the row's target column
# names it LicenseHandler. Blank lines and method-body indentation appear
# stripped by extraction; the code below is read as a class of staticmethods.
@staticmethod
def get_cipher():
# Build a Fernet cipher from the XLWINGS_LICENSE_KEY_SECRET env var.
# A missing secret raises TypeError, a malformed one ValueError; both are
# converted to a LicenseError with context suppressed (from None).
try:
return Fernet(os.getenv("XLWINGS_LICENSE_KEY_SECRET"))
except (TypeError, ValueError):
raise xlwings.LicenseError(
"Couldn't validate xlwings license key."
) from None
@staticmethod
def get_license():
# Resolve the license key, checking in priority order: environment
# variable, calling workbook's config sheet, user config file.
# Raises LicenseError if none of the three sources yields a key.
# Env Var - also used if LICENSE_KEY is in config sheet and called via UDF
if os.getenv("XLWINGS_LICENSE_KEY"):
return os.environ["XLWINGS_LICENSE_KEY"]
# Sheet config (only used by RunPython, UDFs use env var)
try:
sheet_license_key = read_config_sheet(xlwings.Book.caller()).get(
"LICENSE_KEY"
)
if sheet_license_key:
return sheet_license_key
except: # noqa: E722
# Best-effort: no calling workbook / no config sheet -- fall through.
pass
# User config file
config_file = xlwings.USER_CONFIG_FILE
if os.path.exists(config_file):
with open(config_file, "r") as f:
config = f.readlines()
key = None
for line in config:
# Config lines look like: "LICENSE_KEY","<value>" -- the slice
# [1:-1] strips the surrounding double quotes from the value.
if line.split(",")[0] == '"LICENSE_KEY"':
key = line.split(",")[1].strip()[1:-1]
if key:
return key
raise xlwings.LicenseError("Couldn't find an xlwings license key.")
@staticmethod
@lru_cache()
def validate_license(product, license_type=None):
# Validate the resolved key for `product` and return the decoded
# license-info dict. Cached per (product, license_type) for the life of
# the process via lru_cache. Two key formats are supported:
# - legacy "gA..." keys: Fernet-encrypted JSON (requires cryptography)
# - current keys: urlsafe-base64 JSON body + 5-hex-char HMAC-SHA256
# suffix keyed on XLWINGS_LICENSE_KEY_SECRET
key = LicenseHandler.get_license()
if key == "noncommercial":
return {"license_type": "noncommercial"}
if key.startswith("gA") and not Fernet:
# Legacy up to 0.27.12
raise ImportError(
"You are using a legacy xlwings license key that requires the "
"'cryptography' package. Either install it via 'pip install "
"cryptography' or contact us for a new license key that doesn't depend "
"on cryptography."
) from None
elif key.startswith("gA"):
cipher_suite = LicenseHandler.get_cipher()
try:
license_info = json.loads(cipher_suite.decrypt(key.encode()).decode())
except (binascii.Error, InvalidToken):
raise xlwings.LicenseError("Invalid xlwings license key.") from None
else:
# Recompute the HMAC over everything except the 5-char suffix and
# compare against the suffix to detect tampering.
signature = hmac.new(
os.getenv("XLWINGS_LICENSE_KEY_SECRET").encode(),
key[:-5].encode(),
hashlib.sha256,
).hexdigest()
if signature[:5] != key[-5:]:
raise xlwings.LicenseError("Invalid xlwings license key.") from None
else:
try:
license_info = json.loads(
base64.urlsafe_b64decode(key[:-5]).decode()
)
except: # noqa: E722
raise xlwings.LicenseError("Invalid xlwings license key.") from None
# A "developer" requirement is only satisfied by an explicit
# license_type == "developer" entry; a missing key also fails.
try:
if (
license_type == "developer"
and license_info["license_type"] != "developer"
):
raise xlwings.LicenseError(
"You need a paid xlwings license key for this action."
)
except KeyError:
raise xlwings.LicenseError(
"You need a paid xlwings license key for this action."
) from None
if (
"valid_until" not in license_info.keys()
or "products" not in license_info.keys()
):
raise xlwings.LicenseError("Invalid xlwings license key format.") from None
license_valid_until = dt.datetime.strptime(
license_info["valid_until"], "%Y-%m-%d"
).date()
# Expiry, product entitlement, and (if present) max-version checks.
if dt.date.today() > license_valid_until:
raise xlwings.LicenseError(
"Your xlwings license expired on {}.".format(
license_valid_until.strftime("%Y-%m-%d")
)
) from None
if product not in license_info["products"]:
raise xlwings.LicenseError(
f"Your xlwings license key isn't valid for the '{product}' "
"functionality."
) from None
if "version" in license_info.keys() and VersionNumber(
license_info["version"]
) < VersionNumber(xlwings.__version__):
raise xlwings.LicenseError(
f"Your deploy key is only valid for <=v{license_info['version']}. "
f"You're using v{xlwings.__version__}."
) from None
# Soft warning window: nag when fewer than 30 days remain.
if (license_valid_until - dt.date.today()) < dt.timedelta(days=30):
warnings.warn(
f"Your xlwings license key expires in "
f"{(license_valid_until - dt.date.today()).days} days."
)
return license_info
@staticmethod
def create_deploy_key():
# Derive a long-lived (valid until 2999-12-31) deploy key from a
# developer license. Output format mirrors the input key's format
# (legacy Fernet vs. base64+HMAC).
license_info = LicenseHandler.validate_license("pro", license_type="developer")
if license_info["license_type"] == "noncommercial":
return "noncommercial"
license_dict = json.dumps(
{
"version": xlwings.__version__,
"products": license_info["products"],
"valid_until": "2999-12-31",
"license_type": "deploy_key",
}
).encode()
if LicenseHandler.get_license().startswith("gA"):
# Legacy
cipher_suite = LicenseHandler.get_cipher()
return cipher_suite.encrypt(license_dict).decode()
else:
body = base64.urlsafe_b64encode(license_dict)
signature = hmac.new(
os.getenv("XLWINGS_LICENSE_KEY_SECRET").encode(), body, hashlib.sha256
).hexdigest()
return f"{body.decode()}{signature[:5]}"
@lru_cache()
def get_embedded_code_temp_dir():
# NOTE(review): takes no self and has no @staticmethod, so this is
# presumably a module-level function despite appearing after the class in
# this indentation-stripped dump -- confirm against the original file.
# Create a per-process temp dir under <system-tmp>/xlwings for embedded
# code. lru_cache makes repeat calls return the same directory; the dir is
# removed at interpreter exit via atexit.
tmp_base_path = os.path.join(tempfile.gettempdir(), "xlwings")
os.makedirs(tmp_base_path, exist_ok=True)
try:
# HACK: Clean up directories that are older than 30 days
# This should be done in the C++ part when the Python process is killed
for subdir in glob.glob(tmp_base_path + "/*/"):
if os.path.getmtime(subdir) < time.time() - 30 * 86400:
shutil.rmtree(subdir, ignore_errors=True)
except Exception:
pass # we don't care if it fails
tempdir = tempfile.mkdtemp(dir=tmp_base_path)
# This only works for RunPython calls running outside the COM server
atexit.register(shutil.rmtree, tempdir)
return tempdir
| LicenseHandler |
python | wandb__wandb | wandb/apis/public/artifacts.py | {
"start": 30409,
"end": 33997
} | class ____(SizedRelayPaginator["FileFragment", "File"]):
# NOTE(review): dataset-dump row -- the leading "} |" is record-delimiter
# residue, not Python; the class name is masked, and the row's target column
# (and __repr__ below) name it ArtifactFiles. Blank lines and indentation
# appear stripped by extraction -- TODO confirm against the original file.
"""A paginator for files in an artifact.
<!-- lazydoc-ignore-init: internal -->
"""
QUERY: Document # Must be set per-instance
last_response: ArtifactFileConnection | None
def __init__(
self,
client: Client,
artifact: Artifact,
names: Sequence[str] | None = None,
per_page: int = 50,
):
# Choose between two GraphQL queries depending on server capability:
# the newer membership-based files query (entity/project/collection/alias)
# or the older artifact-version query (source entity/project/name/type).
from wandb.sdk.artifacts._generated import (
ARTIFACT_COLLECTION_MEMBERSHIP_FILES_GQL,
ARTIFACT_VERSION_FILES_GQL,
)
from wandb.sdk.artifacts._gqlutils import server_supports
self.query_via_membership = server_supports(
client, pb.ARTIFACT_COLLECTION_MEMBERSHIP_FILES
)
self.artifact = artifact
if self.query_via_membership:
query_str = ARTIFACT_COLLECTION_MEMBERSHIP_FILES_GQL
variables = {
"entity": artifact.entity,
"project": artifact.project,
"collection": artifact.name.split(":")[0],
"alias": artifact.version,
"fileNames": names,
}
else:
query_str = ARTIFACT_VERSION_FILES_GQL
variables = {
"entity": artifact.source_entity,
"project": artifact.source_project,
"name": artifact.source_name,
"artifactType": artifact.type,
"fileNames": names,
}
# Strip fields the connected server is too old to understand.
omit_fields = set()
# The server must advertise at least SDK 0.12.21
# to get storagePath
if not client.version_supported("0.12.21"):
omit_fields.add("storagePath")
if not server_supports(client, pb.TOTAL_COUNT_IN_FILE_CONNECTION):
omit_fields.add("totalCount")
self.QUERY = gql_compat(query_str, omit_fields=omit_fields)
super().__init__(client, variables=variables, per_page=per_page)
@override
def _update_response(self) -> None:
# Execute the prepared query and normalize either response shape into
# an ArtifactFileConnection stored on self.last_response.
from wandb.sdk.artifacts._generated import (
ArtifactCollectionMembershipFiles,
ArtifactVersionFiles,
)
from wandb.sdk.artifacts._models.pagination import ArtifactFileConnection
data = self.client.execute(self.QUERY, variable_values=self.variables)
# Extract the inner `*Connection` result for faster/easier access.
if self.query_via_membership:
result = ArtifactCollectionMembershipFiles.model_validate(data)
conn = result.project.artifact_collection.artifact_membership.files
else:
result = ArtifactVersionFiles.model_validate(data)
conn = result.project.artifact_type.artifact.files
if conn is None:
raise ValueError(f"Unable to parse {nameof(type(self))!r} response data")
self.last_response = ArtifactFileConnection.model_validate(conn)
@property
def path(self) -> list[str]:
"""Returns the path of the artifact."""
return [self.artifact.entity, self.artifact.project, self.artifact.name]
def _convert(self, node: FileFragment) -> File:
# Map a GraphQL file fragment onto the public File wrapper.
return File(self.client, attrs=node.model_dump(exclude_unset=True))
def __repr__(self) -> str:
path_str = "/".join(self.path)
try:
total = len(self)
except NotImplementedError:
# Older server versions don't correctly support totalCount
return f"<ArtifactFiles {path_str}>"
else:
return f"<ArtifactFiles {path_str} ({total})>"
| ArtifactFiles |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.