language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | gevent__gevent | src/greentest/3.14/test_urllib2.py | {
"start": 14231,
"end": 17172
} | class ____:
# useful for testing handler machinery
# see add_ordered_mock_handlers() docstring
handler_order = 500
def __init__(self, methods):
self._define_methods(methods)
def _define_methods(self, methods):
for spec in methods:
if len(spec) == 2:
name, action = spec
else:
name, action = spec, None
meth = FakeMethod(name, action, self.handle)
setattr(self.__class__, name, meth)
def handle(self, fn_name, action, *args, **kwds):
self.parent.calls.append((self, fn_name, args, kwds))
if action is None:
return None
elif action == "return self":
return self
elif action == "return response":
res = MockResponse(200, "OK", {}, "")
return res
elif action == "return request":
return Request("http://blah/")
elif action.startswith("error"):
code = action[action.rfind(" ")+1:]
try:
code = int(code)
except ValueError:
pass
res = MockResponse(200, "OK", {}, "")
return self.parent.error("http", args[0], res, code, "", {})
elif action == "raise":
raise urllib.error.URLError("blah")
assert False
def close(self):
pass
def add_parent(self, parent):
self.parent = parent
self.parent.calls = []
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# No handler_order, leave in original order. Yuck.
return True
return self.handler_order < other.handler_order
def add_ordered_mock_handlers(opener, meth_spec):
"""Create MockHandlers and add them to an OpenerDirector.
meth_spec: list of lists of tuples and strings defining methods to define
on handlers. eg:
[["http_error", "ftp_open"], ["http_open"]]
defines methods .http_error() and .ftp_open() on one handler, and
.http_open() on another. These methods just record their arguments and
return None. Using a tuple instead of a string causes the method to
perform some action (see MockHandler.handle()), eg:
[["http_error"], [("http_open", "return request")]]
defines .http_error() on one handler (which simply returns None), and
.http_open() on another handler, which returns a Request object.
"""
handlers = []
count = 0
for meths in meth_spec:
class MockHandlerSubclass(MockHandler):
pass
h = MockHandlerSubclass(meths)
h.handler_order += count
h.add_parent(opener)
count = count + 1
handlers.append(h)
opener.add_handler(h)
return handlers
def build_test_opener(*handler_instances):
opener = OpenerDirector()
for h in handler_instances:
opener.add_handler(h)
return opener
| MockHandler |
python | ray-project__ray | rllib/examples/_old_api_stack/models/autoregressive_action_dist.py | {
"start": 2632,
"end": 4994
} | class ____(TorchDistributionWrapper):
"""Action distribution P(a1, a2) = P(a1) * P(a2 | a1)"""
def deterministic_sample(self):
# First, sample a1.
a1_dist = self._a1_distribution()
a1 = a1_dist.deterministic_sample()
# Sample a2 conditioned on a1.
a2_dist = self._a2_distribution(a1)
a2 = a2_dist.deterministic_sample()
self._action_logp = a1_dist.logp(a1) + a2_dist.logp(a2)
# Return the action tuple.
return (a1, a2)
def sample(self):
# First, sample a1.
a1_dist = self._a1_distribution()
a1 = a1_dist.sample()
# Sample a2 conditioned on a1.
a2_dist = self._a2_distribution(a1)
a2 = a2_dist.sample()
self._action_logp = a1_dist.logp(a1) + a2_dist.logp(a2)
# Return the action tuple.
return (a1, a2)
def logp(self, actions):
a1, a2 = actions[:, 0], actions[:, 1]
a1_vec = torch.unsqueeze(a1.float(), 1)
a1_logits, a2_logits = self.model.action_module(self.inputs, a1_vec)
return TorchCategorical(a1_logits).logp(a1) + TorchCategorical(a2_logits).logp(
a2
)
def sampled_action_logp(self):
return self._action_logp
def entropy(self):
a1_dist = self._a1_distribution()
a2_dist = self._a2_distribution(a1_dist.sample())
return a1_dist.entropy() + a2_dist.entropy()
def kl(self, other):
a1_dist = self._a1_distribution()
a1_terms = a1_dist.kl(other._a1_distribution())
a1 = a1_dist.sample()
a2_terms = self._a2_distribution(a1).kl(other._a2_distribution(a1))
return a1_terms + a2_terms
def _a1_distribution(self):
BATCH = self.inputs.shape[0]
zeros = torch.zeros((BATCH, 1)).to(self.inputs.device)
a1_logits, _ = self.model.action_module(self.inputs, zeros)
a1_dist = TorchCategorical(a1_logits)
return a1_dist
def _a2_distribution(self, a1):
a1_vec = torch.unsqueeze(a1.float(), 1)
_, a2_logits = self.model.action_module(self.inputs, a1_vec)
a2_dist = TorchCategorical(a2_logits)
return a2_dist
@staticmethod
def required_model_output_shape(action_space, model_config):
return 16 # controls model output feature vector size
| TorchBinaryAutoregressiveDistribution |
python | bokeh__bokeh | src/bokeh/models/callbacks.py | {
"start": 6958,
"end": 8108
} | class ____(Callback):
""" Allows to update a property of an object. """
# explicit __init__ to support Init signatures
def __init__(self, obj: Init[HasProps] = Intrinsic, attr: Init[str] = Intrinsic, value: Init[Any] = Intrinsic, **kwargs: Any) -> None:
super().__init__(obj=obj, attr=attr, value=value, **kwargs)
obj: HasProps = Required(Instance(HasProps), help="""
Object to set the value on.
""")
attr: str = Required(String, help="""
The property to modify.
""")
value = Required(AnyRef, help="""
The value to set.
""")
@error(NOT_A_PROPERTY_OF)
def _check_if_an_attribute_is_a_property_of_a_model(self):
if self.obj.lookup(self.attr, raises=False):
return None
else:
return f"{self.attr} is not a property of {self.obj}"
@error(INVALID_PROPERTY_VALUE)
def _check_if_provided_a_valid_value(self):
descriptor = self.obj.lookup(self.attr)
if descriptor.property.is_valid(self.value):
return None
else:
return f"{self.value!r} is not a valid value for {self.obj}.{self.attr}"
| SetValue |
python | pypa__pip | src/pip/_vendor/urllib3/packages/six.py | {
"start": 17738,
"end": 18519
} | class ____(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(
Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response",
"moves.urllib.response",
)
| Module_six_moves_urllib_response |
python | pytorch__pytorch | torch/utils/_sympy/functions.py | {
"start": 20711,
"end": 20921
} | class ____(sympy.Function):
is_integer = True
@classmethod
def eval(cls, base, shift):
if shift < 0:
raise ValueError("negative shift count")
return base * 2**shift
| LShift |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault5.py | {
"start": 227,
"end": 355
} | class ____[T: ClassA = ClassA]:
owner: T
def post_comment[T: ClassA](owner: T) -> ClassB[T]:
return ClassB(owner)
| ClassB |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/properties.py | {
"start": 806,
"end": 1049
} | class ____(Class):
def uses_property(self):
self.tainted = _test_source()
return self.my_property
def uses_property_but_no_tito_taint(self):
self.untainted = _test_source()
return self.my_property
| Derived |
python | ray-project__ray | python/ray/tests/test_autoscaling_policy.py | {
"start": 15875,
"end": 21431
} | class ____(unittest.TestCase):
def setUp(self):
_NODE_PROVIDERS["mock"] = lambda config: self.create_provider
self.provider = None
self.tmpdir = tempfile.mkdtemp()
logging.disable(level=logging.CRITICAL)
# This seems to be the only way of turning the cli logger off. The
# expected methods like `cli_logger.configure` don't work.
def do_nothing(*args, **kwargs):
pass
cli_logger._print = type(cli_logger._print)(do_nothing, type(cli_logger))
def tearDown(self):
self.provider = None
del _NODE_PROVIDERS["mock"]
_clear_provider_cache()
shutil.rmtree(self.tmpdir)
ray.shutdown()
def create_provider(self, config, cluster_name):
assert self.provider
return self.provider
def write_config(self, config):
path = self.tmpdir + "/simple.yaml"
with open(path, "w") as f:
f.write(yaml.dump(config))
return path
def testManyTasks(self):
config = copy.deepcopy(SAMPLE_CLUSTER_CONFIG)
config_path = self.write_config(config)
self.provider = MockProvider()
simulator = Simulator(config_path, self.provider)
done_count = 0
def done_callback():
nonlocal done_count
done_count += 1
tasks = [
Task(duration=200, resources={"CPU": 1}, done_callback=done_callback)
for _ in range(5000)
]
simulator.submit(tasks)
time = 0
while done_count < len(tasks):
time = simulator.step()
assert time < 850
# TODO (Alex): Not clear what's actually worth asserting here.
assert simulator.node_costs()
# Check event logs contain add/remove node events.
assert any(
"Adding" in x for x in simulator.autoscaler.event_summarizer.summary()
)
assert any(
"Removing" in x for x in simulator.autoscaler.event_summarizer.summary()
)
def testManyActors(self):
config = copy.deepcopy(SAMPLE_CLUSTER_CONFIG)
config_path = self.write_config(config)
self.provider = MockProvider()
simulator = Simulator(config_path, self.provider)
start_count = 0
def start_callback():
nonlocal start_count
start_count += 1
tasks = [
Actor(
duration=float("inf"),
resources={"CPU": 1},
start_callback=start_callback,
)
for _ in range(5000)
]
simulator.submit(tasks)
time = 0
while start_count < len(tasks):
time = simulator.step()
assert time < 650
# Check event logs contain add/remove node events.
assert any(
"Adding" in x for x in simulator.autoscaler.event_summarizer.summary()
)
assert any(
"Removing" in x for x in simulator.autoscaler.event_summarizer.summary()
)
def testManyPlacementGroups(self):
config = copy.deepcopy(SAMPLE_CLUSTER_CONFIG)
config_path = self.write_config(config)
self.provider = MockProvider()
simulator = Simulator(config_path, self.provider)
start_count = 0
def start_callback():
nonlocal start_count
start_count += 1
placement_group_requests = []
for _ in range(500):
placement_group_requests.append(
PlacementGroup(
duration=float("inf"),
bundles=[{"CPU": 1}, {"CPU": 2}],
strategy=PlacementStrategy.STRICT_PACK,
start_callback=start_callback,
)
)
for _ in range(500):
placement_group_requests.append(
PlacementGroup(
duration=float("inf"),
bundles=[{"CPU": 1}, {"CPU": 2}],
strategy=PlacementStrategy.STRICT_SPREAD,
start_callback=start_callback,
)
)
# SPREAD and PACK tests fail, but under the real GCS placement group
# scheduling algorithm we could also be left in a situation in which
# the autoscaler thinks the placement group is placeable, but the
# placement group scheduler doesn't know how to schedule it.
# for _ in range(500):
# placement_group_requests.append(PlacementGroup(
# duration=float("inf"), bundles=[{"CPU": 1}, {"CPU": 2}],
# strategy=PlacementStrategy.PACK,
# start_callback=start_callback))
# for _ in range(500):
# placement_group_requests.append(PlacementGroup(
# duration=float("inf"),
# bundles=[{"CPU": 2}, {"CPU": 1}],
# strategy=PlacementStrategy.SPREAD,
# start_callback=start_callback))
simulator.submit(placement_group_requests)
time = 0
while start_count < len(placement_group_requests):
time = simulator.step()
assert time < 630
# Check event logs contain add/remove node events.
assert any(
"Adding" in x for x in simulator.autoscaler.event_summarizer.summary()
)
assert any(
"Removing" in x for x in simulator.autoscaler.event_summarizer.summary()
)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| AutoscalingPolicyTest |
python | pyqtgraph__pyqtgraph | pyqtgraph/widgets/GraphicsView.py | {
"start": 394,
"end": 15472
} | class ____(QtWidgets.QGraphicsView):
"""Re-implementation of QGraphicsView that removes scrollbars and allows unambiguous control of the
viewed coordinate range. Also automatically creates a GraphicsScene and a central QGraphicsWidget
that is automatically scaled to the full view geometry.
This widget is the basis for :class:`PlotWidget <pyqtgraph.PlotWidget>`,
:class:`GraphicsLayoutWidget <pyqtgraph.GraphicsLayoutWidget>`, and the view widget in
:class:`ImageView <pyqtgraph.ImageView>`.
By default, the view coordinate system matches the widget's pixel coordinates and
automatically updates when the view is resized. This can be overridden by setting
autoPixelRange=False. The exact visible range can be set with setRange().
The view can be panned using the middle mouse button and scaled using the right mouse button if
enabled via enableMouse() (but ordinarily, we use ViewBox for this functionality)."""
sigDeviceRangeChanged = QtCore.Signal(object, object)
sigDeviceTransformChanged = QtCore.Signal(object)
sigMouseReleased = QtCore.Signal(object)
sigSceneMouseMoved = QtCore.Signal(object)
#sigRegionChanged = QtCore.Signal(object)
sigScaleChanged = QtCore.Signal(object)
lastFileDir = None
def __init__(self, parent=None, useOpenGL=None, background='default'):
"""
============== ============================================================
**Arguments:**
parent Optional parent widget
useOpenGL If True, the GraphicsView will use OpenGL to do all of its
rendering. This can improve performance on some systems,
but may also introduce bugs (the combination of
QGraphicsView and QOpenGLWidget is still an 'experimental'
feature of Qt)
background Set the background color of the GraphicsView. Accepts any
single argument accepted by
:func:`mkColor <pyqtgraph.mkColor>`. By
default, the background color is determined using the
'backgroundColor' configuration option (see
:func:`setConfigOptions <pyqtgraph.setConfigOptions>`).
============== ============================================================
"""
self.closed = False
QtWidgets.QGraphicsView.__init__(self, parent)
# This connects a cleanup function to QApplication.aboutToQuit. It is
# called from here because we have no good way to react when the
# QApplication is created by the user.
# See pyqtgraph.__init__.py
from .. import _connectCleanup
_connectCleanup()
if useOpenGL is None:
useOpenGL = getConfigOption('useOpenGL')
self.useOpenGL(useOpenGL)
self.setCacheMode(self.CacheModeFlag.CacheBackground)
## This might help, but it's probably dangerous in the general case..
#self.setOptimizationFlag(self.DontSavePainterState, True)
self.setBackgroundRole(QtGui.QPalette.ColorRole.NoRole)
self.setBackground(background)
self.setFocusPolicy(QtCore.Qt.FocusPolicy.StrongFocus)
self.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
self.setTransformationAnchor(QtWidgets.QGraphicsView.ViewportAnchor.NoAnchor)
self.setResizeAnchor(QtWidgets.QGraphicsView.ViewportAnchor.AnchorViewCenter)
self.setViewportUpdateMode(QtWidgets.QGraphicsView.ViewportUpdateMode.MinimalViewportUpdate)
self.lockedViewports = []
self.lastMousePos = None
self.setMouseTracking(True)
self.aspectLocked = False
self.range = QtCore.QRectF(0, 0, 1, 1)
self.autoPixelRange = True
self.currentItem = None
self.clearMouse()
self.updateMatrix()
# GraphicsScene must have parent or expect crashes!
self.sceneObj = GraphicsScene(parent=self)
self.setScene(self.sceneObj)
## by default we set up a central widget with a grid layout.
## this can be replaced if needed.
self.centralWidget = None
self.setCentralItem(QtWidgets.QGraphicsWidget())
self.centralLayout = QtWidgets.QGraphicsGridLayout()
self.centralWidget.setLayout(self.centralLayout)
self.mouseEnabled = False
self.scaleCenter = False ## should scaling center around view center (True) or mouse click (False)
self.clickAccepted = False
def setAntialiasing(self, aa):
"""Enable or disable default antialiasing.
Note that this will only affect items that do not specify their own antialiasing options."""
if aa:
self.setRenderHints(self.renderHints() | QtGui.QPainter.RenderHint.Antialiasing)
else:
self.setRenderHints(self.renderHints() & ~QtGui.QPainter.RenderHint.Antialiasing)
def setBackground(self, background):
"""
Set the background color of the GraphicsView.
To use the defaults specified py pyqtgraph.setConfigOption, use background='default'.
To make the background transparent, use background=None.
"""
self._background = background
if background == 'default':
background = getConfigOption('background')
brush = fn.mkBrush(background)
self.setBackgroundBrush(brush)
def paintEvent(self, ev):
self.scene().prepareForPaint()
return super().paintEvent(ev)
def render(self, *args, **kwds):
self.scene().prepareForPaint()
return super().render(*args, **kwds)
def close(self):
self.centralWidget = None
self.scene().clear()
self.currentItem = None
self.sceneObj = None
self.closed = True
self.setViewport(None)
super(GraphicsView, self).close()
def useOpenGL(self, b=True):
old_vp = self.viewport()
new_vp = None
if b:
if not isinstance(old_vp, OpenGLHelpers.GraphicsViewGLWidget):
new_vp = OpenGLHelpers.GraphicsViewGLWidget()
else:
if not type(old_vp) is QtWidgets.QWidget:
new_vp = QtWidgets.QWidget()
if new_vp is not None:
self.setViewport(new_vp)
def keyPressEvent(self, ev):
self.scene().keyPressEvent(ev) ## bypass view, hand event directly to scene
## (view likes to eat arrow key events)
def setCentralItem(self, item):
return self.setCentralWidget(item)
def setCentralWidget(self, item):
"""Sets a QGraphicsWidget to automatically fill the entire view (the item will be automatically
resize whenever the GraphicsView is resized)."""
if self.centralWidget is not None:
self.scene().removeItem(self.centralWidget)
self.centralWidget = item
if item is not None:
self.sceneObj.addItem(item)
self.resizeEvent(None)
def addItem(self, *args):
return self.scene().addItem(*args)
def removeItem(self, *args):
return self.scene().removeItem(*args)
def enableMouse(self, b=True):
self.mouseEnabled = b
self.autoPixelRange = (not b)
def clearMouse(self):
self.mouseTrail = []
self.lastButtonReleased = None
def resizeEvent(self, ev):
if self.closed:
return
if self.autoPixelRange:
self.range = QtCore.QRectF(0, 0, self.size().width(), self.size().height())
GraphicsView.setRange(self, self.range, padding=0, disableAutoPixel=False) ## we do this because some subclasses like to redefine setRange in an incompatible way.
self.updateMatrix()
def updateMatrix(self, propagate=True):
self.setSceneRect(self.range)
if self.autoPixelRange:
self.resetTransform()
else:
if self.aspectLocked:
self.fitInView(self.range, QtCore.Qt.AspectRatioMode.KeepAspectRatio)
else:
self.fitInView(self.range, QtCore.Qt.AspectRatioMode.IgnoreAspectRatio)
if propagate:
for v in self.lockedViewports:
v.setXRange(self.range, padding=0)
self.sigDeviceRangeChanged.emit(self, self.range)
self.sigDeviceTransformChanged.emit(self)
def viewRect(self):
"""Return the boundaries of the view in scene coordinates"""
## easier to just return self.range ?
r = QtCore.QRectF(self.rect())
return self.viewportTransform().inverted()[0].mapRect(r)
def visibleRange(self):
## for backward compatibility
return self.viewRect()
def translate(self, dx, dy):
self.range.adjust(dx, dy, dx, dy)
self.updateMatrix()
def scale(self, sx, sy, center=None):
scale = [sx, sy]
if self.aspectLocked:
scale[0] = scale[1]
if self.scaleCenter:
center = None
if center is None:
center = self.range.center()
w = self.range.width() / scale[0]
h = self.range.height() / scale[1]
self.range = QtCore.QRectF(center.x() - (center.x()-self.range.left()) / scale[0], center.y() - (center.y()-self.range.top()) /scale[1], w, h)
self.updateMatrix()
self.sigScaleChanged.emit(self)
def setRange(self, newRect=None, padding=0.05, lockAspect=None, propagate=True, disableAutoPixel=True):
if disableAutoPixel:
self.autoPixelRange=False
if newRect is None:
newRect = self.visibleRange()
padding = 0
padding = Point(padding)
newRect = QtCore.QRectF(newRect)
pw = newRect.width() * padding[0]
ph = newRect.height() * padding[1]
newRect = newRect.adjusted(-pw, -ph, pw, ph)
scaleChanged = False
if self.range.width() != newRect.width() or self.range.height() != newRect.height():
scaleChanged = True
self.range = newRect
#print "New Range:", self.range
if self.centralWidget is not None:
self.centralWidget.setGeometry(self.range)
self.updateMatrix(propagate)
if scaleChanged:
self.sigScaleChanged.emit(self)
def scaleToImage(self, image):
"""Scales such that pixels in image are the same size as screen pixels. This may result in a significant performance increase."""
pxSize = image.pixelSize()
image.setPxMode(True)
try:
self.sigScaleChanged.disconnect(image.setScaledMode)
except (TypeError, RuntimeError):
pass
tl = image.sceneBoundingRect().topLeft()
w = self.size().width() * pxSize[0]
h = self.size().height() * pxSize[1]
range = QtCore.QRectF(tl.x(), tl.y(), w, h)
GraphicsView.setRange(self, range, padding=0)
self.sigScaleChanged.connect(image.setScaledMode)
def lockXRange(self, v1):
if not v1 in self.lockedViewports:
self.lockedViewports.append(v1)
def setXRange(self, r, padding=0.05):
r1 = QtCore.QRectF(self.range)
r1.setLeft(r.left())
r1.setRight(r.right())
GraphicsView.setRange(self, r1, padding=[padding, 0], propagate=False)
def setYRange(self, r, padding=0.05):
r1 = QtCore.QRectF(self.range)
r1.setTop(r.top())
r1.setBottom(r.bottom())
GraphicsView.setRange(self, r1, padding=[0, padding], propagate=False)
def wheelEvent(self, ev):
super().wheelEvent(ev)
if not self.mouseEnabled:
return
delta = ev.angleDelta().x()
if delta == 0:
delta = ev.angleDelta().y()
sc = 1.001 ** delta
#self.scale *= sc
#self.updateMatrix()
self.scale(sc, sc)
def setAspectLocked(self, s):
self.aspectLocked = s
def leaveEvent(self, ev):
self.scene().leaveEvent(ev) ## inform scene when mouse leaves
def mousePressEvent(self, ev):
super().mousePressEvent(ev)
if not self.mouseEnabled:
return
lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()
self.lastMousePos = lpos
self.mousePressPos = lpos
self.clickAccepted = ev.isAccepted()
if not self.clickAccepted:
self.scene().clearSelection()
return ## Everything below disabled for now..
def mouseReleaseEvent(self, ev):
super().mouseReleaseEvent(ev)
if not self.mouseEnabled:
return
self.sigMouseReleased.emit(ev)
self.lastButtonReleased = ev.button()
return ## Everything below disabled for now..
def mouseMoveEvent(self, ev):
lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()
if self.lastMousePos is None:
self.lastMousePos = lpos
delta = Point(lpos - self.lastMousePos)
self.lastMousePos = lpos
super().mouseMoveEvent(ev)
if not self.mouseEnabled:
return
self.sigSceneMouseMoved.emit(self.mapToScene(lpos.toPoint()))
if self.clickAccepted: ## Ignore event if an item in the scene has already claimed it.
return
if ev.buttons() == QtCore.Qt.MouseButton.RightButton:
delta = Point(fn.clip_scalar(delta[0], -50, 50), fn.clip_scalar(-delta[1], -50, 50))
scale = 1.01 ** delta
self.scale(scale[0], scale[1], center=self.mapToScene(self.mousePressPos.toPoint()))
self.sigDeviceRangeChanged.emit(self, self.range)
elif ev.buttons() in [QtCore.Qt.MouseButton.MiddleButton, QtCore.Qt.MouseButton.LeftButton]: ## Allow panning by left or mid button.
px = self.pixelSize()
tr = -delta * px
self.translate(tr[0], tr[1])
self.sigDeviceRangeChanged.emit(self, self.range)
def pixelSize(self):
"""Return vector with the length and width of one view pixel in scene coordinates"""
p0 = Point(0,0)
p1 = Point(1,1)
tr = self.transform().inverted()[0]
p01 = tr.map(p0)
p11 = tr.map(p1)
return Point(p11 - p01)
def dragEnterEvent(self, ev):
ev.ignore() ## not sure why, but for some reason this class likes to consume drag events
| GraphicsView |
python | anthropics__anthropic-sdk-python | src/anthropic/types/thinking_block_param.py | {
"start": 218,
"end": 367
} | class ____(TypedDict, total=False):
signature: Required[str]
thinking: Required[str]
type: Required[Literal["thinking"]]
| ThinkingBlockParam |
python | TheAlgorithms__Python | data_structures/binary_tree/non_recursive_segment_tree.py | {
"start": 1131,
"end": 4746
} | class ____[T]:
def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
"""
Segment Tree constructor, it works just with commutative combiner.
:param arr: list of elements for the segment tree
:param fnc: commutative function for combine two elements
>>> SegmentTree(['a', 'b', 'c'], lambda a, b: f'{a}{b}').query(0, 2)
'abc'
>>> SegmentTree([(1, 2), (2, 3), (3, 4)],
... lambda a, b: (a[0] + b[0], a[1] + b[1])).query(0, 2)
(6, 9)
"""
any_type: Any | T = None
self.N: int = len(arr)
self.st: list[T] = [any_type for _ in range(self.N)] + arr
self.fn = fnc
self.build()
def build(self) -> None:
for p in range(self.N - 1, 0, -1):
self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
def update(self, p: int, v: T) -> None:
"""
Update an element in log(N) time
:param p: position to be update
:param v: new value
>>> st = SegmentTree([3, 1, 2, 4], min)
>>> st.query(0, 3)
1
>>> st.update(2, -1)
>>> st.query(0, 3)
-1
"""
p += self.N
self.st[p] = v
while p > 1:
p = p // 2
self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
def query(self, left: int, right: int) -> T | None:
"""
Get range query value in log(N) time
:param left: left element index
:param right: right element index
:return: element combined in the range [left, right]
>>> st = SegmentTree([1, 2, 3, 4], lambda a, b: a + b)
>>> st.query(0, 2)
6
>>> st.query(1, 2)
5
>>> st.query(0, 3)
10
>>> st.query(2, 3)
7
"""
left, right = left + self.N, right + self.N
res: T | None = None
while left <= right:
if left % 2 == 1:
res = self.st[left] if res is None else self.fn(res, self.st[left])
if right % 2 == 0:
res = self.st[right] if res is None else self.fn(res, self.st[right])
left, right = (left + 1) // 2, (right - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
min_segment_tree = SegmentTree(test_array, min)
max_segment_tree = SegmentTree(test_array, max)
sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments() -> None:
"""
Test all possible segments
"""
for i in range(len(test_array)):
for j in range(i, len(test_array)):
min_range = reduce(min, test_array[i : j + 1])
max_range = reduce(max, test_array[i : j + 1])
sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
assert min_range == min_segment_tree.query(i, j)
assert max_range == max_segment_tree.query(i, j)
assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| SegmentTree |
python | celery__celery | t/unit/app/test_amqp.py | {
"start": 657,
"end": 1755
} | class ____:
def test_setup_nolimit(self, app):
app.conf.broker_pool_limit = None
try:
delattr(app, '_pool')
except AttributeError:
pass
app.amqp._producer_pool = None
pool = app.amqp.producer_pool
assert pool.limit == app.pool.limit
assert not pool._resource.queue
r1 = pool.acquire()
r2 = pool.acquire()
r1.release()
r2.release()
r1 = pool.acquire()
r2 = pool.acquire()
def test_setup(self, app):
app.conf.broker_pool_limit = 2
try:
delattr(app, '_pool')
except AttributeError:
pass
app.amqp._producer_pool = None
pool = app.amqp.producer_pool
assert pool.limit == app.pool.limit
assert pool._resource.queue
p1 = r1 = pool.acquire()
p2 = r2 = pool.acquire()
r1.release()
r2.release()
r1 = pool.acquire()
r2 = pool.acquire()
assert p2 is r1
assert p1 is r2
r1.release()
r2.release()
| test_ProducerPool |
python | openai__openai-python | src/openai/types/conversations/message.py | {
"start": 1340,
"end": 2033
} | class ____(BaseModel):
id: str
"""The unique ID of the message."""
content: List[Content]
"""The content of the message"""
role: Literal["unknown", "user", "assistant", "system", "critic", "discriminator", "developer", "tool"]
"""The role of the message.
One of `unknown`, `user`, `assistant`, `system`, `critic`, `discriminator`,
`developer`, or `tool`.
"""
status: Literal["in_progress", "completed", "incomplete"]
"""The status of item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
type: Literal["message"]
"""The type of the message. Always set to `message`."""
| Message |
python | ansible__ansible | lib/ansible/plugins/cache/__init__.py | {
"start": 2296,
"end": 10347
} | class ____(BaseCacheModule):
"""
A caching module backed by file based storage.
"""
def __init__(self, *args, **kwargs):
try:
super(BaseFileCacheModule, self).__init__(*args, **kwargs)
self._cache_dir = self._get_cache_connection(self.get_option('_uri'))
self._timeout = float(self.get_option('_timeout'))
except KeyError:
self._cache_dir = self._get_cache_connection(C.CACHE_PLUGIN_CONNECTION)
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
self.plugin_name = resource_from_fqcr(self.__module__)
self._cache = {}
self.validate_cache_connection()
self._sanitized = {}
self._files = {}
def _get_cache_connection(self, source):
if source:
try:
return os.path.expanduser(os.path.expandvars(source))
except TypeError:
pass
def _sanitize_key(self, key: str) -> str:
"""
Ensures key name is safe to use on the filesystem
"""
if key not in self._sanitized:
for invalid in self._PATH_CHARS:
if invalid in key:
self._sanitized[key] = hashlib.sha256(key.encode()).hexdigest()[:max(len(key), 12)]
break
else:
self._sanitized[key] = key
return self._sanitized[key]
def validate_cache_connection(self):
if not self._cache_dir:
raise AnsibleError(f"'{self.plugin_name!r}' cache plugin requires the 'fact_caching_connection' configuration option "
"to be set (to a writeable directory path)")
if not os.path.exists(self._cache_dir):
try:
os.makedirs(self._cache_dir)
except OSError as ex:
raise AnsibleError(f"Error in {self.plugin_name!r} cache plugin while trying to create cache dir {self._cache_dir!r}.") from ex
else:
for x in (os.R_OK, os.W_OK, os.X_OK):
if not os.access(self._cache_dir, x):
raise AnsibleError(f"'{self.plugin_name!r}' cache, configured path ({self._cache_dir}) does not have necessary permissions (rwx),"
" disabling plugin")
def _get_cache_file_name(self, key: str) -> str:
if key not in self._files:
safe = self._sanitize_key(key) # use key or filesystem safe hash of key
prefix = self.get_option('_prefix')
if not prefix:
prefix = ''
self._files[key] = os.path.join(self._cache_dir, prefix + safe)
return self._files[key]
def get(self, key):
""" This checks the in memory cache first as the fact was not expired at 'gather time'
and it would be problematic if the key did expire after some long running tasks and
user gets 'undefined' error in the same play """
if key not in self._cache:
if self.has_expired(key) or key == "":
raise KeyError
cachefile = self._get_cache_file_name(key)
try:
value = self._load(cachefile)
self._cache[key] = value
except ValueError as e:
display.warning("error in '%s' cache plugin while trying to read %s : %s. "
"Most likely a corrupt file, so erasing and failing." % (self.plugin_name, cachefile, to_bytes(e)))
self.delete(key)
raise AnsibleError("The cache file %s was corrupt, or did not otherwise contain valid data. "
"It has been removed, so you can re-run your command now." % cachefile)
except FileNotFoundError:
raise KeyError
except Exception as ex:
raise AnsibleError(f"Error while accessing the cache file {cachefile!r}.") from ex
return self._cache.get(key)
def set(self, key, value):
self._cache[key] = value
cachefile = self._get_cache_file_name(key)
tmpfile_handle, tmpfile_path = tempfile.mkstemp(dir=self._cache_dir)
try:
try:
self._dump(value, tmpfile_path)
except OSError as ex:
display.error_as_warning(f"Error in {self.plugin_name!r} cache plugin while trying to write to {tmpfile_path!r}.", exception=ex)
try:
os.close(tmpfile_handle) # os.rename fails if handle is still open in WSL
os.rename(tmpfile_path, cachefile)
os.chmod(cachefile, mode=S_IRWU_RG_RO)
except OSError as ex:
display.error_as_warning(f"Error in {self.plugin_name!r} cache plugin while trying to move {tmpfile_path!r} to {cachefile!r}.", exception=ex)
finally:
try:
os.unlink(tmpfile_path)
except OSError:
pass
def has_expired(self, key):
if self._timeout == 0:
return False
cachefile = self._get_cache_file_name(key)
try:
st = os.stat(cachefile)
except FileNotFoundError:
return False
except OSError as ex:
display.error_as_warning(f"Error in {self.plugin_name!r} cache plugin while trying to stat {cachefile!r}.", exception=ex)
return False
if time.time() - st.st_mtime <= self._timeout:
return False
if key in self._cache:
del self._cache[key]
return True
def keys(self):
# When using a prefix we must remove it from the key name before
# checking the expiry and returning it to the caller. Keys that do not
# share the same prefix cannot be fetched from the cache.
prefix = self.get_option('_prefix')
prefix_length = len(prefix)
keys = []
for k in os.listdir(self._cache_dir):
if k.startswith('.') or not k.startswith(prefix):
continue
k = k[prefix_length:]
if not self.has_expired(k):
keys.append(k)
return keys
def contains(self, key):
cachefile = self._get_cache_file_name(key)
if key in self._cache:
return True
if self.has_expired(key):
return False
try:
os.stat(cachefile)
return True
except FileNotFoundError:
return False
except OSError as ex:
display.error_as_warning(f"Error in {self.plugin_name!r} cache plugin while trying to stat {cachefile!r}.", exception=ex)
def delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
os.remove(self._get_cache_file_name(key))
except OSError:
pass # TODO: only pass on non existing?
def flush(self):
self._cache = {}
for key in self.keys():
self.delete(key)
@abstractmethod
def _load(self, filepath: str) -> object:
"""
Read data from a filepath and return it as a value
:arg filepath: The filepath to read from.
:returns: The value stored in the filepath
This method reads from the file on disk and takes care of any parsing
and transformation of the data before returning it. The value
returned should be what Ansible would expect if it were uncached data.
.. note:: Filehandles have advantages but calling code doesn't know
whether this file is text or binary, should be decoded, or accessed via
a library function. Therefore the API uses a filepath and opens
the file inside of the method.
"""
pass
@abstractmethod
def _dump(self, value: object, filepath: str) -> None:
"""
Write data to a filepath
:arg value: The value to store
:arg filepath: The filepath to store it at
"""
pass
| BaseFileCacheModule |
python | python-openxml__python-docx | tests/image/test_tiff.py | {
"start": 574,
"end": 2052
} | class ____:
def it_can_construct_from_a_tiff_stream(self, stream_, _TiffParser_, tiff_parser_, Tiff__init_):
px_width, px_height = 111, 222
horz_dpi, vert_dpi = 333, 444
tiff_parser_.px_width = px_width
tiff_parser_.px_height = px_height
tiff_parser_.horz_dpi = horz_dpi
tiff_parser_.vert_dpi = vert_dpi
tiff = Tiff.from_stream(stream_)
_TiffParser_.parse.assert_called_once_with(stream_)
Tiff__init_.assert_called_once_with(ANY, px_width, px_height, horz_dpi, vert_dpi)
assert isinstance(tiff, Tiff)
def it_knows_its_content_type(self):
tiff = Tiff(None, None, None, None)
assert tiff.content_type == MIME_TYPE.TIFF
def it_knows_its_default_ext(self):
tiff = Tiff(None, None, None, None)
assert tiff.default_ext == "tiff"
# fixtures -------------------------------------------------------
@pytest.fixture
def Tiff__init_(self, request):
return initializer_mock(request, Tiff)
@pytest.fixture
def _TiffParser_(self, request, tiff_parser_):
_TiffParser_ = class_mock(request, "docx.image.tiff._TiffParser")
_TiffParser_.parse.return_value = tiff_parser_
return _TiffParser_
@pytest.fixture
def tiff_parser_(self, request):
return instance_mock(request, _TiffParser)
@pytest.fixture
def stream_(self, request):
return instance_mock(request, io.BytesIO)
| DescribeTiff |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 608910,
"end": 609809
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "organizations")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
organizations = sgqlc.types.Field(
OrganizationConnection,
graphql_name="organizations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
| RevokeEnterpriseOrganizationsMigratorRolePayload |
python | kamyu104__LeetCode-Solutions | Python/select-k-disjoint-special-substrings.py | {
"start": 64,
"end": 1358
} | class ____(object):
def maxSubstringLength(self, s, k):
"""
:type s: str
:type k: int
:rtype: bool
"""
def erase_overlap_intervals(intervals):
intervals.sort(key=lambda interval: interval[1])
result, right = 0, float("-inf")
for l, r in intervals:
if l <= right:
result += 1
else:
right = r
return result
cnt = [0]*26
lookup1, lookup2 = [-1]*26, [-1]*26
for i, c in enumerate(s):
cnt[ord(c)-ord('a')] += 1
if lookup1[ord(c)-ord('a')] == -1:
lookup1[ord(c)-ord('a')] = i
lookup2[ord(c)-ord('a')] = i
intervals = []
for i in lookup1:
if i == -1:
continue
for j in lookup2:
if j == -1 or i > j:
continue
total = sum(cnt[c] for c in xrange(len(cnt)) if i <= lookup1[c] <= lookup2[c] <= j)
if total == j-i+1 and total < len(s):
intervals.append((i, j))
return len(intervals)-erase_overlap_intervals(intervals) >= k
# Time: O(26 * n + 26 * log(26))
# Space: O(26)
# hash table, sort, greedy
| Solution |
python | pytorch__pytorch | torch/utils/data/datapipes/dataframe/dataframes.py | {
"start": 1667,
"end": 6102
} | class ____:
# TODO: All operations are shared across entire InitialCapture, need to figure out what if we join two captures
def __init__(self, schema_df=None) -> None:
self.ctx = {"operations": [], "variables": [], "schema_df": schema_df}
def __str__(self) -> str:
return self._ops_str()
def _ops_str(self):
res = ""
# pyrefly: ignore [not-iterable]
for op in self.ctx["operations"]:
if len(res) > 0:
res += "\n"
res += str(op)
return res
def __getstate__(self):
# TODO(VitalyFedyunin): Currently can't pickle (why?)
self.ctx["schema_df"] = None
# pyrefly: ignore [not-iterable]
for var in self.ctx["variables"]:
var.calculated_value = None
state = {}
for item in self.__dict__:
state[item] = getattr(self, item)
return state
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __getattr__(self, attrname):
if attrname == "kwarg" or attrname == "kwargs":
raise RuntimeError("no kwargs!")
if attrname == "__deepcopy__":
raise AttributeError
result = CaptureGetAttr(self, attrname, ctx=self.ctx)
return result
def __getitem__(self, key):
return CaptureGetItem(self, key, ctx=self.ctx)
def __setitem__(self, key, value) -> None:
# pyrefly: ignore [missing-attribute]
self.ctx["operations"].append(CaptureSetItem(self, key, value, ctx=self.ctx))
def __add__(self, add_val):
res = CaptureAdd(self, add_val, ctx=self.ctx)
var = CaptureVariable(res, ctx=self.ctx)
# pyrefly: ignore [missing-attribute]
self.ctx["operations"].append(
CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
)
return var
def __sub__(self, add_val):
res = CaptureSub(self, add_val, ctx=self.ctx)
var = CaptureVariable(res, ctx=self.ctx)
# pyrefly: ignore [missing-attribute]
self.ctx["operations"].append(
CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
)
return var
def __mul__(self, add_val):
res = CaptureMul(self, add_val, ctx=self.ctx)
var = CaptureVariable(res, ctx=self.ctx)
t = CaptureVariableAssign(variable=var, value=res, ctx=self.ctx)
# pyrefly: ignore [missing-attribute]
self.ctx["operations"].append(t)
return var
def _is_context_empty(self):
# pyrefly: ignore [bad-argument-type]
return len(self.ctx["operations"]) == 0 and len(self.ctx["variables"]) == 0
def apply_ops_2(self, dataframe) -> None:
# TODO(VitalyFedyunin): Make this calculation thread safe (as currently it updates pointer)
# pyrefly: ignore [unsupported-operation]
self.ctx["variables"][0].calculated_value = dataframe
# pyrefly: ignore [not-iterable]
for op in self.ctx["operations"]:
op.execute()
@property
def columns(self):
self.apply_ops_2(self.ctx["schema_df"])
value = self.execute()
return value.columns
# TODO(VitalyFedyunin): Add tests
# TODO(VitalyFedyunin): Need to join context if one of them are empty because we used capture
def __call__(self, *args, **kwargs):
# TODO: Check if args or kwargs have more than one different context
if self._is_context_empty():
# TODO: Allow CaptureA to take context from mock
for arg in args:
if isinstance(arg, Capture) and not arg._is_context_empty():
self.ctx = arg.ctx
break
if self._is_context_empty():
for k, v in kwargs.items():
if isinstance(k, Capture) and not k._is_context_empty():
self.ctx = k.ctx
break
if isinstance(v, Capture) and not v._is_context_empty():
self.ctx = v.ctx
break
res = CaptureCall(self, ctx=self.ctx, args=args, kwargs=kwargs)
var = CaptureVariable(None, ctx=self.ctx)
t = CaptureVariableAssign(ctx=self.ctx, variable=var, value=res)
# pyrefly: ignore [missing-attribute]
self.ctx["operations"].append(t)
return var
| Capture |
python | pytorch__pytorch | torch/nn/modules/_functions.py | {
"start": 11832,
"end": 12143
} | class ____(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx, *args):
ctx.mark_non_differentiable(*[arg for arg in args if not arg.requires_grad])
return args
@staticmethod
def backward(ctx, *args):
return args
| BackwardHookFunction |
python | doocs__leetcode | solution/1500-1599/1590.Make Sum Divisible by P/Solution.py | {
"start": 0,
"end": 465
} | class ____:
def minSubarray(self, nums: List[int], p: int) -> int:
k = sum(nums) % p
if k == 0:
return 0
last = {0: -1}
cur = 0
ans = len(nums)
for i, x in enumerate(nums):
cur = (cur + x) % p
target = (cur - k + p) % p
if target in last:
ans = min(ans, i - last[target])
last[cur] = i
return -1 if ans == len(nums) else ans
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_9/projects.py | {
"start": 15048,
"end": 22509
} | class ____(NonStrictDataModel):
"""
:param id: Project id
:type id: str
:param name: Project name
:type name: str
:param description: Project description
:type description: str
:param user: Associated user id
:type user: str
:param company: Company id
:type company: str
:param created: Creation time
:type created: datetime.datetime
:param tags: User-defined tags
:type tags: Sequence[str]
:param system_tags: System tags. This field is reserved for system use, please
don't use it.
:type system_tags: Sequence[str]
:param default_output_destination: The default output destination URL for new
tasks under this project
:type default_output_destination: str
:param stats: Additional project stats
:type stats: Stats
"""
_schema = {
"properties": {
"company": {"description": "Company id", "type": ["string", "null"]},
"created": {
"description": "Creation time",
"format": "date-time",
"type": ["string", "null"],
},
"default_output_destination": {
"description": "The default output destination URL for new tasks under this project",
"type": ["string", "null"],
},
"description": {
"description": "Project description",
"type": ["string", "null"],
},
"id": {"description": "Project id", "type": ["string", "null"]},
"name": {"description": "Project name", "type": ["string", "null"]},
"stats": {
"description": "Additional project stats",
"oneOf": [{"$ref": "#/definitions/stats"}, {"type": "null"}],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {"description": "Associated user id", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
description: Optional[str] = None,
user: Optional[str] = None,
company: Optional[str] = None,
created: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
default_output_destination: Optional[str] = None,
stats: Any = None,
**kwargs: Any
) -> None:
super(ProjectsGetAllResponseSingle, self).__init__(**kwargs)
self.id = id
self.name = name
self.description = description
self.user = user
self.company = company
self.created = created
self.tags = tags
self.system_tags = system_tags
self.default_output_destination = default_output_destination
self.stats = stats
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("description")
def description(self) -> Optional[str]:
return self._property_description
@description.setter
def description(self, value: Optional[str]) -> None:
if value is None:
self._property_description = None
return
self.assert_isinstance(value, "description", six.string_types)
self._property_description = value
@schema_property("user")
def user(self) -> Optional[str]:
return self._property_user
@user.setter
def user(self, value: Optional[str]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("company")
def company(self) -> Optional[str]:
return self._property_company
@company.setter
def company(self, value: Optional[str]) -> None:
if value is None:
self._property_company = None
return
self.assert_isinstance(value, "company", six.string_types)
self._property_company = value
@schema_property("created")
def created(self) -> Optional[str]:
return self._property_created
@created.setter
def created(self, value: Optional[str]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("default_output_destination")
def default_output_destination(self) -> Optional[str]:
return self._property_default_output_destination
@default_output_destination.setter
def default_output_destination(self, value: Optional[str]) -> None:
if value is None:
self._property_default_output_destination = None
return
self.assert_isinstance(value, "default_output_destination", six.string_types)
self._property_default_output_destination = value
@schema_property("stats")
def stats(self) -> Any:
return self._property_stats
@stats.setter
def stats(self, value: Any) -> None:
if value is None:
self._property_stats = None
return
if isinstance(value, dict):
value = Stats.from_dict(value)
else:
self.assert_isinstance(value, "stats", Stats)
self._property_stats = value
| ProjectsGetAllResponseSingle |
python | davidhalter__parso | parso/python/tree.py | {
"start": 5404,
"end": 5635
} | class ____(_LeafWithoutNewlines):
__slots__ = ()
type = 'endmarker'
def __repr__(self):
return "<%s: prefix=%s end_pos=%s>" % (
type(self).__name__, repr(self.prefix), self.end_pos
)
| EndMarker |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cli/commands.py | {
"start": 9095,
"end": 40914
} | class ____(ShellOperation):
"""
A block representing a dbt operation, containing multiple dbt and shell commands.
For long-lasting operations, use the trigger method and utilize the block as a
context manager for automatic closure of processes when context is exited.
If not, manually call the close method to close processes.
For short-lasting operations, use the run method. Context is automatically managed
with this method.
Attributes:
commands: A list of commands to execute sequentially.
stream_output: Whether to stream output.
env: A dictionary of environment variables to set for the shell operation.
working_dir: The working directory context the commands will be executed within.
shell: The shell to use to execute the commands.
extension: The extension to use for the temporary file.
if unset defaults to `.ps1` on Windows and `.sh` on other platforms.
profiles_dir: The directory to search for the profiles.yml file.
Setting this appends the `--profiles-dir` option to the dbt commands
provided. If this is not set, will try using the DBT_PROFILES_DIR
environment variable, but if that's also not
set, will use the default directory `$HOME/.dbt/`.
project_dir: The directory to search for the dbt_project.yml file.
Default is the current working directory and its parents.
overwrite_profiles: Whether the existing profiles.yml file under profiles_dir
should be overwritten with a new profile.
dbt_cli_profile: Profiles class containing the profile written to profiles.yml.
Note! This is optional and will raise an error if profiles.yml already
exists under profile_dir and overwrite_profiles is set to False.
Examples:
Load a configured block.
```python
from prefect_dbt import DbtCoreOperation
dbt_op = DbtCoreOperation.load("BLOCK_NAME")
```
Execute short-lasting dbt debug and list with a custom DbtCliProfile.
```python
from prefect_dbt import DbtCoreOperation, DbtCliProfile
from prefect_dbt.cli.configs import SnowflakeTargetConfigs
from prefect_snowflake import SnowflakeConnector
snowflake_connector = await SnowflakeConnector.load("snowflake-connector")
target_configs = SnowflakeTargetConfigs(connector=snowflake_connector)
dbt_cli_profile = DbtCliProfile(
name="jaffle_shop",
target="dev",
target_configs=target_configs,
)
dbt_init = DbtCoreOperation(
commands=["dbt debug", "dbt list"],
dbt_cli_profile=dbt_cli_profile,
overwrite_profiles=True
)
dbt_init.run()
```
Execute a longer-lasting dbt run as a context manager.
```python
with DbtCoreOperation(commands=["dbt run"]) as dbt_run:
dbt_process = dbt_run.trigger()
# do other things
dbt_process.wait_for_completion()
dbt_output = dbt_process.fetch_result()
```
"""
_block_type_name = "dbt Core Operation"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
profiles_dir: Optional[Path] = Field(
default=None,
description=(
"The directory to search for the profiles.yml file. "
"Setting this appends the `--profiles-dir` option to the dbt commands "
"provided. If this is not set, will try using the DBT_PROFILES_DIR "
"environment variable, but if that's also not "
"set, will use the default directory `$HOME/.dbt/`."
),
)
project_dir: Optional[Path] = Field(
default=None,
description=(
"The directory to search for the dbt_project.yml file. "
"Default is the current working directory and its parents."
),
)
overwrite_profiles: bool = Field(
default=False,
description=(
"Whether the existing profiles.yml file under profiles_dir "
"should be overwritten with a new profile."
),
)
dbt_cli_profile: Optional[DbtCliProfile] = Field(
default=None,
description=(
"Profiles class containing the profile written to profiles.yml. "
"Note! This is optional and will raise an error if profiles.yml already "
"exists under profile_dir and overwrite_profiles is set to False."
),
)
def _find_valid_profiles_dir(self) -> PosixPath:
"""
Ensure that there is a profiles.yml available for use.
"""
profiles_dir = self.profiles_dir
if profiles_dir is None:
if self.env.get("DBT_PROFILES_DIR") is not None:
# get DBT_PROFILES_DIR from the user input env
profiles_dir = self.env["DBT_PROFILES_DIR"]
else:
# get DBT_PROFILES_DIR from the system env, or default to ~/.dbt
profiles_dir = os.getenv("DBT_PROFILES_DIR", Path.home() / ".dbt")
profiles_dir = relative_path_to_current_platform(
Path(profiles_dir).expanduser()
)
# https://docs.getdbt.com/dbt-cli/configure-your-profile
# Note that the file always needs to be called profiles.yml,
# regardless of which directory it is in.
profiles_path = profiles_dir / "profiles.yml"
overwrite_profiles = self.overwrite_profiles
dbt_cli_profile = self.dbt_cli_profile
if not profiles_path.exists() or overwrite_profiles:
if dbt_cli_profile is None:
raise ValueError(
"Since overwrite_profiles is True or profiles_path is empty, "
"need `dbt_cli_profile` to write a profile"
)
profile = dbt_cli_profile.get_profile()
profiles_dir.mkdir(exist_ok=True)
with open(profiles_path, "w+") as f:
yaml.dump(profile, f, default_flow_style=False)
elif dbt_cli_profile is not None:
raise ValueError(
f"Since overwrite_profiles is False and profiles_path {profiles_path} "
f"already exists, the profile within dbt_cli_profile couldn't be used; "
f"if the existing profile is satisfactory, do not set dbt_cli_profile"
)
return profiles_dir
def _append_dirs_to_commands(self, profiles_dir) -> List[str]:
"""
Append profiles_dir and project_dir options to dbt commands.
"""
project_dir = self.project_dir
commands = []
for command in self.commands:
command += f' --profiles-dir "{profiles_dir}"'
if project_dir is not None:
project_dir = Path(project_dir).expanduser()
command += f' --project-dir "{project_dir}"'
commands.append(command)
return commands
def _compile_kwargs(self, **open_kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""
Helper method to compile the kwargs for `open_process` so it's not repeated
across the run and trigger methods.
"""
profiles_dir = self._find_valid_profiles_dir()
commands = self._append_dirs_to_commands(profiles_dir=profiles_dir)
# _compile_kwargs is called within trigger() and run(), prior to execution.
# However _compile_kwargs directly uses self.commands, but here we modified
# the commands without saving back to self.commands so we need to create a copy.
# was also thinking of using env vars but DBT_PROJECT_DIR is not supported yet.
modified_self = self.copy()
modified_self.commands = commands
return super(type(self), modified_self)._compile_kwargs(**open_kwargs)
@sync_compatible
@task
async def run_dbt_build(
profiles_dir: Optional[Union[Path, str]] = None,
project_dir: Optional[Union[Path, str]] = None,
overwrite_profiles: bool = False,
dbt_cli_profile: Optional[DbtCliProfile] = None,
create_summary_artifact: bool = False,
summary_artifact_key: str = "dbt-build-task-summary",
extra_command_args: Optional[List[str]] = None,
stream_output: bool = True,
):
"""
Executes the 'dbt build' command within a Prefect task,
and optionally creates a Prefect artifact summarizing the dbt build results.
Args:
profiles_dir: The directory to search for the profiles.yml file. Setting this
appends the `--profiles-dir` option to the command provided.
If this is not set, will try using the DBT_PROFILES_DIR env variable,
but if that's also not set, will use the default directory `$HOME/.dbt/`.
project_dir: The directory to search for the dbt_project.yml file.
Default is the current working directory and its parents.
overwrite_profiles: Whether the existing profiles.yml file under profiles_dir
should be overwritten with a new profile.
dbt_cli_profile: Profiles class containing the profile written to profiles.yml.
Note! This is optional and will raise an error
if profiles.yml already exists under profile_dir
and overwrite_profiles is set to False.
create_summary_artifact: If True, creates a Prefect artifact on the task run
with the dbt build results using the specified artifact key.
Defaults to False.
summary_artifact_key: The key under which to store
the dbt build results artifact in Prefect.
Defaults to 'dbt-build-task-summary'.
extra_command_args: Additional command arguments to pass to the dbt build command.
stream_output: If True, the output from the dbt command will be logged in Prefect
as it happens.
Defaults to True.
Example:
```python
from prefect import flow
from prefect_dbt.cli.tasks import dbt_build_task
@flow
def dbt_test_flow():
dbt_build_task(
project_dir="/Users/test/my_dbt_project_dir",
extra_command_args=["--model", "foo_model"]
)
```
Raises:
ValueError: If required dbt_cli_profile is not provided
when needed for profile writing.
RuntimeError: If the dbt build fails for any reason,
it will be indicated by the exception raised.
"""
results = await trigger_dbt_cli_command.fn(
command="build",
profiles_dir=profiles_dir,
project_dir=project_dir,
overwrite_profiles=overwrite_profiles,
dbt_cli_profile=dbt_cli_profile,
create_summary_artifact=create_summary_artifact,
summary_artifact_key=summary_artifact_key,
extra_command_args=extra_command_args,
stream_output=stream_output,
)
return results
@sync_compatible
@task
async def run_dbt_model(
profiles_dir: Optional[Union[Path, str]] = None,
project_dir: Optional[Union[Path, str]] = None,
overwrite_profiles: bool = False,
dbt_cli_profile: Optional[DbtCliProfile] = None,
create_summary_artifact: bool = False,
summary_artifact_key: str = "dbt-run-task-summary",
extra_command_args: Optional[List[str]] = None,
stream_output: bool = True,
):
"""
Executes the 'dbt run' command within a Prefect task,
and optionally creates a Prefect artifact summarizing the dbt model results.
Args:
profiles_dir: The directory to search for the profiles.yml file. Setting this
appends the `--profiles-dir` option to the command provided.
If this is not set, will try using the DBT_PROFILES_DIR env variable,
but if that's also not set, will use the default directory `$HOME/.dbt/`.
project_dir: The directory to search for the dbt_project.yml file.
Default is the current working directory and its parents.
overwrite_profiles: Whether the existing profiles.yml file under profiles_dir
should be overwritten with a new profile.
dbt_cli_profile: Profiles class containing the profile written to profiles.yml.
Note! This is optional and will raise an error
if profiles.yml already exists under profile_dir
and overwrite_profiles is set to False.
create_summary_artifact: If True, creates a Prefect artifact on the task run
with the dbt model run results using the specified artifact key.
Defaults to False.
summary_artifact_key: The key under which to store
the dbt model run results artifact in Prefect.
Defaults to 'dbt-run-task-summary'.
extra_command_args: Additional command arguments to pass to the dbt run command.
stream_output: If True, the output from the dbt command will be logged in Prefect
as it happens.
Defaults to True.
Example:
```python
from prefect import flow
from prefect_dbt.cli.tasks import dbt_run_task
@flow
def dbt_test_flow():
dbt_run_task(
project_dir="/Users/test/my_dbt_project_dir",
extra_command_args=["--model", "foo_model"]
)
```
Raises:
ValueError: If required dbt_cli_profile is not provided
when needed for profile writing.
RuntimeError: If the dbt build fails for any reason,
it will be indicated by the exception raised.
"""
results = await trigger_dbt_cli_command.fn(
command="run",
profiles_dir=profiles_dir,
project_dir=project_dir,
overwrite_profiles=overwrite_profiles,
dbt_cli_profile=dbt_cli_profile,
create_summary_artifact=create_summary_artifact,
summary_artifact_key=summary_artifact_key,
extra_command_args=extra_command_args,
stream_output=stream_output,
)
return results
@sync_compatible
@task
async def run_dbt_test(
profiles_dir: Optional[Union[Path, str]] = None,
project_dir: Optional[Union[Path, str]] = None,
overwrite_profiles: bool = False,
dbt_cli_profile: Optional[DbtCliProfile] = None,
create_summary_artifact: bool = False,
summary_artifact_key: str = "dbt-test-task-summary",
extra_command_args: Optional[List[str]] = None,
stream_output: bool = True,
):
"""
Executes the 'dbt test' command within a Prefect task,
and optionally creates a Prefect artifact summarizing the dbt test results.
Args:
profiles_dir: The directory to search for the profiles.yml file. Setting this
appends the `--profiles-dir` option to the command provided.
If this is not set, will try using the DBT_PROFILES_DIR env variable,
but if that's also not set, will use the default directory `$HOME/.dbt/`.
project_dir: The directory to search for the dbt_project.yml file.
Default is the current working directory and its parents.
overwrite_profiles: Whether the existing profiles.yml file under profiles_dir
should be overwritten with a new profile.
dbt_cli_profile: Profiles class containing the profile written to profiles.yml.
Note! This is optional and will raise an error
if profiles.yml already exists under profile_dir
and overwrite_profiles is set to False.
create_summary_artifact: If True, creates a Prefect artifact on the task run
with the dbt test results using the specified artifact key.
Defaults to False.
summary_artifact_key: The key under which to store
the dbt test results artifact in Prefect.
Defaults to 'dbt-test-task-summary'.
extra_command_args: Additional command arguments to pass to the dbt test command.
stream_output: If True, the output from the dbt command will be logged in Prefect
as it happens.
Defaults to True.
Example:
```python
from prefect import flow
from prefect_dbt.cli.tasks import dbt_test_task
@flow
def dbt_test_flow():
dbt_test_task(
project_dir="/Users/test/my_dbt_project_dir",
extra_command_args=["--model", "foo_model"]
)
```
Raises:
ValueError: If required dbt_cli_profile is not provided
when needed for profile writing.
RuntimeError: If the dbt build fails for any reason,
it will be indicated by the exception raised.
"""
results = await trigger_dbt_cli_command.fn(
command="test",
profiles_dir=profiles_dir,
project_dir=project_dir,
overwrite_profiles=overwrite_profiles,
dbt_cli_profile=dbt_cli_profile,
create_summary_artifact=create_summary_artifact,
summary_artifact_key=summary_artifact_key,
extra_command_args=extra_command_args,
stream_output=stream_output,
)
return results
@sync_compatible
@task
async def run_dbt_snapshot(
    profiles_dir: Optional[Union[Path, str]] = None,
    project_dir: Optional[Union[Path, str]] = None,
    overwrite_profiles: bool = False,
    dbt_cli_profile: Optional[DbtCliProfile] = None,
    create_summary_artifact: bool = False,
    summary_artifact_key: str = "dbt-snapshot-task-summary",
    extra_command_args: Optional[List[str]] = None,
    stream_output: bool = True,
):
    """
    Executes the 'dbt snapshot' command within a Prefect task,
    and optionally creates a Prefect artifact summarizing the dbt snapshot results.
    Args:
        profiles_dir: The directory to search for the profiles.yml file. Setting this
            appends the `--profiles-dir` option to the command provided.
            If this is not set, will try using the DBT_PROFILES_DIR env variable,
            but if that's also not set, will use the default directory `$HOME/.dbt/`.
        project_dir: The directory to search for the dbt_project.yml file.
            Default is the current working directory and its parents.
        overwrite_profiles: Whether the existing profiles.yml file under profiles_dir
            should be overwritten with a new profile.
        dbt_cli_profile: Profiles class containing the profile written to profiles.yml.
            Note! This is optional and will raise an error
            if profiles.yml already exists under profile_dir
            and overwrite_profiles is set to False.
        create_summary_artifact: If True, creates a Prefect artifact on the task run
            with the dbt snapshot results using the specified artifact key.
            Defaults to False.
        summary_artifact_key: The key under which to store
            the dbt snapshot results artifact in Prefect.
            Defaults to 'dbt-snapshot-task-summary'.
        extra_command_args: Additional command arguments to pass to the dbt snapshot command.
        stream_output: If True, the output from the dbt command will be logged in Prefect
            as it happens.
            Defaults to True.
    Example:
        ```python
        from prefect import flow
        from prefect_dbt.cli.commands import run_dbt_snapshot
        @flow
        def dbt_snapshot_flow():
            run_dbt_snapshot(
                project_dir="/Users/test/my_dbt_project_dir",
                extra_command_args=["--fail-fast"]
            )
        ```
    Raises:
        ValueError: If required dbt_cli_profile is not provided
            when needed for profile writing.
        RuntimeError: If the dbt snapshot run fails for any reason,
            it will be indicated by the exception raised.
    """
    # Delegate to the shared CLI trigger; calling `.fn` invokes the underlying
    # function directly so no nested Prefect task run is created.
    results = await trigger_dbt_cli_command.fn(
        command="snapshot",
        profiles_dir=profiles_dir,
        project_dir=project_dir,
        overwrite_profiles=overwrite_profiles,
        dbt_cli_profile=dbt_cli_profile,
        create_summary_artifact=create_summary_artifact,
        summary_artifact_key=summary_artifact_key,
        extra_command_args=extra_command_args,
        stream_output=stream_output,
    )
    return results
@sync_compatible
@task
async def run_dbt_seed(
    profiles_dir: Optional[Union[Path, str]] = None,
    project_dir: Optional[Union[Path, str]] = None,
    overwrite_profiles: bool = False,
    dbt_cli_profile: Optional[DbtCliProfile] = None,
    create_summary_artifact: bool = False,
    summary_artifact_key: str = "dbt-seed-task-summary",
    extra_command_args: Optional[List[str]] = None,
    stream_output: bool = True,
):
    """
    Executes the 'dbt seed' command within a Prefect task,
    and optionally creates a Prefect artifact summarizing the dbt seed results.
    Args:
        profiles_dir: The directory to search for the profiles.yml file. Setting this
            appends the `--profiles-dir` option to the command provided.
            If this is not set, will try using the DBT_PROFILES_DIR env variable,
            but if that's also not set, will use the default directory `$HOME/.dbt/`.
        project_dir: The directory to search for the dbt_project.yml file.
            Default is the current working directory and its parents.
        overwrite_profiles: Whether the existing profiles.yml file under profiles_dir
            should be overwritten with a new profile.
        dbt_cli_profile: Profiles class containing the profile written to profiles.yml.
            Note! This is optional and will raise an error
            if profiles.yml already exists under profile_dir
            and overwrite_profiles is set to False.
        create_summary_artifact: If True, creates a Prefect artifact on the task run
            with the dbt seed results using the specified artifact key.
            Defaults to False.
        summary_artifact_key: The key under which to store
            the dbt seed results artifact in Prefect.
            Defaults to 'dbt-seed-task-summary'.
        extra_command_args: Additional command arguments to pass to the dbt seed command.
        stream_output: If True, the output from the dbt command will be logged in Prefect
            as it happens.
            Defaults to True.
    Example:
        ```python
        from prefect import flow
        from prefect_dbt.cli.commands import run_dbt_seed
        @flow
        def dbt_seed_flow():
            run_dbt_seed(
                project_dir="/Users/test/my_dbt_project_dir",
                extra_command_args=["--fail-fast"]
            )
        ```
    Raises:
        ValueError: If required dbt_cli_profile is not provided
            when needed for profile writing.
        RuntimeError: If the dbt seed run fails for any reason,
            it will be indicated by the exception raised.
    """
    # Delegate to the shared CLI trigger; calling `.fn` invokes the underlying
    # function directly so no nested Prefect task run is created.
    results = await trigger_dbt_cli_command.fn(
        command="seed",
        profiles_dir=profiles_dir,
        project_dir=project_dir,
        overwrite_profiles=overwrite_profiles,
        dbt_cli_profile=dbt_cli_profile,
        create_summary_artifact=create_summary_artifact,
        summary_artifact_key=summary_artifact_key,
        extra_command_args=extra_command_args,
        stream_output=stream_output,
    )
    return results
@sync_compatible
@task
async def run_dbt_source_freshness(
    profiles_dir: Optional[Union[Path, str]] = None,
    project_dir: Optional[Union[Path, str]] = None,
    overwrite_profiles: bool = False,
    dbt_cli_profile: Optional[DbtCliProfile] = None,
    create_summary_artifact: bool = False,
    summary_artifact_key: str = "dbt-source-freshness-task-summary",
    extra_command_args: Optional[List[str]] = None,
    stream_output: bool = True,
):
    """
    Executes the 'dbt source freshness' command within a Prefect task,
    and optionally creates a Prefect artifact summarizing the dbt source freshness results.
    Args:
        profiles_dir: The directory to search for the profiles.yml file. Setting this
            appends the `--profiles-dir` option to the command provided.
            If this is not set, will try using the DBT_PROFILES_DIR env variable,
            but if that's also not set, will use the default directory `$HOME/.dbt/`.
        project_dir: The directory to search for the dbt_project.yml file.
            Default is the current working directory and its parents.
        overwrite_profiles: Whether the existing profiles.yml file under profiles_dir
            should be overwritten with a new profile.
        dbt_cli_profile: Profiles class containing the profile written to profiles.yml.
            Note! This is optional and will raise an error
            if profiles.yml already exists under profile_dir
            and overwrite_profiles is set to False.
        create_summary_artifact: If True, creates a Prefect artifact on the task run
            with the dbt source freshness results using the specified artifact key.
            Defaults to False.
        summary_artifact_key: The key under which to store
            the dbt source freshness results artifact in Prefect.
            Defaults to 'dbt-source-freshness-task-summary'.
        extra_command_args: Additional command arguments to pass to the dbt source freshness command.
        stream_output: If True, the output from the dbt command will be logged in Prefect
            as it happens.
            Defaults to True.
    Example:
        ```python
        from prefect import flow
        from prefect_dbt.cli.commands import run_dbt_source_freshness
        @flow
        def dbt_source_freshness_flow():
            run_dbt_source_freshness(
                project_dir="/Users/test/my_dbt_project_dir",
                extra_command_args=["--fail-fast"]
            )
        ```
    Raises:
        ValueError: If required dbt_cli_profile is not provided
            when needed for profile writing.
        RuntimeError: If the dbt source freshness run fails for any reason,
            it will be indicated by the exception raised.
    """
    # Delegate to the shared CLI trigger; calling `.fn` invokes the underlying
    # function directly so no nested Prefect task run is created.
    results = await trigger_dbt_cli_command.fn(
        command="source freshness",
        profiles_dir=profiles_dir,
        project_dir=project_dir,
        overwrite_profiles=overwrite_profiles,
        dbt_cli_profile=dbt_cli_profile,
        create_summary_artifact=create_summary_artifact,
        summary_artifact_key=summary_artifact_key,
        extra_command_args=extra_command_args,
        stream_output=stream_output,
    )
    return results
def create_summary_markdown(run_results: dict[str, Any], command: str) -> str:
    """
    Create a markdown summary of the results of a predefined prefect-dbt task.

    Args:
        run_results: Node results grouped by status, as produced by
            `consolidate_run_results` (keys: Success/Error/Fail/Skipped/Warn).
        command: The dbt command that was executed (e.g. "run" or "dbt run").

    Returns:
        A markdown document containing a per-status count table, a section for
        unsuccessful nodes (if any), and a list of successful nodes (if any).
    """
    # Prefix the heading with "dbt" only when the command doesn't already
    # include it. Building the heading in one piece fixes the double space the
    # previous empty-prefix interpolation produced for commands like "dbt run".
    heading = command if command.startswith("dbt") else f"dbt {command}"
    markdown = f"# {heading} Task Summary\n"
    markdown += _create_node_summary_table_md(run_results=run_results)
    if (
        run_results["Error"] != []
        or run_results["Fail"] != []
        or run_results["Skipped"] != []
        or run_results["Warn"] != []
    ):
        markdown += "\n\n ## Unsuccessful Nodes ❌\n\n"
        markdown += _create_unsuccessful_markdown(run_results=run_results)
    if run_results["Success"] != []:
        successful_runs_str = ""
        for r in run_results["Success"]:
            # Different dbt result types expose different identifiers:
            # RunResult/FreshnessResult carry a node object, RunResultOutput
            # only a unique_id string; fall back to str() for anything else.
            if isinstance(r, (RunResult, FreshnessResult)):
                successful_runs_str += f"* {r.node.name}\n"
            elif isinstance(r, RunResultOutput):
                successful_runs_str += f"* {r.unique_id}\n"
            else:
                successful_runs_str += f"* {r}\n"
        markdown += f"""\n## Successful Nodes ✅\n\n{successful_runs_str}\n\n"""
    return markdown
def _create_node_info_md(node_name, resource_type, message, path, compiled_code) -> str:
    """
    Creates template for unsuccessful node information.

    Args:
        node_name: Name of the dbt node.
        resource_type: The dbt resource type of the node (e.g. model, seed, source).
        message: The status/error message reported for the node.
        path: File path of the node within the dbt project.
        compiled_code: The compiled SQL for the node; falsy to omit the
            compiled-code section (callers pass None for seeds and sources).

    Returns:
        A markdown fragment describing the node.
    """
    markdown = f"""
**{node_name}**
Type: {resource_type}
Message:
> {message}
Path: {path}
"""
    # Only emit the compiled-code fence when there is something to show.
    if compiled_code:
        markdown += f"""
Compiled code:
```sql
{compiled_code}
```
"""
    return markdown
def _create_node_summary_table_md(run_results: dict) -> str:
    """
    Creates a table for node summary.

    Args:
        run_results: Node results grouped by status
            (keys: Success/Error/Fail/Skipped/Warn).

    Returns:
        A markdown table of per-status node counts.
    """
    markdown = f"""
| Successes | Errors | Failures | Skips | Warnings |
| :-------: | :----: | :------: | :---: | :------: |
| {len(run_results["Success"])} | {len(run_results["Error"])} | {len(run_results["Fail"])} | {len(run_results["Skipped"])} | {len(run_results["Warn"])} |
"""
    return markdown
def _create_unsuccessful_markdown(run_results: dict) -> str:
    """
    Creates markdown summarizing the results of unsuccessful nodes,
    including compiled code.

    Args:
        run_results: Node results grouped by status
            (keys: Success/Error/Fail/Skipped/Warn).

    Returns:
        Markdown with one "### <Label> Nodes:" section per non-empty
        unsuccessful status, in Error/Fail/Skipped/Warn order.
    """
    # (status key in run_results, heading label) pairs, in display order.
    # Collapsing the four previously copy-pasted loops into one keeps the
    # output byte-identical while removing the duplication.
    sections = (
        ("Error", "Errored"),
        ("Fail", "Failed"),
        ("Skipped", "Skipped"),
        ("Warn", "Warned"),
    )
    markdown = ""
    for status, label in sections:
        nodes = run_results[status]
        if len(nodes) > 0:
            markdown += f"\n### {label} Nodes:\n"
            for n in nodes:
                markdown += _create_node_info_md(
                    n.node.name,
                    n.node.resource_type,
                    n.message,
                    n.node.path,
                    (
                        # Seeds and sources have no compiled SQL to show.
                        n.node.compiled_code
                        if n.node.resource_type not in ["seed", "source"]
                        else None
                    ),
                )
    return markdown
def consolidate_run_results(results: dbtRunnerResult) -> dict:
    """
    Group dbt node results from a runner invocation by outcome status.

    Args:
        results: The result object returned by a dbt runner invocation.

    Returns:
        A dict with keys "Success", "Fail", "Skipped", "Error" and "Warn",
        each mapping to the list of node results with that outcome. All lists
        are empty when the invocation raised an exception.
    """
    buckets: Dict[str, List[str]] = {
        "Success": [],
        "Fail": [],
        "Skipped": [],
        "Error": [],
        "Warn": [],
    }
    # An invocation-level exception means there are no per-node results.
    if results.exception is not None:
        return buckets
    for node_result in results.result.results:
        status = node_result.status
        if status == NodeStatus.Fail:
            buckets["Fail"].append(node_result)
        elif status in (NodeStatus.Error, NodeStatus.RuntimeErr):
            buckets["Error"].append(node_result)
        elif status == NodeStatus.Skipped:
            buckets["Skipped"].append(node_result)
        elif status in (NodeStatus.Success, NodeStatus.Pass):
            buckets["Success"].append(node_result)
        elif status == NodeStatus.Warn:
            buckets["Warn"].append(node_result)
    return buckets
| DbtCoreOperation |
python | walkccc__LeetCode | solutions/1274. Number of Ships in a Rectangle/1274.py | {
"start": 306,
"end": 1256
} | class ____(object):
def countShips(
self,
sea: 'Sea',
topRight: 'Point',
bottomLeft: 'Point',
) -> int:
if topRight.x < bottomLeft.x or topRight.y < bottomLeft.y:
return 0
if not sea.hasShips(topRight, bottomLeft):
return 0
# sea.hashShips(topRight, bottomLeft) == True
if topRight.x == bottomLeft.x and topRight.y == bottomLeft.y:
return 1
mx = (topRight.x + bottomLeft.x) // 2
my = (topRight.y + bottomLeft.y) // 2
ans = 0
# the top-right
ans += self.countShips(sea, topRight, Point(mx + 1, my + 1))
# the bottom-right
ans += self.countShips(sea, Point(topRight.x, my),
Point(mx + 1, bottomLeft.y))
# the top-left
ans += self.countShips(sea, Point(mx, topRight.y),
Point(bottomLeft.x, my + 1))
# the bottom-left
ans += self.countShips(sea, Point(mx, my), bottomLeft)
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/count-prefixes-of-a-given-string.py | {
"start": 61,
"end": 276
} | class ____(object):
def countPrefixes(self, words, s):
"""
:type words: List[str]
:type s: str
:rtype: int
"""
return sum(itertools.imap(s.startswith, words))
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox12.py | {
"start": 315,
"end": 866
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox12.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox("E9", "This is some text", {"fill": {"none": True}})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_daemon_cursor.py | {
"start": 1233,
"end": 2092
} | class ____(FieldSerializer):
def pack(
self,
mapping: Mapping[str, float],
whitelist_map: WhitelistMap,
descent_path: str,
) -> JsonSerializableValue:
return pack_value(SerializableNonScalarKeyMapping(mapping), whitelist_map, descent_path)
def unpack( # pyright: ignore[reportIncompatibleMethodOverride]
self,
unpacked_value: JsonSerializableValue,
whitelist_map: WhitelistMap,
context: UnpackContext,
) -> PackableValue:
return unpack_value(unpacked_value, dict, whitelist_map, context)
@whitelist_for_serdes(
field_serializers={
"last_observe_request_timestamp_by_asset_key": ObserveRequestTimestampSerializer
}
)
@dataclass(frozen=True)
# TODO: rename to scheduling cursor or something
# 2024-05-16 -- schrockn
| ObserveRequestTimestampSerializer |
python | encode__django-rest-framework | rest_framework/permissions.py | {
"start": 585,
"end": 895
} | class ____(OperationHolderMixin):
def __init__(self, operator_class, op1_class):
self.operator_class = operator_class
self.op1_class = op1_class
def __call__(self, *args, **kwargs):
op1 = self.op1_class(*args, **kwargs)
return self.operator_class(op1)
| SingleOperandHolder |
python | pytorch__pytorch | functorch/einops/_parsing.py | {
"start": 2053,
"end": 12309
} | class ____:
"""Structure containing information about one side of an `einops`-style pattern (e.g. 'b c (h w)')."""
def __init__(
self,
expression: str,
*,
allow_underscore: bool = False,
allow_duplicates: bool = False,
) -> None:
"""Parse the expression and store relevant metadata.
Args:
expression (str): the `einops`-pattern to parse
allow_underscore (bool): whether to allow axis identifier names to begin with an underscore
allow_duplicates (bool): whether to allow an identifier to appear more than once in the expression
"""
self.has_ellipsis: bool = False
self.has_ellipsis_parenthesized: Optional[bool] = None
self.identifiers: set[Union[str, AnonymousAxis]] = set()
# that's axes like 2, 3, 4 or 5. Axes with size 1 are exceptional and replaced with empty composition
self.has_non_unitary_anonymous_axes: bool = False
# composition keeps structure of composite axes, see how different corner cases are handled in tests
self.composition: list[Union[list[Union[str, AnonymousAxis]], str]] = []
if "." in expression:
if "..." not in expression:
raise ValueError(
"Expression may contain dots only inside ellipsis (...)"
)
if str.count(expression, "...") != 1 or str.count(expression, ".") != 3:
raise ValueError(
"Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor "
)
expression = expression.replace("...", _ellipsis)
self.has_ellipsis = True
bracket_group: Optional[list[Union[str, AnonymousAxis]]] = None
def add_axis_name(x: str) -> None:
if x in self.identifiers:
if not (allow_underscore and x == "_") and not allow_duplicates:
raise ValueError(
f"Indexing expression contains duplicate dimension '{x}'"
)
if x == _ellipsis:
self.identifiers.add(_ellipsis)
if bracket_group is None:
self.composition.append(_ellipsis)
self.has_ellipsis_parenthesized = False
else:
bracket_group.append(_ellipsis)
self.has_ellipsis_parenthesized = True
else:
is_number = str.isdecimal(x)
if is_number and int(x) == 1:
# handling the case of anonymous axis of length 1
if bracket_group is None:
self.composition.append([])
else:
pass # no need to think about 1s inside parenthesis
return
is_axis_name, reason = self.check_axis_name_return_reason(
x, allow_underscore=allow_underscore
)
if not (is_number or is_axis_name):
raise ValueError(f"Invalid axis identifier: {x}\n{reason}")
axis_name: Union[str, AnonymousAxis] = (
AnonymousAxis(x) if is_number else x
)
self.identifiers.add(axis_name)
if is_number:
self.has_non_unitary_anonymous_axes = True
if bracket_group is None:
self.composition.append([axis_name])
else:
bracket_group.append(axis_name)
current_identifier = None
for char in expression:
if char in "() ":
if current_identifier is not None:
add_axis_name(current_identifier)
current_identifier = None
if char == "(":
if bracket_group is not None:
raise ValueError(
"Axis composition is one-level (brackets inside brackets not allowed)"
)
bracket_group = []
elif char == ")":
if bracket_group is None:
raise ValueError("Brackets are not balanced")
self.composition.append(bracket_group)
bracket_group = None
elif str.isalnum(char) or char in ["_", _ellipsis]:
if current_identifier is None:
current_identifier = char
else:
current_identifier += char
else:
raise ValueError(f"Unknown character '{char}'")
if bracket_group is not None:
raise ValueError(f"Imbalanced parentheses in expression: '{expression}'")
if current_identifier is not None:
add_axis_name(current_identifier)
@staticmethod
def check_axis_name_return_reason(
name: str, allow_underscore: bool = False
) -> tuple[bool, str]:
"""Check if the given axis name is valid, and a message explaining why if not.
Valid axes names are python identifiers except keywords, and should not start or end with an underscore.
Args:
name (str): the axis name to check
allow_underscore (bool): whether axis names are allowed to start with an underscore
Returns:
tuple[bool, str]: whether the axis name is valid, a message explaining why if not
"""
if not str.isidentifier(name):
return False, "not a valid python identifier"
elif name[0] == "_" or name[-1] == "_":
if name == "_" and allow_underscore:
return True, ""
return False, "axis name should should not start or end with underscore"
else:
if keyword.iskeyword(name):
warnings.warn(
f"It is discouraged to use axes names that are keywords: {name}",
RuntimeWarning,
)
if name in ["axis"]:
warnings.warn(
"It is discouraged to use 'axis' as an axis name and will raise an error in future",
FutureWarning,
)
return True, ""
@staticmethod
def check_axis_name(name: str) -> bool:
"""Check if the name is a valid axis name.
Args:
name (str): the axis name to check
Returns:
bool: whether the axis name is valid
"""
is_valid, _ = ParsedExpression.check_axis_name_return_reason(name)
return is_valid
def parse_pattern(
pattern: str, axes_lengths: Mapping[str, int]
) -> tuple[ParsedExpression, ParsedExpression]:
"""Parse an `einops`-style pattern into a left-hand side and right-hand side `ParsedExpression` object.
Args:
pattern (str): the `einops`-style rearrangement pattern
axes_lengths (Mapping[str, int]): any additional length specifications for dimensions
Returns:
tuple[ParsedExpression, ParsedExpression]: a tuple containing the left-hand side and right-hand side expressions
"""
# adapted from einops.einops._prepare_transformation_recipe
# https://github.com/arogozhnikov/einops/blob/230ac1526c1f42c9e1f7373912c7f8047496df11/einops/einops.py
try:
left_str, right_str = pattern.split("->")
except ValueError:
raise ValueError("Pattern must contain a single '->' separator") from None
if _ellipsis in axes_lengths:
raise ValueError(f"'{_ellipsis}' is not an allowed axis identifier")
left = ParsedExpression(left_str)
right = ParsedExpression(right_str)
if not left.has_ellipsis and right.has_ellipsis:
raise ValueError(
f"Ellipsis found in right side, but not left side of a pattern {pattern}"
)
if left.has_ellipsis and left.has_ellipsis_parenthesized:
raise ValueError(
f"Ellipsis is parenthesis in the left side is not allowed: {pattern}"
)
return left, right
def validate_rearrange_expressions(
left: ParsedExpression, right: ParsedExpression, axes_lengths: Mapping[str, int]
) -> None:
"""Perform expression validations that are specific to the `rearrange` operation.
Args:
left (ParsedExpression): left-hand side expression
right (ParsedExpression): right-hand side expression
axes_lengths (Mapping[str, int]): any additional length specifications for dimensions
"""
for length in axes_lengths.values():
if (length_type := type(length)) is not int:
raise TypeError(
f"rearrange axis lengths must be integers, got: {length_type}"
)
if left.has_non_unitary_anonymous_axes or right.has_non_unitary_anonymous_axes:
raise ValueError("rearrange only supports unnamed axes of size 1")
difference = set.symmetric_difference(left.identifiers, right.identifiers)
if len(difference) > 0:
raise ValueError(
f"Identifiers only on one side of rearrange expression (should be on both): {difference}"
)
unmatched_axes = axes_lengths.keys() - left.identifiers
if len(unmatched_axes) > 0:
raise ValueError(
f"Identifiers not found in rearrange expression: {unmatched_axes}"
)
def comma_separate(collection: Collection[Union[str, Collection[str]]]) -> str:
"""Convert a collection of strings representing first class dims into a comma-separated string.
Args:
collection (Collection[Union[str, Collection[str]]]): the collection of strings to convert
Returns:
str: the comma-separated string
Examples:
>>> comma_separate(("d0",))
'd0'
>>> comma_separate(("d0", "d1", "d2", "d3"))
'd0, d1, d2, d3'
>>> comma_separate([("d1", "d4")])
'(d1, d4)'
>>> comma_separate([("d0",), (), ("d1",), ("d2",), ("d3", "d4")])
'(d0,), (), (d1,), (d2,), (d3, d4)'
"""
return ", ".join(
item
if isinstance(item, str)
else f"({comma_separate(item)}{',' if len(item) == 1 else ''})"
for item in collection
)
| ParsedExpression |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 61995,
"end": 62594
} | class ____(BaseDataset):
def test_write_list(self):
ds = self.f.create_dataset(make_name(), (1,), dtype="3int8")
ds[0] = [1, 2, 3]
np.testing.assert_array_equal(ds[:], [[1, 2, 3]])
ds[:] = [[4, 5, 6]]
np.testing.assert_array_equal(ds[:], [[4, 5, 6]])
def test_write_array(self):
ds = self.f.create_dataset(make_name(), (1,), dtype="3int8")
ds[0] = np.array([1, 2, 3])
np.testing.assert_array_equal(ds[:], [[1, 2, 3]])
ds[:] = np.array([[4, 5, 6]])
np.testing.assert_array_equal(ds[:], [[4, 5, 6]])
| TestSubarray |
python | scrapy__scrapy | scrapy/utils/sitemap.py | {
"start": 368,
"end": 1839
} | class ____:
"""Class to parse Sitemap (type=urlset) and Sitemap Index
(type=sitemapindex) files"""
def __init__(self, xmltext: str | bytes):
xmlp = lxml.etree.XMLParser(
recover=True, remove_comments=True, resolve_entities=False
)
self._root = lxml.etree.fromstring(xmltext, parser=xmlp)
rt = self._root.tag
assert isinstance(rt, str)
self.type = rt.split("}", 1)[1] if "}" in rt else rt
def __iter__(self) -> Iterator[dict[str, Any]]:
for elem in self._root.getchildren():
d: dict[str, Any] = {}
for el in elem.getchildren():
tag = el.tag
assert isinstance(tag, str)
name = tag.split("}", 1)[1] if "}" in tag else tag
if name == "link":
if "href" in el.attrib:
d.setdefault("alternate", []).append(el.get("href"))
else:
d[name] = el.text.strip() if el.text else ""
if "loc" in d:
yield d
def sitemap_urls_from_robots(
robots_text: str, base_url: str | None = None
) -> Iterable[str]:
"""Return an iterator over all sitemap urls contained in the given
robots.txt file
"""
for line in robots_text.splitlines():
if line.lstrip().lower().startswith("sitemap:"):
url = line.split(":", 1)[1].strip()
yield urljoin(base_url or "", url)
| Sitemap |
python | pytorch__pytorch | test/inductor/test_benchmarking.py | {
"start": 549,
"end": 4128
} | class ____(TestCase):
def setUp(self):
super().setUp()
torch.manual_seed(12345)
counters.clear()
@staticmethod
def get_counter_value(benchmarker_cls, fn_name):
return counters["inductor"][
f"benchmarking.{benchmarker_cls.__name__}.{fn_name}"
]
@staticmethod
def make_params(device, size=100):
fn, fn_args, fn_kwargs = torch.sum, (torch.randn(size, device=device),), {}
_callable = lambda: fn(*fn_args, **fn_kwargs) # noqa: E731
return (fn, fn_args, fn_kwargs), _callable
@unittest.skipIf(not HAS_CPU or not HAS_GPU, "requires CPU and GPU")
@decorateIf(
unittest.expectedFailure,
lambda params: params["benchmarker_cls"] is Benchmarker
and params["device"] == GPU_TYPE,
)
@parametrize("benchmarker_cls", ALL_BENCHMARKER_CLASSES)
@parametrize("device", (GPU_TYPE, "cpu"))
def test_benchmark_smoke(self, benchmarker_cls, device):
benchmarker = benchmarker_cls()
(fn, fn_args, fn_kwargs), _ = self.make_params(device)
timing = benchmarker.benchmark(fn, fn_args, fn_kwargs)
self.assertGreater(timing, 0)
self.assertEqual(self.get_counter_value(benchmarker_cls, "benchmark"), 1)
self.assertEqual(
self.get_counter_value(
benchmarker_cls, "benchmark_cpu" if device == "cpu" else "benchmark_gpu"
),
1,
)
@unittest.skipIf(not HAS_CPU, "requires CPU")
@parametrize("benchmarker_cls", ALL_BENCHMARKER_CLASSES)
def test_benchmark_cpu_smoke(self, benchmarker_cls, device="cpu"):
benchmarker = benchmarker_cls()
_, _callable = self.make_params(device)
timing = benchmarker.benchmark_cpu(_callable)
self.assertGreater(timing, 0)
self.assertEqual(self.get_counter_value(benchmarker_cls, "benchmark_cpu"), 1)
@unittest.skipIf(not HAS_GPU, "requires GPU")
@decorateIf(
unittest.expectedFailure,
lambda params: params["benchmarker_cls"] is Benchmarker,
)
@parametrize("benchmarker_cls", ALL_BENCHMARKER_CLASSES)
def test_benchmark_gpu_smoke(self, benchmarker_cls, device=GPU_TYPE):
benchmarker = benchmarker_cls()
_, _callable = self.make_params(device)
timing = benchmarker.benchmark_gpu(_callable)
self.assertGreater(timing, 0)
self.assertEqual(self.get_counter_value(benchmarker_cls, "benchmark_gpu"), 1)
@unittest.skipIf(not HAS_CPU and not HAS_GPU, "requires CPU or GPU")
@unittest.expectedFailure
@parametrize("benchmarker_cls", ALL_BENCHMARKER_CLASSES)
def test_benchmark_safely_infers_device_no_devices(
self, benchmarker_cls, device="cpu" if HAS_CPU else GPU_TYPE
):
benchmarker = benchmarker_cls()
(fn, _, _), _ = self.make_params(device)
benchmarker.benchmark(fn, (), {})
@unittest.skipIf(not HAS_CPU or not HAS_GPU, "requires CPU and GPU")
@unittest.expectedFailure
@parametrize("benchmarker_cls", ALL_BENCHMARKER_CLASSES)
def test_benchmark_safely_infers_device_many_devices(self, benchmarker_cls):
benchmarker = benchmarker_cls()
(fn, cpu_args, cpu_kwargs), _ = self.make_sum("cpu")
(_, gpu_args, gpu_kwargs), _ = self.make_sum(GPU_TYPE)
many_devices_args = cpu_args + gpu_args
many_devices_kwargs = cpu_kwargs
many_devices_kwargs.update(gpu_kwargs)
benchmarker.benchmark(fn, many_devices_args, many_devices_kwargs)
if __name__ == "__main__":
run_tests()
| TestBenchmarker |
python | mlflow__mlflow | dev/clint/src/clint/rules/nested_mock_patch.py | {
"start": 84,
"end": 1963
} | class ____(Rule):
def _message(self) -> str:
return (
"Do not nest `unittest.mock.patch` context managers. "
"Use multiple context managers in a single `with` statement instead: "
"`with mock.patch(...), mock.patch(...): ...`"
)
@staticmethod
def check(node: ast.With, resolver: Resolver) -> bool:
"""
Returns True if the with statement uses mock.patch and contains only a single
nested with statement that also uses mock.patch.
"""
# Check if the outer with statement uses mock.patch
outer_has_mock_patch = any(
NestedMockPatch._is_mock_patch(item.context_expr, resolver) for item in node.items
)
if not outer_has_mock_patch:
return False
# Check if the body has exactly one statement and it's a with statement
if len(node.body) == 1 and isinstance(node.body[0], ast.With):
# Check if the nested with statement also uses mock.patch
inner_has_mock_patch = any(
NestedMockPatch._is_mock_patch(item.context_expr, resolver)
for item in node.body[0].items
)
if inner_has_mock_patch:
return True
return False
@staticmethod
def _is_mock_patch(node: ast.expr, resolver: Resolver) -> bool:
"""
Returns True if the node is a call to mock.patch or any of its variants.
"""
# Handle direct calls: mock.patch(...), mock.patch.object(...), etc.
if isinstance(node, ast.Call):
if res := resolver.resolve(node.func):
match res:
# Matches unittest.mock.patch, unittest.mock.patch.object, etc.
case ["unittest", "mock", "patch", *_]:
return True
return False
| NestedMockPatch |
python | sqlalchemy__sqlalchemy | test/orm/test_cache_key.py | {
"start": 2276,
"end": 21404
} | class ____(fixtures.CacheKeyFixture, _fixtures.FixtureTest):
run_setup_mappers = "once"
run_inserts = None
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
def test_mapper_and_aliased(self):
User, Address, Keyword = self.classes("User", "Address", "Keyword")
addresses_table = self.tables.addresses
self._run_cache_key_fixture(
lambda: (
inspect(User),
inspect(Address),
inspect(aliased(User)),
inspect(aliased(aliased(User, addresses_table))),
inspect(aliased(aliased(User), addresses_table.select())),
inspect(aliased(Address)),
inspect(aliased(Address, addresses_table.select())),
inspect(aliased(User, addresses_table.select())),
),
compare_values=True,
)
def test_attributes(self):
User, Address, Keyword = self.classes("User", "Address", "Keyword")
self._run_cache_key_fixture(
lambda: (
User.id,
Address.id,
aliased(User).id,
aliased(User, name="foo").id,
aliased(User, name="bar").id,
User.name,
User.addresses,
Address.email_address,
aliased(User).addresses,
),
compare_values=True,
)
def test_bundles_in_annotations(self):
User = self.classes.User
self._run_cache_key_fixture(
lambda: (
Bundle("mybundle", User.id).__clause_element__(),
Bundle("myotherbundle", User.id).__clause_element__(),
Bundle("mybundle", User.name).__clause_element__(),
Bundle("mybundle", User.id, User.name).__clause_element__(),
),
compare_values=True,
)
def test_bundles_directly(self):
User = self.classes.User
self._run_cache_key_fixture(
lambda: (
Bundle("mybundle", User.id),
Bundle("mybundle", User.id).__clause_element__(),
Bundle("myotherbundle", User.id),
Bundle("mybundle", User.name),
Bundle("mybundle", User.id, User.name),
),
compare_values=True,
)
def test_query_expr(self):
(User,) = self.classes("User")
self._run_cache_key_fixture(
lambda: (
with_expression(User.name, true()),
with_expression(User.name, null()),
with_expression(User.name, func.foobar()),
with_expression(User.name, User.name == "test"),
),
compare_values=True,
)
self._run_cache_key_fixture(
lambda: (
Load(User).with_expression(User.name, true()),
Load(User).with_expression(User.name, null()),
Load(User).with_expression(User.name, func.foobar()),
Load(User).with_expression(User.name, User.name == "test"),
),
compare_values=True,
)
def test_loader_criteria(self):
User, Address = self.classes("User", "Address")
class Foo:
id = Column(Integer)
name = Column(String)
self._run_cache_key_fixture(
lambda: (
with_loader_criteria(User, User.name != "somename"),
with_loader_criteria(User, User.id != 5),
with_loader_criteria(User, lambda cls: cls.id == 10),
with_loader_criteria(Address, Address.id != 5),
with_loader_criteria(Foo, lambda cls: cls.id == 10),
),
compare_values=True,
)
def test_loader_criteria_bound_param_thing(self):
    """Closure values in lambda criteria become bound parameters.

    Two ``with_loader_criteria()`` options built from the same lambda but
    closing over different values must generate the *same* cache key,
    while each key's extracted bindparams carry the per-instance value.
    """

    class Foo:
        id = Column(Integer)

    def go(param):
        # `param` is captured by the lambda; it should surface as a
        # bound parameter of the generated cache key, not as part of it
        return with_loader_criteria(Foo, lambda cls: cls.id == param)

    g1 = go(10)
    g2 = go(20)

    ck1 = g1._generate_cache_key()
    ck2 = g2._generate_cache_key()

    # identical keys and bindparam names...
    eq_(ck1.key, ck2.key)
    eq_(ck1.bindparams[0].key, ck2.bindparams[0].key)
    # ...but each key retains its own closed-over value
    eq_(ck1.bindparams[0].value, 10)
    eq_(ck2.bindparams[0].value, 20)
def test_instrumented_attributes(self):
    """Cache keys for relationship-bound instrumented attributes.

    Plain attributes, ``of_type()`` against an alias, and ``and_()``
    criteria variants must all key distinctly.
    """
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item"
    )

    def make_cases():
        return (
            User.addresses,
            User.addresses.of_type(aliased(Address)),
            User.orders,
            User.orders.and_(Order.id != 5),
            User.orders.and_(Order.description != "somename"),
        )

    self._run_cache_key_fixture(make_cases, compare_values=True)
def test_unbound_options(self):
    """Cache keys for unbound loader options.

    A broad matrix of eager/deferred loader strategies — ``joinedload``,
    ``subqueryload``, ``selectinload``, ``defer``, ``defaultload`` — with
    aliases, ``of_type()``, ``and_()`` criteria, and chained sub-options.
    Every entry must generate a distinct cache key.
    """
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item"
    )

    self._run_cache_key_fixture(
        lambda: (
            joinedload(User.addresses),
            joinedload(User.addresses.of_type(aliased(Address))),
            joinedload(User.orders),
            # and_() criteria with differing operators/values must key apart
            joinedload(User.orders.and_(Order.id != 5)),
            joinedload(User.orders.and_(Order.id == 5)),
            joinedload(User.orders.and_(Order.description != "somename")),
            joinedload(User.orders).selectinload(Order.items),
            defer(User.id),
            defer("*"),
            defer(Address.id),
            subqueryload(User.orders),
            selectinload(User.orders),
            joinedload(User.addresses).defer(Address.id),
            joinedload(aliased(User).addresses).defer(Address.id),
            joinedload(User.orders).joinedload(Order.items),
            joinedload(User.orders).subqueryload(Order.items),
            subqueryload(User.orders).subqueryload(Order.items),
            subqueryload(User.orders)
            .subqueryload(Order.items)
            .defer(Item.description),
            defaultload(User.orders).defaultload(Order.items),
            defaultload(User.orders),
        ),
        compare_values=True,
    )
def test_unbound_sub_options(self):
    """test #6869

    Cache keys for unbound options composed via ``.options(...)``:
    nested sub-option chains of differing depth must key distinctly.
    """
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item"
    )
    Dingaling = self.classes.Dingaling

    def make_cases():
        return (
            joinedload(User.addresses).options(
                joinedload(Address.dingaling)
            ),
            # same chain, one level deeper via load_only()
            joinedload(User.addresses).options(
                joinedload(Address.dingaling).options(
                    load_only(Dingaling.id)
                )
            ),
            joinedload(User.orders).options(
                joinedload(Order.items).options(joinedload(Item.keywords))
            ),
        )

    self._run_cache_key_fixture(make_cases, compare_values=True)
def test_bound_options(self):
    """Cache keys for ``Load``-bound loader options.

    Mirrors the unbound-option matrix but attached through ``Load(...)``
    against entities and an ``aliased(Address)``; includes ``raiseload``
    and wildcard ``defer("*")`` variants.
    """
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item"
    )

    a1 = aliased(Address)

    self._run_cache_key_fixture(
        lambda: (
            Load(User).joinedload(User.addresses),
            Load(User).joinedload(
                User.addresses.of_type(aliased(Address))
            ),
            Load(User).joinedload(User.orders),
            Load(User).joinedload(User.orders.and_(Order.id != 5)),
            Load(User).joinedload(
                User.orders.and_(Order.description != "somename")
            ),
            Load(User).defer(User.id),
            Load(User).subqueryload(User.addresses),
            Load(Address).defer(Address.id),
            Load(Address).defer("*"),
            # Load rooted at the alias itself, deferring the alias's column
            Load(a1).defer(a1.id),
            Load(User).joinedload(User.addresses).defer(Address.id),
            Load(User).joinedload(User.orders).joinedload(Order.items),
            Load(User).joinedload(User.orders).subqueryload(Order.items),
            Load(User).subqueryload(User.orders).subqueryload(Order.items),
            Load(User)
            .subqueryload(User.orders)
            .subqueryload(Order.items)
            .defer(Item.description),
            Load(User).defaultload(User.orders).defaultload(Order.items),
            Load(User).defaultload(User.orders),
            Load(Address).raiseload("*"),
            Load(Address).raiseload(Address.user),
        ),
        compare_values=True,
    )
def test_selects_w_orm_joins(self):
    """Cache keys for ``select()`` constructs using ORM-level joins.

    Covers relationship joins, explicit-target joins, alias targets,
    ``of_type()``, ``and_()`` criteria, ``join_from()``, and a raw
    ``orm_join()`` used via ``select_from()``.
    """
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item"
    )

    a1 = aliased(Address)

    self._run_cache_key_fixture(
        lambda: (
            select(User).join(User.addresses),
            select(User).join(User.orders),
            select(User).join(User.addresses).join(User.orders),
            # explicit target + onclause forms
            select(User).join(Address, User.addresses),
            select(User).join(a1, User.addresses),
            select(User).join(User.addresses.of_type(a1)),
            select(User).join(
                User.addresses.and_(Address.email_address == "foo")
            ),
            select(User)
            .join(Address, User.addresses)
            .join_from(User, Order),
            select(User)
            .join(Address, User.addresses)
            .join_from(User, User.orders),
            select(User.id, Order.id).select_from(
                orm_join(User, Order, User.orders)
            ),
        ),
        compare_values=True,
    )
def test_orm_query_w_orm_joins(self):
    """Cache keys for legacy ``Query`` objects using ORM joins.

    Queries are converted through ``stmt_20()`` before key generation;
    variants cover relationship joins, explicit targets, an alias target,
    and ``of_type()``.
    """
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item"
    )

    addr_alias = aliased(Address)

    def make_cases():
        return stmt_20(
            fixture_session().query(User).join(User.addresses),
            fixture_session().query(User).join(User.orders),
            fixture_session()
            .query(User)
            .join(User.addresses)
            .join(User.orders),
            fixture_session()
            .query(User)
            .join(User.addresses)
            .join(Address.dingaling),
            fixture_session().query(User).join(Address, User.addresses),
            fixture_session().query(User).join(addr_alias, User.addresses),
            fixture_session()
            .query(User)
            .join(User.addresses.of_type(addr_alias)),
        )

    self._run_cache_key_fixture(make_cases, compare_values=True)
def test_orm_query_using_with_entities(self):
    """test issue #6503

    Cache keys for ``Query.with_entities()``: queries whose original
    entities are replaced must still key correctly, including the
    ``_MemoizedSelectEntities`` case where a prior ``join()`` or
    ``options()`` forces the original entities to remain in the key.
    """
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item"
    )

    self._run_cache_key_fixture(
        lambda: stmt_20(
            fixture_session()
            .query(User)
            .join(User.addresses)
            .with_entities(Address.id),
            #
            fixture_session().query(Address.id).join(User.addresses),
            #
            fixture_session()
            .query(User)
            .options(selectinload(User.addresses))
            .with_entities(User.id),
            #
            fixture_session()
            .query(User)
            .options(selectinload(User.addresses)),
            #
            fixture_session().query(User).with_entities(User.id),
            #
            # here, propagate_attr->orm is Address, entity is Address.id,
            # but the join() + with_entities() will log a
            # _MemoizedSelectEntities to differentiate
            fixture_session()
            .query(Address, Order)
            .join(Address.dingaling)
            .with_entities(Address.id),
            #
            # same, propagate_attr->orm is Address, entity is Address.id,
            # but the join() + with_entities() will log a
            # _MemoizedSelectEntities to differentiate
            fixture_session()
            .query(Address, User)
            .join(Address.dingaling)
            .with_entities(Address.id),
        ),
        compare_values=True,
    )
def test_synonyms(self, registry):
    """test for issue discovered in #7394

    ``synonym()`` attributes must generate cache keys distinct from each
    other and from the columns they proxy, including when accessed
    through (named) aliases.
    """

    @registry.mapped
    class User2:
        __table__ = self.tables.users

        # proxies the "name" column
        name_syn = synonym("name")

    @registry.mapped
    class Address2:
        __table__ = self.tables.addresses

        # same synonym attribute name as User2, different target column —
        # the two must still key apart
        name_syn = synonym("email_address")

    self._run_cache_key_fixture(
        lambda: (
            User2.id,
            User2.name,
            User2.name_syn,
            Address2.name_syn,
            Address2.email_address,
            aliased(User2).name_syn,
            aliased(User2, name="foo").name_syn,
            aliased(User2, name="bar").name_syn,
        ),
        compare_values=True,
    )
def test_more_with_entities_sanity_checks(self):
    """test issue #6503

    Sanity checks for when ``with_entities()`` discards the original
    entities from the cache key versus when a prior ``join()`` preserves
    them via ``_MemoizedSelectEntities``.
    """
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item"
    )

    sess = fixture_session()

    q1 = (
        sess.query(Address, Order)
        .with_entities(Address.id)
        ._statement_20()
    )
    q2 = (
        sess.query(Address, User).with_entities(Address.id)._statement_20()
    )

    assert not q1._memoized_select_entities
    assert not q2._memoized_select_entities

    # no joins or options, so q1 and q2 have the same cache key as Order/
    # User are discarded. Note Address is first so propagate_attrs->orm is
    # Address.
    eq_(q1._generate_cache_key(), q2._generate_cache_key())

    q3 = sess.query(Order).with_entities(Address.id)._statement_20()
    q4 = sess.query(User).with_entities(Address.id)._statement_20()

    # with Order/User as lead entity, this affects propagate_attrs->orm
    # so keys are different
    ne_(q3._generate_cache_key(), q4._generate_cache_key())

    # confirm by deleting propagate attrs and memoized key and
    # running again
    q3._propagate_attrs = None
    q4._propagate_attrs = None
    del q3.__dict__["_generate_cache_key"]
    del q4.__dict__["_generate_cache_key"]
    eq_(q3._generate_cache_key(), q4._generate_cache_key())

    # once there's a join() or options() prior to with_entities, now they
    # are not discarded from the key; Order and User are in the
    # _MemoizedSelectEntities
    q5 = (
        sess.query(Address, Order)
        .join(Address.dingaling)
        .with_entities(Address.id)
        ._statement_20()
    )
    q6 = (
        sess.query(Address, User)
        .join(Address.dingaling)
        .with_entities(Address.id)
        ._statement_20()
    )

    assert q5._memoized_select_entities
    assert q6._memoized_select_entities
    ne_(q5._generate_cache_key(), q6._generate_cache_key())
def test_orm_query_from_statement(self):
    """Cache keys for ``from_statement()`` against textual SQL.

    Variants differ by attached loader options, by the text of the
    statement itself, and by the selected entity/column.
    """
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item"
    )

    def make_cases():
        return stmt_20(
            fixture_session()
            .query(User)
            .from_statement(text("select * from user")),
            # 2.0-style select() with the same textual statement
            select(User).from_statement(text("select * from user")),
            fixture_session()
            .query(User)
            .options(selectinload(User.addresses))
            .from_statement(text("select * from user")),
            fixture_session()
            .query(User)
            .options(subqueryload(User.addresses))
            .from_statement(text("select * from user")),
            # differing SQL text must produce a differing key
            fixture_session()
            .query(User)
            .from_statement(text("select * from user order by id")),
            fixture_session()
            .query(User.id)
            .from_statement(text("select * from user")),
        )

    self._run_cache_key_fixture(make_cases, compare_values=True)
def test_orm_query_basic(self):
    """Cache keys for basic legacy ``Query`` shapes.

    Filters, ordering, grouping, joins, aliases, prefixes, and
    multi-entity queries — each combination must key distinctly after
    conversion via ``stmt_20()``.
    """
    User, Address, Keyword, Order, Item = self.classes(
        "User", "Address", "Keyword", "Order", "Item"
    )

    a1 = aliased(Address)

    self._run_cache_key_fixture(
        lambda: stmt_20(
            fixture_session().query(User),
            fixture_session().query(User).prefix_with("foo"),
            fixture_session().query(User).filter_by(name="ed"),
            # same filter, differing order_by / group_by columns
            fixture_session()
            .query(User)
            .filter_by(name="ed")
            .order_by(User.id),
            fixture_session()
            .query(User)
            .filter_by(name="ed")
            .order_by(User.name),
            fixture_session()
            .query(User)
            .filter_by(name="ed")
            .group_by(User.id),
            fixture_session()
            .query(User)
            .join(User.addresses)
            .filter(User.name == "ed"),
            fixture_session().query(User).join(User.orders),
            fixture_session()
            .query(User)
            .join(User.orders)
            .filter(Order.description == "adsf"),
            fixture_session()
            .query(User)
            .join(User.addresses)
            .join(User.orders),
            fixture_session().query(User).join(Address, User.addresses),
            fixture_session().query(User).join(a1, User.addresses),
            fixture_session().query(User).join(User.addresses.of_type(a1)),
            fixture_session().query(Address).join(Address.user),
            fixture_session().query(User, Address).filter_by(name="ed"),
            fixture_session().query(User, a1).filter_by(name="ed"),
        ),
        compare_values=True,
    )
def test_options(self):
    """Cache keys for a ``CacheableOptions`` subclass and its merges.

    The class itself and successive ``+ {...}`` override results must
    each key distinctly on the attributes declared in
    ``_cache_key_traversal``.
    """

    class MyOpt(CacheableOptions):
        _cache_key_traversal = [
            ("x", InternalTraversal.dp_plain_obj),
            ("y", InternalTraversal.dp_plain_obj),
        ]
        x = 5
        y = ()

    def make_cases():
        return (
            MyOpt,
            MyOpt + {"x": 10},
            MyOpt + {"x": 15, "y": ("foo",)},
            # chained merges: the later dict overrides "y" again
            MyOpt + {"x": 15, "y": ("foo",)} + {"y": ("foo", "bar")},
        )

    self._run_cache_key_fixture(make_cases, compare_values=True)
| CacheKeyTest |
python | pola-rs__polars | py-polars/tests/unit/functions/test_col.py | {
"start": 1460,
"end": 3595
} | class ____:
def __init__(self):
self._selected = df.select(pl.col.__foo)
def foo(self):
return df.select(pl.col.__foo)
@classmethod
def misc(cls):
def _nested():
return df.select(pl.col.__foo)
return _nested()
@staticmethod
def indirect():
return Mangler.misc()
@staticmethod
def testing1234():
return df.select(pl.col.__foo)
# detect mangling in init/instancemethod
assert Mangler()._selected.columns == ["__foo"]
assert Mangler().foo().columns == ["__foo"]
# additionally detect mangling in classmethod/staticmethod
if version_info >= (3, 11):
assert Mangler.misc().columns == ["__foo"]
assert Mangler.indirect().columns == ["__foo"]
assert Mangler.testing1234().columns == ["__foo"]
print("OK", end="")
""",
],
)
assert out == b"OK"
def test_col_select() -> None:
df = pl.DataFrame(
{
"ham": [1, 2, 3],
"hamburger": [11, 22, 33],
"foo": [3, 2, 1],
"bar": ["a", "b", "c"],
}
)
# Single column
assert df.select(pl.col("foo")).columns == ["foo"]
# Regex
assert df.select(pl.col("*")).columns == ["ham", "hamburger", "foo", "bar"]
assert df.select(pl.col("^ham.*$")).columns == ["ham", "hamburger"]
assert df.select(pl.col("*").exclude("ham")).columns == ["hamburger", "foo", "bar"]
# Multiple inputs
assert df.select(pl.col(["hamburger", "foo"])).columns == ["hamburger", "foo"]
assert df.select(pl.col("hamburger", "foo")).columns == ["hamburger", "foo"]
assert df.select(pl.col(pl.Series(["ham", "foo"]))).columns == ["ham", "foo"]
# Dtypes
assert df.select(pl.col(pl.String)).columns == ["bar"]
for dtype_col in (
pl.col(NUMERIC_DTYPES),
pl.col(pl.Int64, pl.Float64),
):
assert df.select(dtype_col).columns == ["ham", "hamburger", "foo"]
def test_col_series_selection() -> None:
ldf = pl.LazyFrame({"a": [1], "b": [1], "c": [1]})
srs = pl.Series(["b", "c"])
assert ldf.select(pl.col(srs)).collect_schema().names() == ["b", "c"]
| Mangler |
python | RaRe-Technologies__gensim | gensim/models/bm25model.py | {
"start": 13387,
"end": 17130
} | class ____(BM25ABC):
"""The scoring function of Trotman et al. [5]_.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.models import AtireBM25Model
>>> from gensim.test.utils import common_texts
>>>
>>> dictionary = Dictionary(common_texts) # fit dictionary
>>> corpus = [dictionary.doc2bow(line) for line in common_texts] # convert corpus to BoW format
>>>
>>> model = AtireBM25Model(dictionary=dictionary) # fit model
>>> vector = model[corpus[0]] # apply model to the first corpus document
References
----------
.. [5] Trotman, A., Jia X., Crane M., `Towards an Efficient and Effective Search Engine
<http://www.cs.otago.ac.nz/homepages/andrew/involvement/2012-SIGIR-OSIR.pdf#page=45>`_,
In: SIGIR 2012 Workshop on Open Source Information Retrieval. 40–47.
"""
def __init__(self, corpus=None, dictionary=None, k1=1.5, b=0.75):
r"""Pre-compute the average length of a document and inverse term document frequencies,
which will be used to weight term frequencies for the documents.
Parameters
----------
corpus : iterable of iterable of (int, int) or None, optional
An input corpus, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `dictionary` will be used to compute
the statistics. If both `corpus` and `dictionary` are None, the statistics will be left
unintialized. Default is None.
dictionary : :class:`~gensim.corpora.Dictionary`
An input dictionary, which will be used to compute the average length of a document and
inverse term document frequencies. If None, then `corpus` will be used to compute the
statistics. If both `corpus` and `dictionary` are None, the statistics will be left
unintialized. Default is None.
k1 : float
A positive tuning parameter that determines the impact of the term frequency on its BM25
weight. Singhal [5]_ suggests to set `k1` between 1.0 and 2.0. Default is 1.5.
b : float
A tuning parameter between 0.0 and 1.0 that determines the document length
normalization: 1.0 corresponds to full document normalization, while 0.0 corresponds to
no length normalization. Singhal [5]_ suggests to set `b` to 0.75, which is the default.
Attributes
----------
k1 : float
A positive tuning parameter that determines the impact of the term frequency on its BM25
weight. Singhal [3]_ suggests to set `k1` between 1.0 and 2.0. Default is 1.5.
b : float
A tuning parameter between 0.0 and 1.0 that determines the document length
normalization: 1.0 corresponds to full document normalization, while 0.0 corresponds to
no length normalization. Singhal [3]_ suggests to set `b` to 0.75, which is the default.
"""
self.k1, self.b = k1, b
super().__init__(corpus, dictionary)
def precompute_idfs(self, dfs, num_docs):
idfs = dict()
for term_id, freq in dfs.items():
idf = math.log(num_docs) - math.log(freq)
idfs[term_id] = idf
return idfs
def get_term_weights(self, num_tokens, term_frequencies, idfs):
term_weights = idfs * (term_frequencies * (self.k1 + 1)
/ (term_frequencies + self.k1 * (1 - self.b + self.b
* num_tokens / self.avgdl)))
return term_weights
| AtireBM25Model |
python | PyCQA__flake8 | src/flake8/checker.py | {
"start": 2442,
"end": 8673
} | class ____:
"""Manage the parallelism and checker instances for each plugin and file.
This class will be responsible for the following:
- Determining the parallelism of Flake8, e.g.:
* Do we use :mod:`multiprocessing` or is it unavailable?
* Do we automatically decide on the number of jobs to use or did the
user provide that?
- Falling back to a serial way of processing files if we run into an
OSError related to :mod:`multiprocessing`
- Organizing the results of each checker so we can group the output
together and make our output deterministic.
"""
def __init__(
self,
style_guide: StyleGuideManager,
plugins: Checkers,
argv: Sequence[str],
) -> None:
"""Initialize our Manager instance."""
self.style_guide = style_guide
self.options = style_guide.options
self.plugins = plugins
self.jobs = self._job_count()
self.statistics = {
"files": 0,
"logical lines": 0,
"physical lines": 0,
"tokens": 0,
}
self.exclude = (*self.options.exclude, *self.options.extend_exclude)
self.argv = argv
self.results: list[tuple[str, Results, dict[str, int]]] = []
def _process_statistics(self) -> None:
for _, _, statistics in self.results:
for statistic in defaults.STATISTIC_NAMES:
self.statistics[statistic] += statistics[statistic]
self.statistics["files"] += len(self.filenames)
def _job_count(self) -> int:
# First we walk through all of our error cases:
# - multiprocessing library is not present
# - the user provided stdin and that's not something we can handle
# well
# - the user provided some awful input
if utils.is_using_stdin(self.options.filenames):
LOG.warning(
"The --jobs option is not compatible with supplying "
"input using - . Ignoring --jobs arguments.",
)
return 0
jobs = self.options.jobs
# If the value is "auto", we want to let the multiprocessing library
# decide the number based on the number of CPUs. However, if that
# function is not implemented for this particular value of Python we
# default to 1
if jobs.is_auto:
try:
return multiprocessing.cpu_count()
except NotImplementedError:
return 0
# Otherwise, we know jobs should be an integer and we can just convert
# it to an integer
return jobs.n_jobs
def _handle_results(self, filename: str, results: Results) -> int:
style_guide = self.style_guide
reported_results_count = 0
for error_code, line_number, column, text, physical_line in results:
reported_results_count += style_guide.handle_error(
code=error_code,
filename=filename,
line_number=line_number,
column_number=column,
text=text,
physical_line=physical_line,
)
return reported_results_count
def report(self) -> tuple[int, int]:
"""Report all of the errors found in the managed file checkers.
This iterates over each of the checkers and reports the errors sorted
by line number.
:returns:
A tuple of the total results found and the results reported.
"""
results_reported = results_found = 0
self.results.sort(key=operator.itemgetter(0))
for filename, results, _ in self.results:
results.sort(key=operator.itemgetter(1, 2))
with self.style_guide.processing_file(filename):
results_reported += self._handle_results(filename, results)
results_found += len(results)
return (results_found, results_reported)
def run_parallel(self) -> None:
"""Run the checkers in parallel."""
with _mp_prefork(self.plugins, self.options):
pool = _try_initialize_processpool(self.jobs, self.argv)
if pool is None:
self.run_serial()
return
pool_closed = False
try:
self.results = list(pool.imap_unordered(_mp_run, self.filenames))
pool.close()
pool.join()
pool_closed = True
finally:
if not pool_closed:
pool.terminate()
pool.join()
def run_serial(self) -> None:
"""Run the checkers in serial."""
self.results = [
FileChecker(
filename=filename,
plugins=self.plugins,
options=self.options,
).run_checks()
for filename in self.filenames
]
def run(self) -> None:
"""Run all the checkers.
This will intelligently decide whether to run the checks in parallel
or whether to run them in serial.
If running the checks in parallel causes a problem (e.g.,
:issue:`117`) this also implements fallback to serial processing.
"""
try:
if self.jobs > 1 and len(self.filenames) > 1:
self.run_parallel()
else:
self.run_serial()
except KeyboardInterrupt:
LOG.warning("Flake8 was interrupted by the user")
raise exceptions.EarlyQuit("Early quit while running checks")
def start(self) -> None:
"""Start checking files.
:param paths:
Path names to check. This is passed directly to
:meth:`~Manager.make_checkers`.
"""
LOG.info("Making checkers")
self.filenames = tuple(
expand_paths(
paths=self.options.filenames,
stdin_display_name=self.options.stdin_display_name,
filename_patterns=self.options.filename,
exclude=self.exclude,
),
)
self.jobs = min(len(self.filenames), self.jobs)
def stop(self) -> None:
"""Stop checking files."""
self._process_statistics()
| Manager |
python | getsentry__sentry | src/sentry/relocation/models/relocationtransfer.py | {
"start": 1405,
"end": 1759
} | class ____(BaseRelocationTransfer):
__relocation_scope__ = RelocationScope.Excluded
# The public key of the region that is requesting
# the relocation.
public_key = models.BinaryField(null=True)
class Meta:
app_label = "sentry"
db_table = "sentry_controlrelocationtransfer"
@region_silo_model
| ControlRelocationTransfer |
python | keras-team__keras | keras/src/metrics/iou_metrics_test.py | {
"start": 221,
"end": 3883
} | class ____(testing.TestCase):
def test_config(self):
obj = metrics.IoU(
num_classes=2, target_class_ids=[1, 0], name="iou_class_1_0"
)
self.assertEqual(obj.name, "iou_class_1_0")
self.assertEqual(obj.num_classes, 2)
self.assertEqual(obj.target_class_ids, [1, 0])
obj2 = metrics.IoU.from_config(obj.get_config())
self.assertEqual(obj2.name, "iou_class_1_0")
self.assertEqual(obj2.num_classes, 2)
self.assertEqual(obj2.target_class_ids, [1, 0])
def test_unweighted(self):
y_pred = [0, 1, 0, 1]
y_true = [0, 0, 1, 1]
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
result = obj(y_true, y_pred)
# cm = [[1, 1],
# [1, 1]]
# sum_row = [2, 2], sum_col = [2, 2], true_positives = [1, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (2 + 2 - 1) + 1 / (2 + 2 - 1)) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_weighted(self):
y_pred = np.array([0, 1, 0, 1], dtype=np.float32)
y_true = np.array([0, 0, 1, 1])
sample_weight = np.array([0.2, 0.3, 0.4, 0.1])
obj = metrics.IoU(
num_classes=2, target_class_ids=[1, 0], dtype="float32"
)
result = obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
# 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (
0.1 / (0.4 + 0.5 - 0.1) + 0.2 / (0.6 + 0.5 - 0.2)
) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_multi_dim_input(self):
y_pred = np.array([[0, 1], [0, 1]], dtype=np.float32)
y_true = np.array([[0, 0], [1, 1]])
sample_weight = np.array([[0.2, 0.3], [0.4, 0.1]])
obj = metrics.IoU(
num_classes=2, target_class_ids=[0, 1], dtype="float32"
)
result = obj(y_true, y_pred, sample_weight=sample_weight)
# cm = [[0.2, 0.3],
# [0.4, 0.1]]
# sum_row = [0.6, 0.4], sum_col = [0.5, 0.5], true_positives = [0.2,
# 0.1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (
0.2 / (0.6 + 0.5 - 0.2) + 0.1 / (0.4 + 0.5 - 0.1)
) / 2
self.assertAllClose(result, expected_result, atol=1e-3)
def test_zero_valid_entries(self):
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
self.assertAllClose(obj.result(), 0, atol=1e-3)
def test_zero_and_non_zero_entries(self):
y_pred = np.array([1], dtype=np.float32)
y_true = np.array([1])
obj = metrics.IoU(num_classes=2, target_class_ids=[0, 1])
result = obj(y_true, y_pred)
# cm = [[0, 0],
# [0, 1]]
# sum_row = [0, 1], sum_col = [0, 1], true_positives = [0, 1]
# iou = true_positives / (sum_row + sum_col - true_positives))
expected_result = (1 / (1 + 1 - 1)) / 1
self.assertAllClose(result, expected_result, atol=1e-3)
@pytest.mark.requires_trainable_backend
def test_compilation(self):
m_obj = metrics.MeanIoU(num_classes=2, ignore_class=0)
model = models.Sequential(
[
layers.Dense(2, activation="softmax"),
]
)
model.compile(optimizer="rmsprop", loss="mse", metrics=[m_obj])
model.fit(np.array([[1.0, 1.0]]), np.array([[1.0, 0.0]]))
| IoUTest |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/transfers/test_s3_to_dynamodb.py | {
"start": 3570,
"end": 9556
} | class ____:
@mock.patch.object(DynamoDBHook, "get_waiter")
@mock.patch("botocore.client.BaseClient._make_api_call")
def test_s3_to_dynamodb_new_table_wait_for_completion(self, mock_make_api_call, mock_wait, new_table_op):
mock_make_api_call.return_value = SUCCESS_S3_RESPONSE
res = new_table_op.execute(None)
mock_make_api_call.assert_called_once_with("ImportTable", IMPORT_TABLE_RESPONSE)
mock_wait.assert_called_once_with("import_table")
mock_wait.return_value.wait.assert_called_once_with(
ImportArn="arn:aws:dynamodb:import", WaiterConfig={"Delay": 30, "MaxAttempts": 240}
)
assert res == "arn:aws:dynamodb:import"
@pytest.mark.parametrize(
"delete_on_error",
[
pytest.param(
True,
id="delete-on-error",
),
pytest.param(
False,
id="no-delete-on-error",
),
],
)
@mock.patch("airflow.providers.amazon.aws.transfers.s3_to_dynamodb.DynamoDBHook")
def test_s3_to_dynamodb_new_table_delete_on_error(self, mock_hook, new_table_op, delete_on_error):
mock_wait = mock.Mock()
mock_wait.side_effect = WaiterError(name="NetworkError", reason="unit test error", last_response={})
mock_hook.return_value.get_waiter.return_value.wait = mock_wait
new_table_op.delete_on_error = delete_on_error
mock_hook.return_value.get_import_status.return_value = "FAILED", "400", "General error"
with pytest.raises(AirflowException):
new_table_op.execute(None)
if delete_on_error:
mock_hook.return_value.client.delete_table.assert_called_once_with(TableName="test-table")
else:
mock_hook.return_value.client.delete_table.assert_not_called()
@mock.patch("botocore.client.BaseClient._make_api_call")
def test_s3_to_dynamodb_new_table_no_wait(self, mock_make_api_call):
mock_make_api_call.return_value = SUCCESS_S3_RESPONSE
op = S3ToDynamoDBOperator(
task_id=TASK_ID,
s3_key=S3_KEY_PREFIX,
s3_bucket=BUCKET,
dynamodb_table_name=DYNAMODB_TABLE_NAME,
dynamodb_attributes=DYNAMODB_ATTRIBUTES,
dynamodb_key_schema=DYNAMODB_KEY_SCHEMA,
aws_conn_id=S3_CONN_ID,
import_table_creation_kwargs={"ProvisionedThroughput": DYNAMODB_PROV_THROUGHPUT},
wait_for_completion=False,
)
res = op.execute(None)
mock_make_api_call.assert_called_once_with("ImportTable", IMPORT_TABLE_RESPONSE)
assert res == "arn:aws:dynamodb:import"
@mock.patch("botocore.client.BaseClient._make_api_call")
def test_s3_to_dynamodb_new_table_client_error(self, mock_make_api_call, new_table_op):
mock_make_api_call.side_effect = ClientError(
error_response={"Error": {"Message": "Error message", "Code": "GeneralException"}},
operation_name="UnitTest",
)
with pytest.raises(AirflowException) as excinfo:
new_table_op.execute(None)
assert "S3 load into DynamoDB table failed with error" in str(excinfo.value), (
"Exception message not passed correctly"
)
@mock.patch("botocore.client.BaseClient._make_api_call")
def test_s3_to_dynamodb_new_table_job_startup_error(self, mock_make_api_call, new_table_op):
mock_make_api_call.return_value = FAILURE_S3_RESPONSE
exp_err_msg = "S3 into Dynamodb job creation failed. Code: 300. Failure: invalid csv format"
with pytest.raises(AirflowException) as excinfo:
new_table_op.execute(None)
assert str(excinfo.value) == exp_err_msg, "Exception message not passed correctly"
@mock.patch(
"airflow.providers.amazon.aws.transfers.s3_to_dynamodb.S3ToDynamoDBOperator._load_into_new_table"
)
@mock.patch.object(DynamoDBHook, "get_conn")
def test_s3_to_dynamodb_existing_table(self, mock_get_conn, new_table_load_mock, exist_table_op):
response = [
{
"Items": [
{"Date": {"N": "54675846"}, "Message": {"S": "Message1"}, "_id": {"S": "1"}},
{"Date": {"N": "54675847"}, "Message": {"S": "Message2"}, "_id": {"S": "2"}},
{"Date": {"N": "54675857"}, "Message": {"S": "Message3"}, "_id": {"S": "4"}},
]
}
]
batch_writer_calls = [mock.call(Item=item) for item in response[0]["Items"]]
mock_paginator = mock.Mock()
mock_paginator.paginate.return_value = response
mock_conn = mock.MagicMock()
mock_client = mock.Mock()
mock_put_item = mock.Mock()
mock_client.get_paginator.return_value = mock_paginator
mock_conn.meta.client = mock_client
mock_conn.Table.return_value.batch_writer.return_value.__enter__.return_value.put_item = mock_put_item
mock_conn.Table.return_value.table_arn = "arn:aws:dynamodb"
mock_get_conn.return_value = mock_conn
res = exist_table_op.execute(None)
new_table_load_mock.assert_called_once_with(
table_name=exist_table_op.tmp_table_name, delete_on_error=False
)
mock_client.get_paginator.assert_called_once_with("scan")
mock_client.get_paginator.return_value.paginate.assert_called_once_with(
TableName=exist_table_op.tmp_table_name,
Select="ALL_ATTRIBUTES",
ReturnConsumedCapacity="NONE",
ConsistentRead=True,
)
mock_conn.Table.assert_called_with("test-table")
mock_conn.Table.return_value.batch_writer.assert_called_once_with(overwrite_by_pkeys=["attribute_a"])
mock_put_item.assert_has_calls(batch_writer_calls)
mock_client.delete_table.assert_called_once_with(TableName=exist_table_op.tmp_table_name)
assert res == "arn:aws:dynamodb"
| TestS3ToDynamoDBOperator |
python | aio-libs__aiohttp | examples/combined_middleware.py | {
"start": 917,
"end": 1604
} | class ____:
"""Middleware that logs request timing and response status."""
async def __call__(
self,
request: ClientRequest,
handler: ClientHandlerType,
) -> ClientResponse:
start_time = time.monotonic()
# Log request
_LOGGER.info("[REQUEST] %s %s", request.method, request.url)
# Execute request
response = await handler(request)
# Log response
duration = time.monotonic() - start_time
_LOGGER.info(
"[RESPONSE] %s in %.2fs - Status: %s",
request.url.path,
duration,
response.status,
)
return response
| LoggingMiddleware |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 195625,
"end": 195964
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "topic")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
topic = sgqlc.types.Field("Topic", graphql_name="topic")
| AcceptTopicSuggestionPayload |
python | tiangolo__fastapi | fastapi/_compat/v1.py | {
"start": 5230,
"end": 5280
} | class ____:
ref_template: str
| GenerateJsonSchema |
python | walkccc__LeetCode | solutions/290. Word Pattern/290.py | {
"start": 0,
"end": 157
} | class ____:
def wordPattern(self, pattern: str, str: str) -> bool:
t = str.split()
return [*map(pattern.index, pattern)] == [*map(t.index, t)]
| Solution |
python | pytorch__pytorch | torch/testing/_internal/custom_tensor.py | {
"start": 333,
"end": 2647
} | class ____(torch.Tensor):
@staticmethod
def __new__(cls, elem):
shape = elem.shape
kwargs = {}
kwargs["strides"] = elem.stride()
kwargs["storage_offset"] = elem.storage_offset()
kwargs["device"] = elem.device
kwargs["layout"] = elem.layout
kwargs["requires_grad"] = elem.requires_grad
kwargs["dtype"] = elem.dtype
return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
def __init__(self, elem):
self.elem = elem
self.constant_attribute = 4
def __repr__(self):
inner_repr = repr(self.elem)
return f"CustomTensor({inner_repr})"
def get_complicated_metadata(self):
return FancyNamedTuple(self.constant_attribute, self.constant_attribute)
def __tensor_flatten__(self):
return ["elem"], self.constant_attribute
def add_constant(self, a):
self.constant_attribute += a
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert meta is not None
elem = inner_tensors["elem"]
out = ConstantExtraMetadataTensor(elem)
out.constant_attribute = meta
return out
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs):
if kwargs is None:
kwargs = {}
args_inner = pytree.tree_map_only(
ConstantExtraMetadataTensor, lambda x: x.elem, args
)
kwargs_inner = pytree.tree_map_only(
ConstantExtraMetadataTensor, lambda x: x.elem, kwargs
)
out_inner = func(*args_inner, **kwargs_inner)
out_inner_flat, spec = pytree.tree_flatten(out_inner)
# for aten ops that return non-tensors, just assume that
# our cust inner tensors return the same value
out_flat = [
ConstantExtraMetadataTensor(o_inner)
if isinstance(o_inner, torch.Tensor)
else o_inner
for o_inner in out_inner_flat
]
out = pytree.tree_unflatten(out_flat, spec)
return return_and_correct_aliasing(func, args, kwargs, out)
# A simple tensor subclass that always returns plain tensor during __torch_dispatch__
# It is similar to TwoTensor and is used to simulate torchao quantized tensors
| ConstantExtraMetadataTensor |
python | readthedocs__readthedocs.org | readthedocs/organizations/filters.py | {
"start": 569,
"end": 1470
} | class ____(ModelFilterSet):
"""
Organization base filter set.
Adds some methods that are used for organization related queries and common
base querysets for filter fields.
Note, the querysets here are also found in the organization base views and
mixin classes. These are redefined here instead of passing in the querysets
from the view.
:param organization: Organization instance for current view
"""
def __init__(self, *args, organization=None, **kwargs):
self.organization = organization
super().__init__(*args, **kwargs)
def get_organization_queryset(self):
return Organization.objects.for_user(user=self.request.user)
def get_team_queryset(self):
return Team.objects.member(
self.request.user,
organization=self.organization,
).select_related("organization")
| OrganizationFilterSet |
python | PrefectHQ__prefect | src/prefect/server/database/query_components.py | {
"start": 28106,
"end": 28606
} | class ____(sa.TypeDecorator[list[UUID]]):
"""Map a JSON list of strings back to a list of UUIDs at the result loading stage"""
impl: Union[TypeEngine[Any], type[TypeEngine[Any]]] = sa.JSON()
cache_ok: Optional[bool] = True
def process_result_value(
self, value: Optional[list[Union[str, UUID]]], dialect: sa.Dialect
) -> Optional[list[UUID]]:
if value is None:
return value
return [v if isinstance(v, UUID) else UUID(v) for v in value]
| UUIDList |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/hook.py | {
"start": 2064,
"end": 7580
} | class ____:
"""The ``context`` object available to a hook function on an DagsterEvent."""
def __init__(
self,
step_execution_context: StepExecutionContext,
hook_def: HookDefinition,
):
self._step_execution_context = step_execution_context
self._hook_def = check.inst_param(hook_def, "hook_def", HookDefinition)
self._required_resource_keys = hook_def.required_resource_keys
self._resources = step_execution_context.scoped_resources_builder.build(
self._required_resource_keys
)
@public
@property
def job_name(self) -> str:
"""The name of the job where this hook is being triggered."""
return self._step_execution_context.job_name
@public
@property
def run_id(self) -> str:
"""The id of the run where this hook is being triggered."""
return self._step_execution_context.run_id
@public
@property
def hook_def(self) -> HookDefinition:
"""The hook that the context object belongs to."""
return self._hook_def
@public
@property
def instance(self) -> "DagsterInstance":
"""The instance configured to run the current job."""
return self._step_execution_context.instance
@property
def op(self) -> Node:
"""The op instance associated with the hook."""
return self._step_execution_context.op
@public
@property
def step_key(self) -> str:
"""The key for the step where this hook is being triggered."""
return self._step_execution_context.step.key
@public
@property
def required_resource_keys(self) -> AbstractSet[str]:
"""Resources required by this hook."""
return self._required_resource_keys
@public
@property
def resources(self) -> "Resources":
"""Resources available in the hook context."""
return self._resources
@public
@property
def op_config(self) -> Any:
"""The parsed config specific to this op."""
op_config = self._step_execution_context.resolved_run_config.ops.get(
str(self._step_execution_context.step.node_handle)
)
return op_config.config if op_config else None
# Because of the fact that we directly use the log manager of the step, if a user calls
# hook_context.log.with_tags, then they will end up mutating the step's logging tags as well.
# This is not problematic because the hook only runs after the step has been completed.
@public
@property
def log(self) -> DagsterLogManager:
"""Centralized log dispatch from user code."""
return self._step_execution_context.log
@public
@property
def op_exception(self) -> Optional[BaseException]:
"""The thrown exception in a failed op."""
exc = self._step_execution_context.step_exception
if isinstance(exc, RetryRequestedFromPolicy):
return exc.__cause__
return exc
@public
@property
def op_output_values(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:
"""The computed output values.
Returns a dictionary where keys are output names and the values are:
* the output values in the normal case
* a dictionary from mapping key to corresponding value in the mapped case
"""
results: dict[str, Union[Any, dict[str, Any]]] = {}
captured = self._step_execution_context.step_output_capture
if captured is None:
check.failed("Outputs were unexpectedly not captured for hook")
# make the returned values more user-friendly
for step_output_handle, value in captured.items():
if step_output_handle.mapping_key:
if results.get(step_output_handle.output_name) is None:
results[step_output_handle.output_name] = {
step_output_handle.mapping_key: value
}
else:
results[step_output_handle.output_name][step_output_handle.mapping_key] = value
else:
results[step_output_handle.output_name] = value
return results
@public
@property
def op_output_metadata(self) -> Mapping[str, Union[Any, Mapping[str, Any]]]:
"""The applied output metadata.
Returns a dictionary where keys are output names and the values are:
* the applied output metadata in the normal case
* a dictionary from mapping key to corresponding metadata in the mapped case
"""
results: dict[str, Union[Any, dict[str, Any]]] = {}
captured = self._step_execution_context.step_output_metadata_capture
if captured is None:
check.failed("Outputs were unexpectedly not captured for hook")
# make the returned values more user-friendly
for step_output_handle, metadata in captured.items():
if step_output_handle.mapping_key:
if results.get(step_output_handle.output_name) is None:
results[step_output_handle.output_name] = {
step_output_handle.mapping_key: metadata
}
else:
results[step_output_handle.output_name][step_output_handle.mapping_key] = (
metadata
)
else:
results[step_output_handle.output_name] = metadata
return results
| HookContext |
python | google__jax | tests/pallas/tpu_splash_attention_kernel_test.py | {
"start": 2061,
"end": 2314
} | class ____:
def get_mask(self) -> mask_lib.Mask:
raise NotImplementedError()
def full_mask_strategy(
q_seq_len: int, kv_seq_len: int
) -> hps.SearchStrategy[Mask]:
return hps.just(FullMask(q_seq_len, kv_seq_len))
@dataclasses.dataclass
| Mask |
python | huggingface__transformers | src/transformers/models/vitpose/modeling_vitpose.py | {
"start": 1351,
"end": 2488
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Loss is not supported at this moment. See https://github.com/ViTAE-Transformer/ViTPose/tree/main/mmpose/models/losses for further detail.
heatmaps (`torch.FloatTensor` of shape `(batch_size, num_keypoints, height, width)`):
Heatmaps as predicted by the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
(also called feature maps) of the model at the output of each stage.
"""
loss: Optional[torch.FloatTensor] = None
heatmaps: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@auto_docstring
| VitPoseEstimatorOutput |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 33636,
"end": 34189
} | class ____(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ("body",)
body: list[Node]
# make sure nobody creates custom nodes
def _failing_new(*args: t.Any, **kwargs: t.Any) -> "te.NoReturn":
raise TypeError("can't create custom node types")
NodeType.__new__ = staticmethod(_failing_new) # type: ignore
del _failing_new
| ScopedEvalContextModifier |
python | pandas-dev__pandas | pandas/tests/arrays/masked/test_indexing.py | {
"start": 110,
"end": 3198
} | class ____:
def _check_setitem_invalid(self, arr, invalid):
msg = f"Invalid value '{invalid!s}' for dtype '{arr.dtype}'"
msg = re.escape(msg)
with pytest.raises(TypeError, match=msg):
arr[0] = invalid
with pytest.raises(TypeError, match=msg):
arr[:] = invalid
with pytest.raises(TypeError, match=msg):
arr[[0]] = invalid
# FIXME: don't leave commented-out
# with pytest.raises(TypeError):
# arr[[0]] = [invalid]
# with pytest.raises(TypeError):
# arr[[0]] = np.array([invalid], dtype=object)
# Series non-coercion, behavior subject to change
ser = pd.Series(arr)
with pytest.raises(TypeError, match=msg):
ser[0] = invalid
# TODO: so, so many other variants of this...
_invalid_scalars = [
1 + 2j,
"True",
"1",
"1.0",
pd.NaT,
np.datetime64("NaT"),
np.timedelta64("NaT"),
]
@pytest.mark.parametrize(
"invalid", _invalid_scalars + [1, 1.0, np.int64(1), np.float64(1)]
)
def test_setitem_validation_scalar_bool(self, invalid):
arr = pd.array([True, False, None], dtype="boolean")
self._check_setitem_invalid(arr, invalid)
@pytest.mark.parametrize("invalid", _invalid_scalars + [True, 1.5, np.float64(1.5)])
def test_setitem_validation_scalar_int(self, invalid, any_int_ea_dtype):
arr = pd.array([1, 2, None], dtype=any_int_ea_dtype)
self._check_setitem_invalid(arr, invalid)
@pytest.mark.parametrize("invalid", _invalid_scalars + [True])
def test_setitem_validation_scalar_float(self, invalid, float_ea_dtype):
arr = pd.array([1, 2, None], dtype=float_ea_dtype)
self._check_setitem_invalid(arr, invalid)
@pytest.mark.parametrize(
"dtype",
[
"Float64",
pytest.param("float64[pyarrow]", marks=td.skip_if_no("pyarrow")),
],
)
@pytest.mark.parametrize("indexer", [1, [1], [False, True, False]])
def test_setitem_nan_in_float64_array(dtype, indexer, using_nan_is_na):
arr = pd.array([0, pd.NA, 1], dtype=dtype)
arr[indexer] = np.nan
if not using_nan_is_na:
assert np.isnan(arr[1])
else:
assert arr[1] is pd.NA
@pytest.mark.parametrize(
"dtype",
[
"Int64",
pytest.param("int64[pyarrow]", marks=td.skip_if_no("pyarrow")),
],
)
@pytest.mark.parametrize("indexer", [1, [1], [False, True, False]])
def test_setitem_nan_in_int64_array(dtype, indexer, using_nan_is_na):
arr = pd.array([0, 1, 2], dtype=dtype)
if not using_nan_is_na:
err = TypeError
msg = "Invalid value 'nan' for dtype 'Int64'"
if dtype == "int64[pyarrow]":
import pyarrow as pa
err = pa.lib.ArrowInvalid
msg = "Could not convert nan with type float"
with pytest.raises(err, match=msg):
arr[indexer] = np.nan
assert arr[1] == 1
else:
arr[indexer] = np.nan
assert arr[1] is pd.NA
| TestSetitemValidation |
python | pypa__pip | tests/unit/test_utils.py | {
"start": 10333,
"end": 11452
} | class ____:
def __init__(self, duration: int = 1) -> None:
self.succeed_after = time.time() + duration
def call(self, *args: Any, **kw: Any) -> None:
"""Fail with OSError self.max_fails times"""
if time.time() < self.succeed_after:
raise OSError("Failed")
def test_rmtree_retries(monkeypatch: pytest.MonkeyPatch) -> None:
"""
Test pip._internal.utils.rmtree will retry failures
"""
monkeypatch.setattr(shutil, "rmtree", Failer(duration=1).call)
rmtree("foo")
def test_rmtree_retries_for_3sec(monkeypatch: pytest.MonkeyPatch) -> None:
"""
Test pip._internal.utils.rmtree will retry failures for no more than 3 sec
"""
monkeypatch.setattr(shutil, "rmtree", Failer(duration=5).call)
with pytest.raises(OSError):
rmtree("foo")
if sys.byteorder == "little":
expected_byte_string = (
"b'\\xff\\xfe/\\x00p\\x00a\\x00t\\x00h\\x00/\\x00d\\x00\\xe9\\x00f\\x00'"
)
elif sys.byteorder == "big":
expected_byte_string = (
"b'\\xfe\\xff\\x00/\\x00p\\x00a\\x00t\\x00h\\x00/\\x00d\\x00\\xe9\\x00f'"
)
| Failer |
python | great-expectations__great_expectations | great_expectations/metrics/column/values_non_null.py | {
"start": 213,
"end": 283
} | class ____(MetricResult[ConditionValues]): ...
| ColumnValuesNonNullResult |
python | ray-project__ray | python/ray/tune/error.py | {
"start": 146,
"end": 259
} | class ____(TuneError):
"""Error that indicates a trial should not be retried."""
pass
| _AbortTrialExecution |
python | ray-project__ray | python/ray/data/_internal/datasource/numpy_datasource.py | {
"start": 283,
"end": 1251
} | class ____(FileBasedDatasource):
"""Numpy datasource, for reading and writing Numpy files."""
_COLUMN_NAME = "data"
_FILE_EXTENSIONS = ["npy"]
def __init__(
self,
paths: Union[str, List[str]],
numpy_load_args: Optional[Dict[str, Any]] = None,
**file_based_datasource_kwargs,
):
super().__init__(paths, **file_based_datasource_kwargs)
if numpy_load_args is None:
numpy_load_args = {}
self.numpy_load_args = numpy_load_args
def _read_stream(self, f: "pyarrow.NativeFile", path: str) -> Iterator[Block]:
# TODO(ekl) Ideally numpy can read directly from the file, but it
# seems like it requires the file to be seekable.
buf = BytesIO()
data = f.readall()
buf.write(data)
buf.seek(0)
yield BlockAccessor.batch_to_block(
{"data": np.load(buf, allow_pickle=True, **self.numpy_load_args)}
)
| NumpyDatasource |
python | spyder-ide__spyder | spyder/widgets/github/backend.py | {
"start": 3126,
"end": 10214
} | class ____(BaseBackend):
"""
This backend sends the crash report on a github issue tracker::
https://github.com/gh_owner/gh_repo
Usage::
github_backend = spyder.widgets.github.backend.GithubBackend(
'spyder-ide', 'spyder')
"""
def __init__(self, gh_owner, gh_repo, formatter=None, parent_widget=None):
"""
:param gh_owner: Name of the owner of the github repository.
:param gh_repo: Name of the repository on github.
"""
super().__init__(
formatter, "Submit on github",
"Submit the issue on our issue tracker on github", None,
parent_widget=parent_widget)
self.gh_owner = gh_owner
self.gh_repo = gh_repo
self._show_msgbox = True # False when running the test suite
def send_report(self, title, body, application_log=None):
logger.debug('sending bug report on github\ntitle=%s\nbody=%s',
title, body)
# Credentials
credentials = self.get_user_credentials()
token = credentials['token']
if token is None:
return False
logger.debug('got user credentials')
try:
auth = github.Auth.Token(token)
except Exception as exc:
logger.warning("Invalid token.")
if self._show_msgbox:
# Raise error so that SpyderErrorDialog can capture and
# redirect user to web interface.
raise exc
return False
gh = github.Github(auth=auth)
# upload log file as a gist
if application_log:
url = self.upload_log_file(gh, application_log)
body += '\nApplication log: %s' % url
try:
repo = gh.get_repo(f"{self.gh_owner}/{self.gh_repo}")
issue = repo.create_issue(title=title, body=body)
except github.BadCredentialsException as exc:
logger.warning('Failed to create issue on Github. '
'Status=%d: %s', exc.status, exc.data['message'])
if self._show_msgbox:
QMessageBox.warning(
self.parent_widget, _('Invalid credentials'),
_('Failed to create issue on Github, '
'invalid credentials...')
)
# Raise error so that SpyderErrorDialog can capture and
# redirect user to web interface.
raise exc
return False
except github.GithubException as exc:
logger.warning('Failed to create issue on Github. '
'Status=%d: %s', exc.status, exc.data['message'])
if self._show_msgbox:
QMessageBox.warning(
self.parent_widget,
_('Failed to create issue'),
_('Failed to create issue on Github. Status %d: %s') %
(exc.status, exc.data['message'])
)
# Raise error so that SpyderErrorDialog can capture and
# redirect user to web interface.
raise exc
return False
except Exception as exc:
logger.warning('Failed to create issue on Github.\n%s', exc)
if self._show_msgbox:
# Raise error so that SpyderErrorDialog can capture and
# redirect user to web interface.
raise exc
return False
else:
if self._show_msgbox:
ret = QMessageBox.question(
self.parent_widget, _('Issue created on Github'),
_('Issue successfully created. Would you like to open the '
'issue in your web browser?'))
if ret in [QMessageBox.Yes, QMessageBox.Ok]:
webbrowser.open(issue.html_url)
return True
def _get_credentials_from_settings(self):
"""Get the stored credentials if any."""
remember_token = CONF.get('main', 'report_error/remember_token')
return remember_token
def _store_token(self, token, remember=False):
"""Store token for future use."""
if token and remember:
try:
keyring.set_password('github', 'token', token)
except Exception:
if self._show_msgbox:
QMessageBox.warning(self.parent_widget,
_('Failed to store token'),
_('It was not possible to securely '
'save your token. You will be '
'prompted for your Github token '
'next time you want to report '
'an issue.'))
remember = False
CONF.set('main', 'report_error/remember_token', remember)
def get_user_credentials(self):
"""Get user credentials with the login dialog."""
token = None
remember_token = self._get_credentials_from_settings()
if remember_token:
# Get token from keyring
try:
token = keyring.get_password('github', 'token')
except Exception:
# No safe keyring backend
if self._show_msgbox:
QMessageBox.warning(self.parent_widget,
_('Failed to retrieve token'),
_('It was not possible to retrieve '
'your token. Please introduce it '
'again.'))
if not running_under_pytest():
credentials = DlgGitHubLogin.login(
self.parent_widget,
token,
remember_token)
if credentials['token']:
self._store_token(credentials['token'],
credentials['remember_token'])
CONF.set('main', 'report_error/remember_token',
credentials['remember_token'])
else:
return dict(token=token,
remember_token=remember_token)
return credentials
def upload_log_file(self, gh, log_content):
auth_user = gh.get_user()
try:
qApp = QApplication.instance()
qApp.setOverrideCursor(Qt.WaitCursor)
gist = auth_user.create_gist(
description="SpyderIDE log", public=True,
files={'SpyderIDE.log': github.InputFileContent(log_content)}
)
qApp.restoreOverrideCursor()
except github.GithubException as exc:
msg = (
'Failed to upload log report as a gist. Status '
f'{exc.status}: {exc.data["message"]}'
)
logger.warning(msg)
return msg
else:
return gist.html_url
| GithubBackend |
python | realpython__materials | build-a-blog-from-scratch-django/django-blog/blog/migrations/0001_initial.py | {
"start": 125,
"end": 1606
} | class ____(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('body', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('last_modified', models.DateTimeField(auto_now=True)),
('categories', models.ManyToManyField(related_name='posts', to='blog.category')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=60)),
('body', models.TextField()),
('created_on', models.DateTimeField(auto_now_add=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.post')),
],
),
]
| Migration |
python | boto__boto3 | boto3/docs/docstring.py | {
"start": 2213,
"end": 2360
} | class ____(LazyLoadedDocstring):
def _write_docstring(self, *args, **kwargs):
document_batch_action(*args, **kwargs)
| BatchActionDocstring |
python | huggingface__transformers | src/transformers/models/diffllama/modular_diffllama.py | {
"start": 19678,
"end": 19971
} | class ____(LlamaForTokenClassification):
pass
__all__ = [
"DiffLlamaPreTrainedModel",
"DiffLlamaModel",
"DiffLlamaForCausalLM",
"DiffLlamaForSequenceClassification",
"DiffLlamaForQuestionAnswering",
"DiffLlamaForTokenClassification",
]
| DiffLlamaForTokenClassification |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/type_api.py | {
"start": 2618,
"end": 2713
} | class ____(Protocol[_T_co]):
def __call__(self, value: Any) -> str: ...
| _LiteralProcessorType |
python | kamyu104__LeetCode-Solutions | Python/largest-component-size-by-common-factor.py | {
"start": 700,
"end": 1889
} | class ____(object):
def largestComponentSize(self, A):
"""
:type A: List[int]
:rtype: int
"""
def prime_factors(i): # prime factor decomposition
result = []
d = 2
if i%d == 0:
while i%d == 0:
i //= d
result.append(d)
d = 3
while d*d <= i:
if i%d == 0:
while i%d == 0:
i //= d
result.append(d)
d += 2
if i != 1:
result.append(i)
return result
union_find = UnionFind(len(A))
nodesWithCommonFactor = collections.defaultdict(int)
for i in xrange(len(A)):
for factor in prime_factors(A[i]):
if factor not in nodesWithCommonFactor:
nodesWithCommonFactor[factor] = i
union_find.union_set(nodesWithCommonFactor[factor], i)
return max(union_find.size)
# Time: O(f * n), f is the max number of unique prime factors
# Space: O(p + n), p is the total number of unique primes
import collections
| Solution |
python | astropy__astropy | astropy/utils/masked/tests/test_functions.py | {
"start": 16747,
"end": 19042
} | class ____(MaskedArraySetup):
def test_broadcast_to(self):
shape = self.ma.shape
ba = np.broadcast_to(self.mb, shape, subok=True)
assert ba.shape == shape
assert ba.mask.shape == shape
expected = Masked(
np.broadcast_to(self.mb.unmasked, shape, subok=True),
np.broadcast_to(self.mb.mask, shape, subok=True),
)
assert_masked_equal(ba, expected)
def test_broadcast_to_using_apply(self):
# Partially just to ensure we cover the relevant part of _apply.
shape = self.ma.shape
ba = self.mb._apply(np.broadcast_to, shape=shape, subok=True)
assert ba.shape == shape
assert ba.mask.shape == shape
expected = Masked(
np.broadcast_to(self.mb.unmasked, shape, subok=True),
np.broadcast_to(self.mb.mask, shape, subok=True),
)
assert_masked_equal(ba, expected)
def test_broadcast_arrays(self):
mb = np.broadcast_arrays(self.ma, self.mb, self.mc, subok=True)
b = np.broadcast_arrays(self.a, self.b, self.c, subok=True)
bm = np.broadcast_arrays(self.mask_a, self.mask_b, self.mask_c)
for mb_, b_, bm_ in zip(mb, b, bm):
assert_array_equal(mb_.unmasked, b_)
assert_array_equal(mb_.mask, bm_)
def test_broadcast_arrays_not_all_masked(self):
mb = np.broadcast_arrays(self.a, self.mb, self.c, subok=True)
assert_array_equal(mb[0], self.a)
expected1 = np.broadcast_to(self.mb, self.a.shape, subok=True)
assert_masked_equal(mb[1], expected1)
expected2 = np.broadcast_to(self.c, self.a.shape, subok=True)
assert_array_equal(mb[2], expected2)
def test_broadcast_arrays_subok_false(self):
# subok affects ndarray subclasses but not masking itself.
mb = np.broadcast_arrays(self.ma, self.mb, self.mc, subok=False)
assert all(type(mb_.unmasked) is np.ndarray for mb_ in mb)
b = np.broadcast_arrays(self.a, self.b, self.c, subok=False)
mask_b = np.broadcast_arrays(self.mask_a, self.mask_b, self.mask_c, subok=False)
for mb_, b_, mask_ in zip(mb, b, mask_b):
assert_array_equal(mb_.unmasked, b_)
assert_array_equal(mb_.mask, mask_)
| TestMaskedArrayBroadcast |
python | prabhupant__python-ds | data_structures/graphs/shortest_path_unweighted_graph.py | {
"start": 529,
"end": 1619
} | class ____:
def __init__(self, vertices):
self.vertices = vertices
self.graph = defaultdict(list)
def add_edge(self, u, v):
self.graph[u].append(v)
self.graph[v].append(u)
def bfs(self, s):
parent = [-1] * self.vertices
visited = [False] * self.vertices
visited[s] = True
queue = []
queue.append(s)
while queue:
s = queue.pop(0)
for i in self.graph[s]:
if visited[i] == False:
queue.append(i)
parent[i] = s
visited[i] = True
return parent
def shortest_path(self, source, dest):
parent = self.bfs(source)
while True:
print(dest, end=' ')
dest = parent[dest]
if dest == source:
break
g = Graph(8)
g.add_edge(0, 1)
g.add_edge(0, 3)
g.add_edge(1, 2)
g.add_edge(3, 4)
g.add_edge(3, 7)
g.add_edge(4, 5)
g.add_edge(4, 6)
g.add_edge(4, 7)
g.add_edge(5, 6)
g.add_edge(6, 7)
g.shortest_path(0, 7) | Graph |
python | django__django | tests/prefetch_related/models.py | {
"start": 6418,
"end": 6607
} | class ____(models.Model):
name = models.CharField(max_length=50)
house = models.ForeignKey(House, models.CASCADE, related_name="rooms")
class Meta:
ordering = ["id"]
| Room |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/unit_tests/test_components.py | {
"start": 8689,
"end": 20377
} | class ____:
@dataclass
class _FakeResponse:
chunks: List[bytes]
status: int = 200
def iter_content(self, chunk_size=1):
# Ignore chunk_size; we already control chunking via self.chunks
for c in self.chunks:
yield c
def raise_for_status(self):
if self.status >= 400:
raise Exception(f"HTTP {self.status}")
@staticmethod
def _decode_all(decoder: GoogleAdsStreamingDecoder, resp: "_FakeResponse"):
return {"results": [row for decoder_output in decoder.decode(resp) for row in decoder_output["results"]]}
def test_is_stream_response_true(self, decoder):
assert decoder.is_stream_response() is True
def test_emits_each_row_from_single_message(self, decoder):
"""One server message with two results; delivered as a single chunk."""
msg = [
{
"results": [
{"campaign": {"id": "1", "name": "A"}},
{"campaign": {"id": "2", "name": "B"}},
],
"fieldMask": "campaign.id,campaign.name",
}
]
raw = json.dumps(msg).encode("utf-8")
resp = self._FakeResponse(chunks=[raw])
out = self._decode_all(decoder, resp)
assert out == {"results": msg[0]["results"]}
def test_handles_chunk_boundaries_inside_item_and_between_objects(self, decoder):
"""The object is split across arbitrary byte boundaries, including within strings."""
msg = [
{
"results": [
{"segments": {"date": "2025-10-01"}, "metrics": {"clicks": 1}},
{"segments": {"date": "2025-10-02"}, "metrics": {"clicks": 2}},
]
}
]
s = json.dumps(msg)
# Split deliberately in awkward places (inside keys/values)
chunks = [
s[:27].encode(), # '[{"results": [{"segments":'
s[27:48].encode(), # ' {"date": "2025-10-01"'
s[48:73].encode(), # '}, "metrics": {"clicks": '
s[73:74].encode(), # '1'
s[74:97].encode(), # '}}, {"segments": {"date"'
s[97:122].encode(), # ': "2025-10-02"}, "metric'
s[122:].encode(), # 's": {"clicks": 2}}]}]'
]
resp = self._FakeResponse(chunks=chunks)
out = self._decode_all(decoder, resp)
assert out == {"results": msg[0]["results"]}
def test_braces_inside_strings_do_not_confuse_depth(self, decoder):
"""Braces and brackets inside string values must not break item boundary tracking."""
tricky_text = "note: has braces { like this } and [also arrays]"
msg = [
{
"results": [
{"ad": {"id": "42"}, "desc": tricky_text},
{"ad": {"id": "43"}, "desc": tricky_text},
],
"fieldMask": "ad.id,desc",
}
]
raw = json.dumps(msg).encode("utf-8")
# Split to ensure the string spans chunks
resp = self._FakeResponse(chunks=[raw[:40], raw[40:120], raw[120:]])
out = self._decode_all(decoder, resp)
assert out == {"results": msg[0]["results"]}
def test_nested_objects_and_arrays_within_row(self, decoder):
"""A row containing nested dicts and arrays; ensures per-item depth is tracked correctly."""
row = {
"campaign": {"id": "9", "labels": [{"id": 1}, {"id": 2}]},
"metrics": {"conversions": [0.1, 0.2, 0.3]},
}
msg = [{"results": [row]}]
raw = json.dumps(msg).encode("utf-8")
# Split across array/object boundaries
resp = self._FakeResponse(chunks=[raw[:15], raw[15:35], raw[35:60], raw[60:]])
out = self._decode_all(decoder, resp)
assert out == msg[0]
def test_empty_results_array_yields_nothing(self, decoder):
msg = [{"results": []}]
raw = json.dumps(msg).encode("utf-8")
resp = self._FakeResponse(chunks=[raw])
out = self._decode_all(decoder, resp)
assert out == msg[0]
def test_brackets_inside_strings_are_ignored_for_item_boundaries(self, decoder):
"""
Ensure [square] and {curly} brackets that appear inside quoted strings
do NOT affect depth tracking and that rows are emitted correctly.
Also covers escaped quotes and backslashes inside the same strings,
with splits across chunk boundaries.
"""
tricky1 = r"line with [brackets] and {braces} and escaped quote \" and backslash \\"
tricky2 = r"second line with closing brackets ]} \} \] \" \\ and , commas"
msg = [
{
"results": [
{"ad": {"id": "101"}, "text": tricky1},
{"ad": {"id": "102"}, "text": tricky2},
],
"fieldMask": "ad.id,text",
}
]
raw = json.dumps(msg).encode("utf-8")
# Split deliberately so the tricky strings are cut across chunk boundaries
splits = [35, 70, 105, 160, 220, len(raw)]
chunks = []
start = 0
for end in splits:
chunks.append(raw[start:end])
start = end
resp = self._FakeResponse(chunks=chunks)
out = self._decode_all(decoder, resp)
assert out == {"results": msg[0]["results"]}
def test_raises_on_unfinished_record_object(self, decoder):
"""
Stream ends while a record object is not fully closed → must raise.
Example: '[{"results":[{"a":1}, {"b":2' (missing closing } ] })
"""
truncated = b'[{"results":[{"a":1}, {"b":2'
resp = self._FakeResponse(chunks=[truncated])
with pytest.raises(AirbyteTracedException):
# Force full consumption to reach EOF and trigger the strict check
_ = list(decoder.decode(resp))
def test_raises_on_unfinished_top_level_after_last_item_closed(self, decoder):
"""
Even if the last item '}' closed cleanly, if the enclosing array/object
isn't closed at EOF, we still raise.
Example: '{"results":[{"x":1}]' (missing final '}' )
"""
raw = b'[{"results":[{"x":1}]'
resp = self._FakeResponse(chunks=[raw])
with pytest.raises(AirbyteTracedException):
_ = list(decoder.decode(resp))
def test_compact_json_no_spaces(self, decoder):
msg = [{"results": [{"a": {"b": 1}}, {"a": {"b": 2}}]}]
raw = json.dumps(msg, separators=(",", ":")).encode("utf-8")
resp = self._FakeResponse(chunks=[raw])
out = self._decode_all(decoder, resp)
assert out == {"results": msg[0]["results"]}
def test_pretty_printed_json_with_indent_and_newlines(self, decoder):
msg = [{"results": [{"x": [1, 2, 3], "y": {"z": "ok"}}, {"x": [], "y": {"z": "still ok"}}]}]
raw = json.dumps(msg, indent=2).encode("utf-8")
resp = self._FakeResponse(chunks=[raw[:10], raw[10:40], raw[40:100], raw[100:]])
out = self._decode_all(decoder, resp)
assert out == {"results": msg[0]["results"]}
def test_whitespace_and_tabs_between_tokens(self, decoder):
msg = [{"results": [{"x": 1}, {"x": 2}]}]
s = json.dumps(msg)
noisy = s.replace("{", "{ \t\n").replace(":", " : \t").replace(",", " ,\n ").replace("}", " \n}")
resp = self._FakeResponse(chunks=[noisy.encode("utf-8")])
out = self._decode_all(decoder, resp)
assert out == {"results": msg[0]["results"]}
def test_midstream_chunked_encoding_error_propagates(self, decoder):
"""
A network break should surface as ChunkedEncodingError (not swallowed).
Some records may already have been yielded before the error.
"""
msg = [{"results": [{"i": 1}, {"i": 2}, {"i": 3}, {"i": 4}]}]
raw = json.dumps(msg).encode("utf-8")
splits = [len(raw) // 4, len(raw) // 2, 3 * len(raw) // 4, len(raw)]
chunks = [raw[: splits[0]], raw[splits[0] : splits[1]], raw[splits[1] : splits[2]], raw[splits[2] :]]
@dataclass
class _ErroringResponse:
parts: List[bytes]
raise_after_index: int
def iter_content(self, chunk_size=1):
for idx, p in enumerate(self.parts):
yield p
if idx == self.raise_after_index:
raise ChunkedEncodingError("simulated midstream break")
def raise_for_status(self):
pass
resp = _ErroringResponse(parts=chunks, raise_after_index=2)
with pytest.raises(ChunkedEncodingError):
_ = list(decoder.decode(resp))
def test_stream_consumed_error_propagates_immediately(self, decoder):
@dataclass
class _AlreadyConsumedResponse:
def iter_content(self, chunk_size=1):
raise StreamConsumedError("already consumed")
def raise_for_status(self):
pass
with pytest.raises(StreamConsumedError):
_ = list(decoder.decode(_AlreadyConsumedResponse()))
@dataclass
class _BodyResponse:
"""Respects chunk_size by slicing the body bytes."""
body: bytes
status: int = 200
def iter_content(self, chunk_size=1):
for i in range(0, len(self.body), chunk_size):
yield self.body[i : i + chunk_size]
def raise_for_status(self):
if self.status >= 400:
raise Exception(f"HTTP {self.status}")
def test_fast_path_under_threshold_uses_json_loads(self):
"""Body size == max_direct_decode_bytes - 1 - fast path is taken."""
decoder = GoogleAdsStreamingDecoder()
decoder.chunk_size = 1024
decoder.max_direct_decode_bytes = 5 * 1024
base = [{"results": [{"x": 1}]}]
raw = json.dumps(base, separators=(",", ":")).encode()
# pad to reach exactly the threshold
pad_len = decoder.max_direct_decode_bytes - len(raw)
base[0]["results"][0]["pad"] = "x" * (pad_len - 10)
raw = json.dumps(base, separators=(",", ":")).encode()
assert len(raw) == decoder.max_direct_decode_bytes - 1
resp = self._BodyResponse(raw)
with patch.object(decoder, "_parse_records_from_stream", wraps=decoder._parse_records_from_stream) as mock_stream:
outputs = list(decoder.decode(resp))
results = [row for batch in outputs for row in batch["results"]]
assert results == base[0]["results"]
mock_stream.assert_not_called()
def test_exact_threshold_forces_streaming(self):
"""Body size == max_direct_decode_bytes - fast path is taken."""
decoder = GoogleAdsStreamingDecoder(chunk_size=1024, max_direct_decode_bytes=5 * 1024)
base = [{"results": [{"x": 1}]}]
raw = json.dumps(base, separators=(",", ":")).encode()
# pad to reach exactly the threshold
pad_len = decoder.max_direct_decode_bytes - len(raw)
base[0]["results"][0]["pad"] = "x" * (pad_len - 9)
raw = json.dumps(base, separators=(",", ":")).encode()
assert len(raw) == decoder.max_direct_decode_bytes
resp = self._BodyResponse(raw)
with patch.object(decoder, "_parse_records_from_stream", wraps=decoder._parse_records_from_stream) as mock_stream:
outputs = list(decoder.decode(resp))
results = [row for batch in outputs for row in batch["results"]]
assert results == base[0]["results"]
mock_stream.assert_called_once()
| TestGoogleAdsStreamingDecoder |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 89049,
"end": 89262
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING
_AutoModelForVision2Seq = auto_class_update(_AutoModelForVision2Seq, head_doc="vision-to-text modeling")
| _AutoModelForVision2Seq |
python | huggingface__transformers | src/transformers/models/emu3/modeling_emu3.py | {
"start": 14468,
"end": 15305
} | class ____(nn.Module):
def __init__(
self,
in_channel: int,
out_channel: int,
kernel_size: tuple[int],
stride: tuple[int],
):
super().__init__()
padding_sizes = [one_kernel - one_stride for one_kernel, one_stride in zip(kernel_size[1:], stride[1:])]
self.padding = ()
for pad_size in padding_sizes[::-1]:
self.padding += (pad_size // 2 + pad_size % 2, pad_size // 2)
self.padding += (2, 0)
self.conv = nn.Conv3d(
in_channel,
out_channel,
kernel_size,
stride=stride,
)
def forward(self, hidden_states: torch.Tensor):
hidden_states = F.pad(hidden_states, self.padding)
hidden_states = self.conv(hidden_states)
return hidden_states
| Emu3VQVAEConv3d |
python | tiangolo__fastapi | docs_src/dependencies/tutorial011_an.py | {
"start": 96,
"end": 554
} | class ____:
def __init__(self, fixed_content: str):
self.fixed_content = fixed_content
def __call__(self, q: str = ""):
if q:
return self.fixed_content in q
return False
checker = FixedContentQueryChecker("bar")
@app.get("/query-checker/")
async def read_query_check(fixed_content_included: Annotated[bool, Depends(checker)]):
return {"fixed_content_in_query": fixed_content_included}
| FixedContentQueryChecker |
python | pdm-project__pdm | src/pdm/models/venv.py | {
"start": 877,
"end": 2642
} | class ____:
root: Path
is_conda: bool
interpreter: Path
@classmethod
def get(cls, root: Path) -> VirtualEnv | None:
path = get_venv_python(root)
if not path.exists():
return None
return cls(root, is_conda_venv(root), path)
@classmethod
def from_interpreter(cls, interpreter: Path) -> VirtualEnv | None:
root, is_conda = get_venv_like_prefix(interpreter)
if root is not None:
return cls(root, is_conda, interpreter)
return None
def env_vars(self) -> dict[str, str]:
key = "CONDA_PREFIX" if self.is_conda else "VIRTUAL_ENV"
return {key: str(self.root)}
@cached_property
def venv_config(self) -> dict[str, str]:
venv_cfg = self.root / "pyvenv.cfg"
if not venv_cfg.exists():
return {}
parsed: dict[str, str] = {}
with venv_cfg.open(encoding="utf-8") as fp:
for line in fp:
if "=" in line:
k, v = line.split("=", 1)
k = k.strip().lower()
v = v.strip()
if k == "include-system-site-packages":
v = v.lower()
parsed[k] = v
return parsed
@property
def include_system_site_packages(self) -> bool:
return self.venv_config.get("include-system-site-packages") == "true"
@cached_property
def base_paths(self) -> list[str]:
home = Path(self.venv_config["home"])
base_executable = find_python_in_path(home) or find_python_in_path(home.parent)
assert base_executable is not None
paths = get_sys_config_paths(str(base_executable))
return [paths["purelib"], paths["platlib"]]
| VirtualEnv |
python | pypa__warehouse | dev/flake8/checkers.py | {
"start": 6513,
"end": 9160
} | class ____:
def __init__(self, tree: ast.AST, filename: str) -> None:
self.tree = tree
self.filename = filename
def run(self) -> Generator[tuple[int, int, str, type[Any]]]:
visitor = WarehouseVisitor(self.filename)
visitor.visit(self.tree)
for e in visitor.errors:
yield *e, type(self)
# Testing
def test_wh003_renderer_template_not_found():
# Simulate a Python file with a @view_config decorator and a non-existent template
code = dedent(
"""
from pyramid.view import view_config
@view_config(renderer="non_existent_template.html")
def my_view(request):
pass
"""
)
tree = ast.parse(code)
visitor = WarehouseVisitor(filename="test_file.py")
visitor.visit(tree)
# Assert that the WH003 error is raised
assert len(visitor.errors) == 1
assert visitor.errors[0][2] == WH003_msg
def test_wh003_renderer_template_in_package_path():
code = dedent(
"""
from pyramid.view import view_config
@view_config(renderer="warehouse.admin:templates/admin/dashboard.html")
def my_view(request):
pass
"""
)
tree = ast.parse(code)
visitor = WarehouseVisitor(filename="test_file.py")
visitor.visit(tree)
# Assert that no WH003 error is raised
assert len(visitor.errors) == 0
def test_wh004_metrics_tags_invalid_types():
# Test case: Invalid tag types (should error)
code = dedent(
"""
metrics.increment("counter", tags="string")
request.metrics.gauge("gauge", tags=("tuple",))
metrics.histogram("hist", tags={"dict": "value"})
"""
)
tree = ast.parse(code)
visitor = WarehouseVisitor(filename="test_file.py")
visitor.visit(tree)
# Assert that all 3 errors are raised
assert len(visitor.errors) == 3
assert all(error[2] == WH004_msg for error in visitor.errors)
def test_wh004_metrics_tags_valid_types():
# Test case: Valid tag types (should not error)
code = dedent(
"""
metrics.increment("counter", tags=["tag1", "tag2"])
request.metrics.gauge("gauge", tags=None)
tag_list = ["tag1"]
metrics.histogram("hist", tags=tag_list)
"""
)
tree = ast.parse(code)
visitor = WarehouseVisitor(filename="test_file.py")
visitor.visit(tree)
# Assert that no errors are raised
assert len(visitor.errors) == 0
if __name__ == "__main__":
test_wh003_renderer_template_not_found()
test_wh003_renderer_template_in_package_path()
test_wh004_metrics_tags_invalid_types()
test_wh004_metrics_tags_valid_types()
print("All tests passed!")
| WarehouseCheck |
python | pytorch__pytorch | torch/_inductor/codegen/cpp_wrapper_gpu.py | {
"start": 1458,
"end": 16865
} | class ____:
"""
When using cpp wrapper, GPU kernel load and launch needs to wait for Triton kernels
to be tuned and stored as cubin files, so use a deferred generating the final wrapper around
the triton kernel until right before the prefix is written.
"""
wrapper_name: str
kernel_name: str
kernel_name_to_body: dict[str, str]
arg_types: list[Any]
def generate(self, wrapper: CppWrapperGpu):
"""
Generate the GPU kernel definition, as well as load and launch code.
"""
prefix = wrapper.prefix
if self.kernel_name.startswith("multi_kernel_"):
# MultiKernel will select one kernel after running the autotune block
self.kernel_name = MultiKernelCall.lookup_choice(self.kernel_name)
params = CudaKernelParamCache.get(self.kernel_name)
assert params, f"CudaKernelParamCache not populated for {self.kernel_name}"
def_args = params["def_args"]
arg_types = self.arg_types
inductor_meta = params["inductor_meta"]
if "extra_launcher_args" in inductor_meta and len(def_args) > len(arg_types):
# extra_launcher_args should already be in def_args
assert len(def_args) == len(arg_types) - len(
inductor_meta["extra_launcher_args"]
)
arg_types = arg_types + [SymbolicCallArg] * len(
inductor_meta["extra_launcher_args"]
)
if not V.graph.aot_mode:
prefix.writeline(
maybe_hipify_code_wrapper(
f"static {wrapper.device_codegen.cpp_kernel_type()} {self.kernel_name} = nullptr;"
)
)
kernel_var_name = self.kernel_name
else:
kernel_var_name = f"kernels_.{self.kernel_name}"
# tensors can be RAIIAtenTensorHandle or ConstantHandle, so make them template types
template_types = [
f"typename {name}_type_"
for name, arg_type in zip(def_args, arg_types)
if isinstance(arg_type, (torch_dtype, UnwrapUnspecArg))
]
if V.graph.aot_mode:
template_types.append("typename kernels_type_")
if template_types:
prefix.writeline(f"template <{', '.join(template_types)}>")
prefix.writeline(f"static inline void {self.wrapper_name}(")
with prefix.indent():
assert len(def_args) == len(arg_types), (def_args, arg_types)
for name, arg_type in zip(def_args, arg_types):
if isinstance(arg_type, (torch_dtype, UnwrapUnspecArg)):
prefix.writeline(f"const {name}_type_& {name},")
elif issubclass(arg_type, (SymbolicCallArg, sympy.Expr, int)):
prefix.writeline(f"int64_t {name},")
elif arg_type is float:
prefix.writeline(f"float {name},")
elif arg_type is bool:
prefix.writeline(f"bool {name},")
else:
raise ValueError(f"Unexpected arg type {arg_type}")
prefix.writeline("int32_t device_idx_,")
prefix.writeline(
maybe_hipify_code_wrapper(
f"{wrapper.device_codegen.cpp_stream_type()} stream_,"
)
)
if V.graph.aot_mode:
prefix.writeline("kernels_type_& kernels_,")
prefix.writeline(
"const std::optional<std::string>& cubin_dir_ = std::nullopt"
)
prefix.writeline("){")
with prefix.indent():
if V.graph.aot_mode:
# Emit the original Triton kernel for debugging purposes
prefix.writeline("/*")
prefix.splice(self.kernel_name_to_body[self.kernel_name])
prefix.writeline("*/")
self.generate_grid(prefix, inductor_meta, params)
self.generate_load_kernel(prefix, kernel_var_name, params)
self.generate_launch_kernel(prefix, wrapper, kernel_var_name, params)
prefix.writeline("}")
if not config.aot_inductor.embed_kernel_binary:
# Ensure the cubin file is included in the package
V.graph.wrapper_code.additional_files.append(
params[get_cpp_wrapper_cubin_path_name()]
)
def generate_grid(
self,
prefix: IndentedBuffer,
inductor_meta: dict[str, Any],
params: dict[str, Any],
):
from ..runtime.triton_heuristics import GridExpr
grid = GridExpr.from_meta(inductor_meta, params["config"], mode="cpp")
for line in grid.prefix:
prefix.writeline(line)
prefix.splice(
f"""\
uint32_t grid_0 = {grid.x_grid};
uint32_t grid_1 = {grid.y_grid};
uint32_t grid_2 = {grid.z_grid};
"""
)
prefix.writeline("if (grid_0 == 0 || grid_1 == 0 || grid_2 == 0) return;")
def generate_load_kernel(self, prefix, kernel_var_name, params):
prefix.writeline(f"if ({kernel_var_name} == nullptr) {{")
with prefix.indent():
embed_kernel_args = [f"__{params['inductor_meta']['kernel_name']}_start"]
if torch.xpu.is_available():
# XPU needs the end address of the kernel to calculate the size of the kernel binary.
embed_kernel_args.append(
f"__{params['inductor_meta']['kernel_name']}_end"
)
load_kernel_args = (
[
*embed_kernel_args,
cpp_string_literal(params["mangled_name"]),
str(params["shared_mem"]),
]
if V.graph.aot_mode and config.aot_inductor.embed_kernel_binary
else [
cpp_string_literal(params[get_cpp_wrapper_cubin_path_name()]),
cpp_string_literal(params["mangled_name"]),
str(params["shared_mem"]),
"cubin_dir_",
]
)
prefix.writeline(
f"{kernel_var_name} = loadKernel({', '.join(load_kernel_args)}); "
)
prefix.writeline("}")
def generate_launch_kernel(self, prefix, wrapper, kernel_var_name, params):
"""
Generate the GPU kernel launching code.
This is where all the call args being sorted out and generated.
If enable_kernel_profile is enabled, all args related information would be packed in this function.
"""
triton_meta = params["triton_meta"]
assert len(self.arg_types) == len(params["def_args"]), (
self.arg_types,
params["def_args"],
)
arg_type_loookup = dict(zip(params["def_args"], self.arg_types))
# difference between Python and C++ wrapper: C++ wrapper strips out equal_to_1 constants
call_args = [
name for name in params["call_args"] if name not in triton_meta["constants"]
]
arg_types = [arg_type_loookup[name] for name in call_args]
arg_signatures = [triton_meta["signature"][name] for name in call_args]
scratch_spaces = {
name: params[name]
for name in ["global_scratch", "profile_scratch"]
if params.get(name, None) is not None
}
call_args_str = wrapper.generate_args_decl(
prefix,
call_args,
arg_types,
arg_signatures,
scratch_spaces=scratch_spaces,
)
prefix.writeline(f"void* kernel_args_[] = {{{call_args_str}}};")
launch_kernel_args = [
kernel_var_name,
"grid_0",
"grid_1",
"grid_2",
str(params["num_warps"]),
str(params["shared_mem"]),
"kernel_args_",
"stream_",
]
if wrapper.device == "xpu":
launch_kernel_args.append(str(params["threads_per_warp"]))
enable_kernel_profile = config.cpp.enable_kernel_profile and sys.platform in [
"linux",
"win32",
]
if enable_kernel_profile:
normalized_kernel_name = re.sub(r"[^a-zA-Z0-9_]", "_", f"{kernel_var_name}")
prefix.writeline("{")
with prefix.indent():
prefix.writelines(
[
f"std::unordered_map<std::string, C10IValueHandle> kwargs_{normalized_kernel_name};",
"",
]
)
# Add launch args info
record_launch_kernel_args = [
("grid_0", "grid_0"),
("grid_1", "grid_1"),
("grid_2", "grid_2"),
("num_warps", str(params["num_warps"])),
("shared_mem", str(params["shared_mem"])),
]
for k, v in record_launch_kernel_args:
arg_name = f"{normalized_kernel_name}_{k}"
prefix.writelines(
[
f"// Create c10::IValue for {k}",
f"C10IValueHandle tmp_{arg_name};",
f"aoti_torch_int64_to_ivalue({v}, &tmp_{arg_name});",
f"RAIIC10IValueHandle RAII_{arg_name}(tmp_{arg_name});",
f'kwargs_{normalized_kernel_name}.emplace("{k}", RAII_{arg_name});',
]
)
# Add input info (This copies the logic from args_decl)
signature2dtype = {
"i32": "int32_t",
"i64": "int64_t",
"fp32": "float",
}
def signature_is_tma_desc(sig):
if not sig:
return False
if sig == "nvTmaDesc":
return True
if sig.startswith("tensordesc<"):
return True
return False
curr_arg_id = -1
total_args = []
ordered_argsname = []
def write_dummy_scalar_ivalue(arg_name):
# We only care about the shape, therefore we create a dummy scalar here.
prefix.writelines(
[
f"// Create c10::IValue for arg_{curr_arg_id}",
f"C10IValueHandle tmp_{arg_name};",
f"aoti_torch_int64_to_ivalue(0, &tmp_{arg_name});",
f"RAIIC10IValueHandle RAII_{arg_name}(tmp_{arg_name});",
]
)
# pyrefly: ignore [bad-argument-type]
total_args.append(f"tmp_{arg_name}")
def process_args_for_input_shape(arg, arg_type, arg_signature=None):
nonlocal curr_arg_id
curr_arg_id += 1
arg_name = f"{normalized_kernel_name}_arg_{curr_arg_id}"
# ignore tma descriptors, as host-side TMA descriptors need
# to be passed to the compiled Triton kernel by value
if isinstance(
arg_type, UnwrapUnspecArg
) and not signature_is_tma_desc(arg_signature):
write_dummy_scalar_ivalue(arg_name)
elif isinstance(
arg_type, torch_dtype
) and not signature_is_tma_desc(arg_signature):
# This is an at::Tensor.
prefix.writelines(
[
f"// Create c10::IValue for arg_{curr_arg_id}",
f"C10IValueHandle tmp_{arg_name};",
f"aoti_torch_tensor_to_ivalue({arg}, &tmp_{arg_name});",
f"RAIIC10IValueHandle RAII_{arg_name}(tmp_{arg_name});",
]
)
# pyrefly: ignore [bad-argument-type]
total_args.append(f"tmp_{arg_name}")
elif (
isinstance(arg_type, type(SymbolicCallArg))
and arg_signature is not None
and arg_signature in signature2dtype
) or arg_type in (sympy.Integer, int, sympy.Float, float):
write_dummy_scalar_ivalue(arg_name)
elif arg_signature and arg_signature.startswith("tensordesc<"):
# Skip tma related args
pass
else:
write_dummy_scalar_ivalue(arg_name)
# Add input name and shape information
for arg, arg_type, arg_signature in zip_longest(
call_args, arg_types, arg_signatures
):
# pyrefly: ignore [bad-argument-type]
ordered_argsname.append(f'"{arg}"')
process_args_for_input_shape(arg, arg_type, arg_signature)
# Add input name into kwargs
name_var = f"{normalized_kernel_name}_input_names"
prefix.writelines(
[
"// Create c10::IValue for input names",
f"C10IValueHandle tmp_{name_var};",
f"std::vector<const char*> {name_var}({{{', '.join(ordered_argsname)}}});",
f"aoti_torch_strlist_to_ivalue({name_var}.data(), {len(ordered_argsname)}, &tmp_{name_var});",
f"RAIIC10IValueHandle RAII_{name_var}(tmp_{name_var});",
f'kwargs_{normalized_kernel_name}.emplace("Input Args", RAII_{name_var});',
]
)
inputs_info_ = f"{normalized_kernel_name}_inputs_info_"
# We pass in the non-RAII handles, since C10 doesn't automatically free them.
# The RAII will make sure they get freed when they are out of scope.
tmp_args = ",".join(total_args)
prefix.writelines(
[
"// Aggregate all c10::IValue for inputs",
f"std::vector<C10IValueHandle> {inputs_info_}({{{tmp_args}}});",
]
)
# Start recording Function
prefix.writelines(
[
"",
(
"torch::aot_inductor::RAIIAtenRecordFunctionHandle "
f"record_{normalized_kernel_name}_"
f'("{kernel_var_name}", '
f"reinterpret_cast<IValueMapHandle>(&kwargs_{normalized_kernel_name}), "
f"{inputs_info_});"
),
"",
f"launchKernel({', '.join(launch_kernel_args)});",
]
)
prefix.writeline("}")
else:
prefix.writeline(f"launchKernel({', '.join(launch_kernel_args)});")
| DeferredTritonCallWrapper |
python | getsentry__sentry | tests/sentry/uptime/consumers/test_eap_converter.py | {
"start": 13484,
"end": 18503
} | class ____(SentryTestCase):
def _create_base_result(self, **overrides):
"""Create a base CheckResult for testing."""
base = {
"guid": "test-guid-123",
"subscription_id": "sub-456",
"status": "success",
"status_reason": None,
"trace_id": "trace-789",
"span_id": "span-789",
"scheduled_check_time_ms": 1609459200000,
"actual_check_time_ms": 1609459205000,
"duration_ms": 150,
"request_info": None,
"region": "us-east-1",
}
base.update(overrides)
return base
def test_convert_redirect_chain_denormalized(self) -> None:
result = self._create_base_result(
duration_ms=250,
request_info_list=[
{
"url": "https://example.com",
"request_type": "GET",
"http_status_code": 301,
"request_duration_us": 100000,
},
{
"url": "https://www.example.com",
"request_type": "GET",
"http_status_code": 200,
"request_duration_us": 150000,
},
],
)
trace_items = convert_uptime_result_to_trace_items(
self.project, result, IncidentStatus.NO_INCIDENT
)
assert len(trace_items) == 2
for trace_item in trace_items:
assert trace_item.item_type == TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT
assert trace_item.client_sample_rate == 1.0
assert trace_item.server_sample_rate == 1.0
attributes = trace_item.attributes
assert attributes["guid"].string_value == "test-guid-123"
assert attributes["check_status"].string_value == "success"
assert attributes["region"].string_value == "us-east-1"
assert attributes["check_duration_us"].int_value == 250000
first_item = trace_items[0]
assert first_item.attributes["request_sequence"].int_value == 0
assert first_item.attributes["request_url"].string_value == "https://example.com"
assert first_item.attributes["http_status_code"].int_value == 301
assert first_item.attributes["request_duration_us"].int_value == 100000
second_item = trace_items[1]
assert second_item.attributes["request_sequence"].int_value == 1
assert second_item.attributes["request_url"].string_value == "https://www.example.com"
assert second_item.attributes["http_status_code"].int_value == 200
assert second_item.attributes["request_duration_us"].int_value == 150000
def test_convert_legacy_request_info_denormalized(self) -> None:
result = self._create_base_result(
region="us-west-2",
request_info={
"url": "https://example.com",
"request_type": "GET",
"http_status_code": 200,
"request_duration_us": 150000,
},
)
trace_items = convert_uptime_result_to_trace_items(
self.project, result, IncidentStatus.NO_INCIDENT
)
assert len(trace_items) == 1
trace_item = trace_items[0]
assert trace_item.client_sample_rate == 1.0
assert trace_item.server_sample_rate == 1.0
attributes = trace_item.attributes
assert attributes["check_status"].string_value == "success"
assert attributes["region"].string_value == "us-west-2"
assert attributes["request_sequence"].int_value == 0
assert attributes["request_url"].string_value == "https://example.com"
def test_convert_with_no_requests(self) -> None:
"""Test conversion when there are no requests (e.g., missed_window status)."""
result = self._create_base_result(
status="missed_window",
status_reason={"type": "missed_window", "description": "Check was not executed"},
) # Has request_info=None and no request_info_list
trace_items = convert_uptime_result_to_trace_items(
self.project, result, IncidentStatus.NO_INCIDENT
)
# Should return one item with check-level metadata but no request-specific data
assert len(trace_items) == 1
trace_item = trace_items[0]
attributes = trace_item.attributes
# Check-level metadata should be present
assert attributes["guid"].string_value == "test-guid-123"
assert attributes["subscription_id"].string_value == "sub-456"
assert attributes["check_status"].string_value == "missed_window"
assert attributes["status_reason_type"].string_value == "missed_window"
# Request-specific attributes should NOT be present
assert "request_type" not in attributes
assert "http_status_code" not in attributes
assert "request_url" not in attributes
| TestFullDenormalizedConversion |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/group_by_window_test.py | {
"start": 14695,
"end": 15733
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_dataset(self, components):
dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(-1)
dataset = dataset.group_by_window(
key_func=lambda x: x % 3,
reduce_func=lambda _, xs: xs.batch(4),
window_size=4)
return dataset
@combinations.generate(test_base.default_test_combinations())
def test(self):
components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0],
dtype=np.int64)
self.verify_unused_iterator(
lambda: self._build_dataset(components),
num_outputs=12,
verify_exhausted=False)
self.verify_multiple_breaks(
lambda: self._build_dataset(components),
num_outputs=12,
verify_exhausted=False)
self.verify_reset_restored_iterator(
lambda: self._build_dataset(components),
num_outputs=12,
verify_exhausted=False)
| GroupByWindowCheckpointTest |
python | doocs__leetcode | solution/1400-1499/1460.Make Two Arrays Equal by Reversing Subarrays/Solution.py | {
"start": 0,
"end": 130
} | class ____:
def canBeEqual(self, target: List[int], arr: List[int]) -> bool:
return sorted(target) == sorted(arr)
| Solution |
python | walkccc__LeetCode | solutions/1373. Maximum Sum BST in Binary Tree/1373.py | {
"start": 161,
"end": 808
} | class ____:
def maxSumBST(self, root: TreeNode | None) -> int:
self.ans = 0
def traverse(root: TreeNode | None) -> T:
if not root:
return T(True, -math.inf, math.inf, 0)
left: T = traverse(root.left)
right: T = traverse(root.right)
if not left.isBST or not right.isBST:
return T()
if root.val <= left.mx or root.val >= right.mn:
return T()
# The `root` is a valid BST.
summ = root.val + left.summ + right.summ
self.ans = max(self.ans, summ)
return T(True, max(root.val, right.mx), min(root.val, left.mn), summ)
traverse(root)
return self.ans
| Solution |
python | doocs__leetcode | solution/0100-0199/0128.Longest Consecutive Sequence/Solution2.py | {
"start": 0,
"end": 303
} | class ____:
def longestConsecutive(self, nums: List[int]) -> int:
s = set(nums)
ans = 0
for x in s:
if x - 1 not in s:
y = x + 1
while y in s:
y += 1
ans = max(ans, y - x)
return ans
| Solution |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 73413,
"end": 73514
} | class ____(BaseModel, extra="forbid"):
neg: "Expression" = Field(..., description="")
| NegExpression |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 8586,
"end": 11926
} | class ____(Base):
def equivalent_params(self): # [useless-parent-delegation]
return super(UselessSuper, self).equivalent_params()
def equivalent_params_1(self, first): # [useless-parent-delegation]
return super(UselessSuper, self).equivalent_params_1(first)
def equivalent_params_2(self, *args): # [useless-parent-delegation]
return super(UselessSuper, self).equivalent_params_2(*args)
def equivalent_params_3(self, *args, **kwargs): # [useless-parent-delegation]
return super(UselessSuper, self).equivalent_params_3(*args, **kwargs)
def equivalent_params_4(self, first): # [useless-parent-delegation]
super(UselessSuper, self).equivalent_params_4(first)
def equivalent_params_5(self, first, *args): # [useless-parent-delegation]
super(UselessSuper, self).equivalent_params_5(first, *args)
def equivalent_params_6(self, first, *args, **kwargs): # [useless-parent-delegation]
return super(UselessSuper, self).equivalent_params_6(first, *args, **kwargs)
def with_default_argument(self, first, default_arg="default"): # [useless-parent-delegation]
# useless because the default value here is the same as in the base class
return super(UselessSuper, self).with_default_argument(first, default_arg)
def without_default_argument(self, first, second): # [useless-parent-delegation]
return super(UselessSuper, self).without_default_argument(first, second)
def with_default_argument_none(self, first, default_arg=None): # [useless-parent-delegation]
# useless because the default value here is the same as in the base class
super(UselessSuper, self).with_default_argument_none(first, default_arg)
def with_default_argument_int(self, first, default_arg=42): # [useless-parent-delegation]
super(UselessSuper, self).with_default_argument_int(first, default_arg)
def with_default_argument_tuple(self, first, default_arg=()): # [useless-parent-delegation]
super(UselessSuper, self).with_default_argument_tuple(first, default_arg)
def with_default_argument_dict(self, first, default_arg={}): # [useless-parent-delegation]
super(UselessSuper, self).with_default_argument_dict(first, default_arg)
def with_default_argument_var(self, first, default_arg=default_var): # [useless-parent-delegation]
super(UselessSuper, self).with_default_argument_var(first, default_arg)
def __init__(self): # [useless-parent-delegation]
super(UselessSuper, self).__init__()
def with_default_arg(self, first, default_arg="only_in_super_base"): # [useless-parent-delegation]
super(UselessSuper, self).with_default_arg(first, default_arg)
def with_default_arg_bis(self, first, default_arg="only_in_super_base"): # [useless-parent-delegation]
super(UselessSuper, self).with_default_arg_bis(first, default_arg)
def with_default_arg_ter(self, first, default_arg="has_been_changed"): # [useless-parent-delegation]
super(UselessSuper, self).with_default_arg_ter(first, default_arg)
def with_default_arg_quad(self, first, default_arg="has_been_changed"): # [useless-parent-delegation]
super(UselessSuper, self).with_default_arg_quad(first, default_arg)
def trigger_something(value_to_trigger):
pass
| UselessSuper |
python | bokeh__bokeh | src/bokeh/models/dom.py | {
"start": 2909,
"end": 3087
} | class ____(DOMElement):
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| Span |
python | graphql-python__graphene | examples/starwars_relay/schema.py | {
"start": 1816,
"end": 1955
} | class ____(graphene.ObjectType):
introduce_ship = IntroduceShip.Field()
schema = graphene.Schema(query=Query, mutation=Mutation)
| Mutation |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/system/utils/test_helpers.py | {
"start": 1665,
"end": 4845
} | class ____:
FETCH_VARIABLE_TEST_CASES = [
# Format is:
# (Environment Variable value, Fetched SSM value, Provided Default value, Expected Result)
(ENV_VALUE, SSM_VALUE, DEFAULT_VALUE, ENV_VALUE),
(ENV_VALUE, SSM_VALUE, None, ENV_VALUE),
(ENV_VALUE, None, DEFAULT_VALUE, ENV_VALUE),
(ENV_VALUE, None, None, ENV_VALUE),
(None, SSM_VALUE, DEFAULT_VALUE, SSM_VALUE),
(None, SSM_VALUE, None, SSM_VALUE),
(None, None, DEFAULT_VALUE, DEFAULT_VALUE),
# For the (None, None, None ) test case, see: test_fetch_variable_no_value_found_raises_exception
]
@pytest.mark.parametrize(
("env_value", "ssm_value", "default_value", "expected_result"), FETCH_VARIABLE_TEST_CASES
)
@patch.object(os, "getenv")
def test_fetch_variable_success(
self, mock_getenv, env_value, ssm_value, default_value, expected_result
) -> None:
mock_getenv.return_value = env_value or ssm_value
utils._fetch_from_ssm.cache_clear()
result = (
utils.fetch_variable(key="some_key", test_name=TEST_NAME, default_value=default_value)
if default_value
else utils.fetch_variable(key=ANY_STR, test_name=TEST_NAME)
)
utils._fetch_from_ssm.cache_clear()
assert result == expected_result
def test_fetch_variable_no_value_found_raises_exception(self):
# This would be the (None, None, None) test case from above.
with pytest.raises(ValueError, match=NO_VALUE_MSG.format(key=ANY_STR)):
utils.fetch_variable(key=ANY_STR, test_name=TEST_NAME)
ENV_ID_TEST_CASES = [
# Happy Cases
("ABCD", True),
("AbCd", True),
("abcd", True),
("ab12", True),
# Failure Cases
# Must be alphanumeric
("not_alphanumeric", False),
# Can not be empty
("", False),
# Must start with a letter
("1234", False),
("12ab", False),
("12AB", False),
("12Ab", False),
]
@pytest.mark.parametrize(("env_id", "is_valid"), ENV_ID_TEST_CASES)
def test_validate_env_id_success(self, env_id, is_valid):
if is_valid:
captured_output = StringIO()
sys.stdout = captured_output
result = _validate_env_id(env_id)
sys.stdout = sys.__stdout__
assert result == env_id.lower()
assert result.isalnum()
if not result == env_id:
assert LOWERCASE_ENV_ID_MSG in captured_output.getvalue()
else:
with pytest.raises(ValueError, match=INVALID_ENV_ID_MSG):
_validate_env_id(env_id)
def test_set_env_id_generates_if_required(self):
# No environment variable nor SSM value has been found
result = set_env_id(TEST_NAME)
assert len(result) == DEFAULT_ENV_ID_LEN + len(DEFAULT_ENV_ID_PREFIX)
assert result.isalnum()
assert result.islower()
def test_set_env_id_exports_environment_variable(self):
env_id = set_env_id(TEST_NAME)
assert os.environ[ENV_ID_ENVIRON_KEY] == env_id
| TestAmazonSystemTestHelpers |
python | kamyu104__LeetCode-Solutions | Python/sum-of-remoteness-of-all-cells.py | {
"start": 57,
"end": 1105
} | class ____(object):
def sumRemoteness(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
DIRECTIONS = ((1, 0), (0, 1), (-1, 0), (0, -1))
def bfs(i, j):
total, cnt = grid[i][j], 1
grid[i][j] = -1
q = [(i, j)]
while q:
new_q = []
for i, j in q:
for di, dj in DIRECTIONS:
ni, nj = i+di, j+dj
if not (0 <= ni < len(grid) and 0 <= nj < len(grid[0]) and grid[ni][nj] != -1):
continue
total += grid[ni][nj]
cnt += 1
grid[ni][nj] = -1
new_q.append((ni, nj))
q = new_q
return total, cnt
groups = [bfs(i, j) for i in xrange(len(grid)) for j in xrange(len(grid[0])) if grid[i][j] != -1]
total = sum(t for t, _ in groups)
return sum((total-t)*c for t, c in groups)
| Solution |
python | django__django | tests/model_fields/test_decimalfield.py | {
"start": 260,
"end": 5768
} | class ____(TestCase):
def test_to_python(self):
f = models.DecimalField(max_digits=4, decimal_places=2)
self.assertEqual(f.to_python(3), Decimal("3"))
self.assertEqual(f.to_python("3.14"), Decimal("3.14"))
# to_python() converts floats and honors max_digits.
self.assertEqual(f.to_python(3.1415926535897), Decimal("3.142"))
self.assertEqual(f.to_python(2.4), Decimal("2.400"))
# Uses default rounding of ROUND_HALF_EVEN.
self.assertEqual(f.to_python(2.0625), Decimal("2.062"))
self.assertEqual(f.to_python(2.1875), Decimal("2.188"))
def test_invalid_value(self):
field = models.DecimalField(max_digits=4, decimal_places=2)
msg = "“%s” value must be a decimal number."
tests = [
(),
[],
{},
set(),
object(),
complex(),
"non-numeric string",
b"non-numeric byte-string",
]
for value in tests:
with self.subTest(value):
with self.assertRaisesMessage(ValidationError, msg % (value,)):
field.clean(value, None)
def test_default(self):
f = models.DecimalField(default=Decimal("0.00"))
self.assertEqual(f.get_default(), Decimal("0.00"))
def test_get_prep_value(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertIsNone(f.get_prep_value(None))
self.assertEqual(f.get_prep_value("2.4"), Decimal("2.4"))
def test_get_db_prep_value(self):
"""
DecimalField.get_db_prep_value() must call
DatabaseOperations.adapt_decimalfield_value().
"""
f = models.DecimalField(max_digits=5, decimal_places=1)
# None of the built-in database backends implement
# adapt_decimalfield_value(), so this must be confirmed with mocking.
with mock.patch.object(
connection.ops.__class__, "adapt_decimalfield_value"
) as adapt_decimalfield_value:
f.get_db_prep_value("2.4", connection)
adapt_decimalfield_value.assert_called_with(Decimal("2.4"), 5, 1)
def test_filter_with_strings(self):
"""
Should be able to filter decimal fields using strings (#8023).
"""
foo = Foo.objects.create(a="abc", d=Decimal("12.34"))
self.assertEqual(list(Foo.objects.filter(d="12.34")), [foo])
def test_save_without_float_conversion(self):
"""
Ensure decimals don't go through a corrupting float conversion during
save (#5079).
"""
bd = BigD(d="12.9")
bd.save()
bd = BigD.objects.get(pk=bd.pk)
self.assertEqual(bd.d, Decimal("12.9"))
def test_save_nan_invalid(self):
msg = "“nan” value must be a decimal number."
for value in [float("nan"), math.nan, "nan"]:
with self.subTest(value), self.assertRaisesMessage(ValidationError, msg):
BigD.objects.create(d=value)
def test_save_inf_invalid(self):
msg = "“inf” value must be a decimal number."
for value in [float("inf"), math.inf, "inf"]:
with self.subTest(value), self.assertRaisesMessage(ValidationError, msg):
BigD.objects.create(d=value)
msg = "“-inf” value must be a decimal number."
for value in [float("-inf"), -math.inf, "-inf"]:
with self.subTest(value), self.assertRaisesMessage(ValidationError, msg):
BigD.objects.create(d=value)
def test_fetch_from_db_without_float_rounding(self):
big_decimal = BigD.objects.create(d=Decimal(".100000000000000000000000000005"))
big_decimal.refresh_from_db()
self.assertEqual(big_decimal.d, Decimal(".100000000000000000000000000005"))
def test_lookup_really_big_value(self):
"""
Really big values can be used in a filter statement.
"""
# This should not crash.
self.assertSequenceEqual(Foo.objects.filter(d__gte=100000000000), [])
def test_lookup_decimal_larger_than_max_digits(self):
self.assertSequenceEqual(Foo.objects.filter(d__lte=Decimal("123456")), [])
def test_max_digits_validation(self):
field = models.DecimalField(max_digits=2)
expected_message = validators.DecimalValidator.messages["max_digits"] % {
"max": 2
}
with self.assertRaisesMessage(ValidationError, expected_message):
field.clean(100, None)
def test_max_decimal_places_validation(self):
field = models.DecimalField(decimal_places=1)
expected_message = validators.DecimalValidator.messages[
"max_decimal_places"
] % {"max": 1}
with self.assertRaisesMessage(ValidationError, expected_message):
field.clean(Decimal("0.99"), None)
def test_max_whole_digits_validation(self):
field = models.DecimalField(max_digits=3, decimal_places=1)
expected_message = validators.DecimalValidator.messages["max_whole_digits"] % {
"max": 2
}
with self.assertRaisesMessage(ValidationError, expected_message):
field.clean(Decimal("999"), None)
def test_roundtrip_with_trailing_zeros(self):
"""Trailing zeros in the fractional part aren't truncated."""
obj = Foo.objects.create(a="bar", d=Decimal("8.320"))
obj.refresh_from_db()
self.assertEqual(obj.d.compare_total(Decimal("8.320")), Decimal("0"))
| DecimalFieldTests |
python | astropy__astropy | astropy/extern/configobj/validate.py | {
"start": 12525,
"end": 12926
} | class ____(ValidateError):
"""The value supplied was of the correct type, but was not an allowed value."""
def __init__(self, value):
"""
>>> raise VdtValueError('jedi')
Traceback (most recent call last):
VdtValueError: the value "jedi" is unacceptable.
"""
ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,))
| VdtValueError |
python | pytorch__pytorch | test/dynamo/cpython/3_13/typinganndata/ann_module2.py | {
"start": 264,
"end": 402
} | class ____:
def __init__(self, x: int) -> None:
self.x = x
c = C(5)
c.new_attr: int = 10
__annotations__ = {}
@no_type_check
| C |
python | astropy__astropy | astropy/coordinates/tests/test_representation_arithmetic.py | {
"start": 31767,
"end": 36984
} | class ____:
def _setup(self, omit_coslat):
if omit_coslat:
self.USD_cls = UnitSphericalCosLatDifferential
else:
self.USD_cls = UnitSphericalDifferential
s = UnitSphericalRepresentation(
lon=[0.0, 6.0, 21.0] * u.hourangle, lat=[0.0, -30.0, 85.0] * u.deg
)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors(omit_coslat=omit_coslat)
def test_name_coslat(self, omit_coslat):
self._setup(omit_coslat)
if omit_coslat:
assert self.USD_cls is UnitSphericalCosLatDifferential
assert self.USD_cls.name == "unitsphericalcoslat"
else:
assert self.USD_cls is UnitSphericalDifferential
assert self.USD_cls.name == "unitspherical"
assert self.USD_cls.name in DIFFERENTIAL_CLASSES
def test_simple_differentials(self, omit_coslat):
self._setup(omit_coslat)
s, e, sf = self.s, self.e, self.sf
o_lon = self.USD_cls(1.0 * u.arcsec, 0.0 * u.arcsec)
o_lonc = o_lon.to_cartesian(base=s)
o_lon2 = self.USD_cls.from_cartesian(o_lonc, base=s)
assert_differential_allclose(o_lon, o_lon2)
# simple check by hand for first element
# (lat[0]=0, so works for both normal and CosLat differential)
assert_quantity_allclose(
o_lonc[0].xyz, [0.0, np.pi / 180.0 / 3600.0, 0.0] * u.one
)
# check all using unit vectors and scale factors.
s_lon = s + 1.0 * u.arcsec * sf["lon"] * e["lon"]
assert type(s_lon) is SphericalRepresentation
assert_representation_allclose(o_lonc, s_lon - s, atol=1e-10 * u.one)
s_lon2 = s + o_lon
assert_representation_allclose(s_lon2, s_lon, atol=1e-10 * u.one)
o_lat = self.USD_cls(0.0 * u.arcsec, 1.0 * u.arcsec)
o_latc = o_lat.to_cartesian(base=s)
assert_quantity_allclose(
o_latc[0].xyz,
[0.0, 0.0, np.pi / 180.0 / 3600.0] * u.one,
atol=1e-10 * u.one,
)
s_lat = s + 1.0 * u.arcsec * sf["lat"] * e["lat"]
assert type(s_lat) is SphericalRepresentation
assert_representation_allclose(o_latc, s_lat - s, atol=1e-10 * u.one)
s_lat2 = s + o_lat
assert_representation_allclose(s_lat2, s_lat, atol=1e-10 * u.one)
def test_differential_arithmetic(self, omit_coslat):
self._setup(omit_coslat)
s = self.s
o_lon = self.USD_cls(1.0 * u.arcsec, 0.0 * u.arcsec)
o_lon_by_2 = o_lon / 2.0
assert type(o_lon_by_2) is self.USD_cls
assert_representation_allclose(
o_lon_by_2.to_cartesian(s) * 2.0, o_lon.to_cartesian(s), atol=1e-10 * u.one
)
s_lon = s + o_lon
s_lon2 = s + 2 * o_lon_by_2
assert type(s_lon) is SphericalRepresentation
assert_representation_allclose(s_lon, s_lon2, atol=1e-10 * u.one)
o_lon_rec = o_lon_by_2 + o_lon_by_2
assert type(o_lon_rec) is self.USD_cls
assert representation_equal(o_lon, o_lon_rec)
assert_representation_allclose(s + o_lon, s + o_lon_rec, atol=1e-10 * u.one)
o_lon_0 = o_lon - o_lon
assert type(o_lon_0) is self.USD_cls
for c in o_lon_0.components:
assert np.all(getattr(o_lon_0, c) == 0.0)
o_lon2 = self.USD_cls(1.0 * u.mas / u.yr, 0.0 * u.mas / u.yr)
kks = u.km / u.kpc / u.s
assert_quantity_allclose(o_lon2.norm(s)[0], 4.74047 * kks, atol=1e-4 * kks)
assert_representation_allclose(
o_lon2.to_cartesian(s) * 1000.0 * u.yr,
o_lon.to_cartesian(s),
atol=1e-10 * u.one,
)
s_off = s + o_lon
s_off2 = s + o_lon2 * 1000.0 * u.yr
assert_representation_allclose(s_off, s_off2, atol=1e-10 * u.one)
factor = 1e5 * u.radian / u.arcsec
if not omit_coslat:
factor = factor / np.cos(s.lat)
s_off_big = s + o_lon * factor
assert_representation_allclose(
s_off_big,
SphericalRepresentation(s.lon + 90.0 * u.deg, 0.0 * u.deg, 1e5),
atol=5.0 * u.one,
)
o_lon3c = CartesianRepresentation(0.0, 4.74047, 0.0, unit=kks)
# This looses information!!
o_lon3 = self.USD_cls.from_cartesian(o_lon3c, base=s)
expected0 = self.USD_cls(1.0 * u.mas / u.yr, 0.0 * u.mas / u.yr)
assert_differential_allclose(o_lon3[0], expected0)
# Part of motion kept.
part_kept = s.cross(CartesianRepresentation(0, 1, 0, unit=u.one)).norm()
assert_quantity_allclose(
o_lon3.norm(s), 4.74047 * part_kept * kks, atol=1e-10 * kks
)
# (lat[0]=0, so works for both normal and CosLat differential)
s_off_big2 = s + o_lon3 * 1e5 * u.yr * u.radian / u.mas
expected0 = SphericalRepresentation(90.0 * u.deg, 0.0 * u.deg, 1e5 * u.one)
assert_representation_allclose(s_off_big2[0], expected0, atol=5.0 * u.one)
def test_differential_init_errors(self, omit_coslat):
self._setup(omit_coslat)
with pytest.raises(u.UnitsError):
self.USD_cls(0.0 * u.deg, 10.0 * u.deg / u.yr)
| TestUnitSphericalDifferential |
python | numpy__numpy | numpy/polynomial/tests/test_printing.py | {
"start": 289,
"end": 3778
} | class ____:
@pytest.fixture(scope='class', autouse=True)
def use_unicode(self):
poly.set_default_printstyle('unicode')
@pytest.mark.parametrize(('inp', 'tgt'), (
([1, 2, 3], "1.0 + 2.0·x + 3.0·x²"),
([-1, 0, 3, -1], "-1.0 + 0.0·x + 3.0·x² - 1.0·x³"),
(arange(12), ("0.0 + 1.0·x + 2.0·x² + 3.0·x³ + 4.0·x⁴ + 5.0·x⁵ + "
"6.0·x⁶ + 7.0·x⁷ +\n8.0·x⁸ + 9.0·x⁹ + 10.0·x¹⁰ + "
"11.0·x¹¹")),
))
def test_polynomial_str(self, inp, tgt):
p = poly.Polynomial(inp)
res = str(p)
assert_equal(res, tgt)
@pytest.mark.parametrize(('inp', 'tgt'), (
([1, 2, 3], "1.0 + 2.0·T₁(x) + 3.0·T₂(x)"),
([-1, 0, 3, -1], "-1.0 + 0.0·T₁(x) + 3.0·T₂(x) - 1.0·T₃(x)"),
(arange(12), ("0.0 + 1.0·T₁(x) + 2.0·T₂(x) + 3.0·T₃(x) + 4.0·T₄(x) + "
"5.0·T₅(x) +\n6.0·T₆(x) + 7.0·T₇(x) + 8.0·T₈(x) + "
"9.0·T₉(x) + 10.0·T₁₀(x) + 11.0·T₁₁(x)")),
))
def test_chebyshev_str(self, inp, tgt):
res = str(poly.Chebyshev(inp))
assert_equal(res, tgt)
@pytest.mark.parametrize(('inp', 'tgt'), (
([1, 2, 3], "1.0 + 2.0·P₁(x) + 3.0·P₂(x)"),
([-1, 0, 3, -1], "-1.0 + 0.0·P₁(x) + 3.0·P₂(x) - 1.0·P₃(x)"),
(arange(12), ("0.0 + 1.0·P₁(x) + 2.0·P₂(x) + 3.0·P₃(x) + 4.0·P₄(x) + "
"5.0·P₅(x) +\n6.0·P₆(x) + 7.0·P₇(x) + 8.0·P₈(x) + "
"9.0·P₉(x) + 10.0·P₁₀(x) + 11.0·P₁₁(x)")),
))
def test_legendre_str(self, inp, tgt):
res = str(poly.Legendre(inp))
assert_equal(res, tgt)
@pytest.mark.parametrize(('inp', 'tgt'), (
([1, 2, 3], "1.0 + 2.0·H₁(x) + 3.0·H₂(x)"),
([-1, 0, 3, -1], "-1.0 + 0.0·H₁(x) + 3.0·H₂(x) - 1.0·H₃(x)"),
(arange(12), ("0.0 + 1.0·H₁(x) + 2.0·H₂(x) + 3.0·H₃(x) + 4.0·H₄(x) + "
"5.0·H₅(x) +\n6.0·H₆(x) + 7.0·H₇(x) + 8.0·H₈(x) + "
"9.0·H₉(x) + 10.0·H₁₀(x) + 11.0·H₁₁(x)")),
))
def test_hermite_str(self, inp, tgt):
res = str(poly.Hermite(inp))
assert_equal(res, tgt)
@pytest.mark.parametrize(('inp', 'tgt'), (
([1, 2, 3], "1.0 + 2.0·He₁(x) + 3.0·He₂(x)"),
([-1, 0, 3, -1], "-1.0 + 0.0·He₁(x) + 3.0·He₂(x) - 1.0·He₃(x)"),
(arange(12), ("0.0 + 1.0·He₁(x) + 2.0·He₂(x) + 3.0·He₃(x) + "
"4.0·He₄(x) + 5.0·He₅(x) +\n6.0·He₆(x) + 7.0·He₇(x) + "
"8.0·He₈(x) + 9.0·He₉(x) + 10.0·He₁₀(x) +\n"
"11.0·He₁₁(x)")),
))
def test_hermiteE_str(self, inp, tgt):
res = str(poly.HermiteE(inp))
assert_equal(res, tgt)
@pytest.mark.parametrize(('inp', 'tgt'), (
([1, 2, 3], "1.0 + 2.0·L₁(x) + 3.0·L₂(x)"),
([-1, 0, 3, -1], "-1.0 + 0.0·L₁(x) + 3.0·L₂(x) - 1.0·L₃(x)"),
(arange(12), ("0.0 + 1.0·L₁(x) + 2.0·L₂(x) + 3.0·L₃(x) + 4.0·L₄(x) + "
"5.0·L₅(x) +\n6.0·L₆(x) + 7.0·L₇(x) + 8.0·L₈(x) + "
"9.0·L₉(x) + 10.0·L₁₀(x) + 11.0·L₁₁(x)")),
))
def test_laguerre_str(self, inp, tgt):
res = str(poly.Laguerre(inp))
assert_equal(res, tgt)
def test_polynomial_str_domains(self):
res = str(poly.Polynomial([0, 1]))
tgt = '0.0 + 1.0·x'
assert_equal(res, tgt)
res = str(poly.Polynomial([0, 1], domain=[1, 2]))
tgt = '0.0 + 1.0·(-3.0 + 2.0x)'
assert_equal(res, tgt)
| TestStrUnicodeSuperSubscripts |
python | pytest-dev__pytest-asyncio | tests/markers/test_class_scope.py | {
"start": 164,
"end": 9888
} | class ____:
pytestmark = pytest.mark.asyncio
async def test_is_asyncio(self, sample_fixture):
assert asyncio.get_event_loop()
counter = 1
async def inc():
nonlocal counter
counter += 1
await asyncio.sleep(0)
await asyncio.ensure_future(inc())
assert counter == 2
@pytest.fixture
def sample_fixture():
return None
def test_asyncio_mark_provides_class_scoped_loop_when_applied_to_functions(
pytester: pytest.Pytester,
):
pytester.makeini("[pytest]\nasyncio_default_fixture_loop_scope = function")
pytester.makepyfile(
dedent(
"""\
import asyncio
import pytest
class TestClassScopedLoop:
loop: asyncio.AbstractEventLoop
@pytest.mark.asyncio(loop_scope="class")
async def test_remember_loop(self):
TestClassScopedLoop.loop = asyncio.get_running_loop()
@pytest.mark.asyncio(loop_scope="class")
async def test_this_runs_in_same_loop(self):
assert asyncio.get_running_loop() is TestClassScopedLoop.loop
"""
)
)
result = pytester.runpytest("--asyncio-mode=strict")
result.assert_outcomes(passed=2)
def test_asyncio_mark_provides_class_scoped_loop_when_applied_to_class(
pytester: pytest.Pytester,
):
pytester.makeini("[pytest]\nasyncio_default_fixture_loop_scope = function")
pytester.makepyfile(
dedent(
"""\
import asyncio
import pytest
@pytest.mark.asyncio(loop_scope="class")
class TestClassScopedLoop:
loop: asyncio.AbstractEventLoop
async def test_remember_loop(self):
TestClassScopedLoop.loop = asyncio.get_running_loop()
async def test_this_runs_in_same_loop(self):
assert asyncio.get_running_loop() is TestClassScopedLoop.loop
"""
)
)
result = pytester.runpytest("--asyncio-mode=strict")
result.assert_outcomes(passed=2)
def test_asyncio_mark_is_inherited_to_subclasses(pytester: pytest.Pytester):
pytester.makeini("[pytest]\nasyncio_default_fixture_loop_scope = function")
pytester.makepyfile(
dedent(
"""\
import asyncio
import pytest
@pytest.mark.asyncio(loop_scope="class")
class TestSuperClassWithMark:
pass
class TestWithoutMark(TestSuperClassWithMark):
loop: asyncio.AbstractEventLoop
async def test_remember_loop(self):
TestWithoutMark.loop = asyncio.get_running_loop()
async def test_this_runs_in_same_loop(self):
assert asyncio.get_running_loop() is TestWithoutMark.loop
"""
)
)
result = pytester.runpytest("--asyncio-mode=strict")
result.assert_outcomes(passed=2)
def test_asyncio_mark_respects_the_loop_policy(
pytester: pytest.Pytester,
):
pytester.makeini("[pytest]\nasyncio_default_fixture_loop_scope = function")
pytester.makepyfile(
dedent(
"""\
import asyncio
import pytest
class CustomEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
pass
class TestUsesCustomEventLoop:
@pytest.fixture(scope="class")
def event_loop_policy(self):
return CustomEventLoopPolicy()
@pytest.mark.asyncio
async def test_uses_custom_event_loop_policy(self):
assert isinstance(
asyncio.get_event_loop_policy(),
CustomEventLoopPolicy,
)
@pytest.mark.asyncio
async def test_does_not_use_custom_event_loop_policy():
assert not isinstance(
asyncio.get_event_loop_policy(),
CustomEventLoopPolicy,
)
"""
)
)
pytest_args = ["--asyncio-mode=strict"]
if sys.version_info >= (3, 14):
pytest_args.extend(["-W", "default"])
result = pytester.runpytest(*pytest_args)
if sys.version_info >= (3, 14):
result.assert_outcomes(passed=2, warnings=3)
result.stdout.fnmatch_lines("*DefaultEventLoopPolicy*")
else:
result.assert_outcomes(passed=2)
def test_asyncio_mark_respects_parametrized_loop_policies(
pytester: pytest.Pytester,
):
pytester.makeini("[pytest]\nasyncio_default_fixture_loop_scope = function")
pytester.makepyfile(
dedent(
"""\
import asyncio
import pytest
@pytest.fixture(
scope="class",
params=[
asyncio.DefaultEventLoopPolicy(),
asyncio.DefaultEventLoopPolicy(),
]
)
def event_loop_policy(request):
return request.param
@pytest.mark.asyncio(loop_scope="class")
class TestWithDifferentLoopPolicies:
async def test_parametrized_loop(self, request):
pass
"""
)
)
pytest_args = ["--asyncio-mode=strict"]
if sys.version_info >= (3, 14):
pytest_args.extend(["-W", "default"])
result = pytester.runpytest(*pytest_args)
if sys.version_info >= (3, 14):
result.assert_outcomes(passed=2, warnings=2)
result.stdout.fnmatch_lines("*DefaultEventLoopPolicy*")
else:
result.assert_outcomes(passed=2)
def test_asyncio_mark_provides_class_scoped_loop_to_fixtures(
pytester: pytest.Pytester,
):
pytester.makeini("[pytest]\nasyncio_default_fixture_loop_scope = function")
pytester.makepyfile(
dedent(
"""\
import asyncio
import pytest
import pytest_asyncio
@pytest.mark.asyncio(loop_scope="class")
class TestClassScopedLoop:
loop: asyncio.AbstractEventLoop
@pytest_asyncio.fixture
async def my_fixture(self):
TestClassScopedLoop.loop = asyncio.get_running_loop()
@pytest.mark.asyncio
async def test_runs_is_same_loop_as_fixture(self, my_fixture):
assert asyncio.get_running_loop() is TestClassScopedLoop.loop
"""
)
)
result = pytester.runpytest("--asyncio-mode=strict")
result.assert_outcomes(passed=1)
def test_asyncio_mark_allows_combining_class_scoped_fixture_with_function_scoped_test(
pytester: pytest.Pytester,
):
pytester.makeini("[pytest]\nasyncio_default_fixture_loop_scope = function")
pytester.makepyfile(
dedent(
"""\
import asyncio
import pytest
import pytest_asyncio
loop: asyncio.AbstractEventLoop
class TestMixedScopes:
@pytest_asyncio.fixture(loop_scope="class", scope="class")
async def async_fixture(self):
global loop
loop = asyncio.get_running_loop()
@pytest.mark.asyncio(loop_scope="function")
async def test_runs_in_different_loop_as_fixture(self, async_fixture):
global loop
assert asyncio.get_running_loop() is not loop
"""
),
)
result = pytester.runpytest("--asyncio-mode=strict")
result.assert_outcomes(passed=1)
def test_asyncio_mark_handles_missing_event_loop_triggered_by_fixture(
pytester: pytest.Pytester,
):
pytester.makeini("[pytest]\nasyncio_default_fixture_loop_scope = function")
pytester.makepyfile(
dedent(
"""\
import pytest
import asyncio
class TestClass:
@pytest.fixture(scope="class")
def sets_event_loop_to_none(self):
# asyncio.run() creates a new event loop without closing the
# existing one. For any test, but the first one, this leads to
# a ResourceWarning when the discarded loop is destroyed by the
# garbage collector. We close the current loop to avoid this.
try:
asyncio.get_event_loop().close()
except RuntimeError:
pass
return asyncio.run(asyncio.sleep(0))
# asyncio.run() sets the current event loop to None when finished
@pytest.mark.asyncio(loop_scope="class")
# parametrization may impact fixture ordering
@pytest.mark.parametrize("n", (0, 1))
async def test_does_not_fail(self, sets_event_loop_to_none, n):
pass
"""
)
)
result = pytester.runpytest("--asyncio-mode=strict")
result.assert_outcomes(passed=2)
def test_standalone_test_does_not_trigger_warning_about_no_current_event_loop_being_set(
pytester: pytest.Pytester,
):
pytester.makeini("[pytest]\nasyncio_default_fixture_loop_scope = function")
pytester.makepyfile(
dedent(
"""\
import pytest
@pytest.mark.asyncio(loop_scope="class")
class TestClass:
async def test_anything(self):
pass
"""
)
)
result = pytester.runpytest("--asyncio-mode=strict")
result.assert_outcomes(warnings=0, passed=1)
| TestPyTestMark |
python | ray-project__ray | python/ray/_private/runtime_env/nsight.py | {
"start": 1263,
"end": 5263
} | class ____(RuntimeEnvPlugin):
name = "_nsight"
def __init__(self, resources_dir: str):
self.nsight_cmd = []
# replace this with better way to get logs dir
session_dir, runtime_dir = os.path.split(resources_dir)
self._nsight_dir = Path(session_dir) / "logs" / "nsight"
try_to_create_directory(self._nsight_dir)
async def _check_nsight_script(
self, nsight_config: Dict[str, str]
) -> Tuple[bool, str]:
"""
Function to validate if nsight_config is a valid nsight profile options
Args:
nsight_config: dictionary mapping nsight option to it's value
Returns:
a tuple consists of a boolean indicating if the nsight_config
is valid option and an error message if the nsight_config is invalid
"""
# use empty as nsight report test filename
nsight_config_copy = copy.deepcopy(nsight_config)
nsight_config_copy["o"] = str(Path(self._nsight_dir) / "empty")
nsight_cmd = parse_nsight_config(nsight_config_copy)
try:
nsight_cmd = nsight_cmd + [sys.executable, "-c", '""']
process = await asyncio.create_subprocess_exec(
*nsight_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = await process.communicate()
error_msg = stderr.strip() if stderr.strip() != "" else stdout.strip()
# cleanup test.nsys-rep file
clean_up_cmd = ["rm", f"{nsight_config_copy['o']}.nsys-rep"]
cleanup_process = await asyncio.create_subprocess_exec(
*clean_up_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_, _ = await cleanup_process.communicate()
if process.returncode == 0:
return True, None
else:
return False, error_msg
except FileNotFoundError:
return False, ("nsight is not installed")
async def create(
self,
uri: Optional[str],
runtime_env: "RuntimeEnv", # noqa: F821
context: RuntimeEnvContext,
logger: logging.Logger = default_logger,
) -> int:
nsight_config = runtime_env.nsight()
if not nsight_config:
return 0
if nsight_config and sys.platform != "linux":
raise RuntimeEnvSetupError(
"Nsight CLI is only available in Linux.\n"
"More information can be found in "
"https://docs.nvidia.com/nsight-compute/NsightComputeCli/index.html"
)
if isinstance(nsight_config, str):
if nsight_config == "default":
nsight_config = NSIGHT_DEFAULT_CONFIG
else:
raise RuntimeEnvSetupError(
f"Unsupported nsight config: {nsight_config}. "
"The supported config is 'default' or "
"Dictionary of nsight options"
)
is_valid_nsight_cmd, error_msg = await self._check_nsight_script(nsight_config)
if not is_valid_nsight_cmd:
logger.warning(error_msg)
raise RuntimeEnvSetupError(
"nsight profile failed to run with the following "
f"error message:\n {error_msg}"
)
# add set output path to logs dir
nsight_config["o"] = str(
Path(self._nsight_dir) / nsight_config.get("o", NSIGHT_DEFAULT_CONFIG["o"])
)
self.nsight_cmd = parse_nsight_config(nsight_config)
return 0
def modify_context(
self,
uris: List[str],
runtime_env: "RuntimeEnv", # noqa: F821
context: RuntimeEnvContext,
logger: Optional[logging.Logger] = default_logger,
):
logger.info("Running nsight profiler")
context.py_executable = " ".join(self.nsight_cmd) + " python"
| NsightPlugin |
python | neetcode-gh__leetcode | python/0068-text-justification.py | {
"start": 0,
"end": 1144
} | class ____:
def fullJustify(self, words: List[str], maxWidth: int) -> List[str]:
res = []
line = [] # Words in current line
length = 0 # Current line length
i = 0
while i < len(words):
if length + len(line) + len(words[i]) > maxWidth:
# Line complete
extra_space = maxWidth - length
word_cnt = len(line) - 1
spaces = extra_space // max(1, word_cnt)
remainder = extra_space % max(1, word_cnt)
for j in range(max(1, len(line) - 1)):
line[j] += " " * spaces
if remainder:
line[j] += " "
remainder -= 1
res.append("".join(line))
line, length = [], 0 # Reset line and length
line.append(words[i])
length += len(words[i])
i += 1
# Handling the last line
last_line = " ".join(line)
trail_spaces = maxWidth - len(last_line)
res.append(last_line + (trail_spaces * " "))
return res
| Solution |
python | walkccc__LeetCode | solutions/1835. Find XOR Sum of All Pairs Bitwise AND/1835.py | {
"start": 0,
"end": 185
} | class ____:
def getXORSum(self, arr1: list[int], arr2: list[int]) -> int:
return functools.reduce(
operator.xor, arr1) & functools.reduce(
operator.xor, arr2)
| Solution |
python | pytorch__pytorch | torch/storage.py | {
"start": 14637,
"end": 22007
} | class ____(torch._C.StorageBase, _StorageBase):
def __getitem__(self, *args, **kwargs):
if self.device.type == "meta":
raise NotImplementedError("Not available for 'meta' device type")
return super().__getitem__(*args, **kwargs)
@property
def is_cuda(self):
return self.device.type == "cuda"
@property
def is_hpu(self):
return self.device.type == "hpu"
@property
def filename(self) -> _Optional[str]:
"""Returns the file name associated with this storage.
The file name will be a string if the storage is on CPU and was created via
:meth:`~torch.from_file()` with ``shared`` as ``True``. This attribute is ``None`` otherwise.
"""
return self._get_filename()
@_share_memory_lock_protected
def share_memory_(self, *args, **kwargs):
"""
Moves the storage to shared memory.
This is a no-op for storages already in shared memory and for CUDA
storages, which do not need to be moved for sharing across processes.
Storages in shared memory cannot be resized.
Note that to mitigate issues like `this <https://github.com/pytorch/pytorch/issues/95606>`_
it is thread safe to call this function from multiple threads on the same object.
It is NOT thread safe though to call any other function on self without proper
synchronization. Please see :doc:`/notes/multiprocessing` for more details.
.. note::
When all references to a storage in shared memory are deleted, the associated shared memory
object will also be deleted. PyTorch has a special cleanup process to ensure that this happens
even if the current process exits unexpectedly.
It is worth noting the difference between :meth:`share_memory_` and :meth:`from_file` with ``shared = True``
#. ``share_memory_`` uses `shm_open(3) <https://man7.org/linux/man-pages/man3/shm_open.3.html>`_ to create a
POSIX shared memory object while :meth:`from_file` uses
`open(2) <https://man7.org/linux/man-pages/man2/open.2.html>`_ to open the filename passed by the user.
#. Both use an `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_ with ``MAP_SHARED``
to map the file/object into the current virtual address space
#. ``share_memory_`` will call ``shm_unlink(3)`` on the object after mapping it to make sure the shared memory
object is freed when no process has the object open. ``torch.from_file(shared=True)`` does not unlink the
file. This file is persistent and will remain until it is deleted by the user.
Returns:
``self``
"""
return super().share_memory_(*args, **kwargs)
@_share_memory_lock_protected
def _share_fd_cpu_(self, *args, **kwargs):
return super()._share_fd_cpu_(*args, **kwargs)
@_share_memory_lock_protected
def _share_filename_cpu_(self, *args, **kwargs):
return super()._share_filename_cpu_(*args, **kwargs)
def _load_from_bytes(b):
return torch.load(io.BytesIO(b), weights_only=False)
@functools.cache
def _new_dtypes():
# These are dtypes serialized as UntypedStorage unlike those in
# _dtype_to_storage_type_map
return {
torch.float8_e5m2,
torch.float8_e4m3fn,
torch.float8_e5m2fnuz,
torch.float8_e4m3fnuz,
torch.float8_e8m0fnu,
torch.float4_e2m1fn_x2,
torch.bits8,
torch.bits16,
torch.bits1x8,
torch.bits2x4,
torch.bits4x2,
torch.complex32,
torch.uint16,
torch.uint32,
torch.uint64,
}
@functools.cache
def _dtype_to_storage_type_map():
# NOTE: We should no longer add dtypes to this map. This map
# is only used for BC/FC with older PyTorch versions. Going forward,
# new dtypes of TypedStorage should not translate to a legacy
# <type>Storage class. Instead, new dtypes of TypedStorage should
# be serialized as an UntypedStorage paired with a torch.dtype
return {
torch.double: "DoubleStorage",
torch.float: "FloatStorage",
torch.half: "HalfStorage",
torch.long: "LongStorage",
torch.int: "IntStorage",
torch.int16: "ShortStorage",
torch.int8: "CharStorage",
torch.uint8: "ByteStorage",
torch.bool: "BoolStorage",
torch.bfloat16: "BFloat16Storage",
torch.cdouble: "ComplexDoubleStorage",
torch.cfloat: "ComplexFloatStorage",
torch.qint8: "QInt8Storage",
torch.qint32: "QInt32Storage",
torch.quint8: "QUInt8Storage",
torch.quint4x2: "QUInt4x2Storage",
torch.quint2x4: "QUInt2x4Storage",
}
@functools.cache
def _storage_type_to_dtype_map():
dtype_map = {val: key for key, val in _dtype_to_storage_type_map().items()}
return dtype_map
def _get_storage_from_sequence(sequence, dtype, device):
if dtype in [
torch.quint8,
torch.quint4x2,
torch.quint2x4,
torch.qint32,
torch.qint8,
]:
interpret_dtypes = {
torch.quint8: torch.uint8,
torch.quint4x2: torch.uint8,
torch.quint2x4: torch.uint8,
torch.qint32: torch.int32,
torch.qint8: torch.int8,
}
tmp_tensor = torch.tensor(
sequence, dtype=interpret_dtypes[dtype], device=device
)
else:
tmp_tensor = torch.tensor(sequence, dtype=dtype, device=device)
return tmp_tensor._typed_storage()._untyped_storage
def _isint(x):
if HAS_NUMPY:
return isinstance(x, (int, np.integer)) # pyrefly: ignore [missing-attribute]
else:
return isinstance(x, int)
_always_warn_typed_storage_removal = False
def _get_always_warn_typed_storage_removal():
return _always_warn_typed_storage_removal
def _set_always_warn_typed_storage_removal(always_warn):
global _always_warn_typed_storage_removal
assert isinstance(always_warn, bool)
_always_warn_typed_storage_removal = always_warn
def _warn_typed_storage_removal(stacklevel=2):
global _always_warn_typed_storage_removal
def is_first_time():
if not hasattr(_warn_typed_storage_removal, "has_warned"):
return True
else:
return not _warn_typed_storage_removal.__dict__["has_warned"]
if _get_always_warn_typed_storage_removal() or is_first_time():
message = (
"TypedStorage is deprecated. It will be removed in the future and "
"UntypedStorage will be the only storage class. This should only matter "
"to you if you are using storages directly. To access UntypedStorage "
"directly, use tensor.untyped_storage() instead of tensor.storage()"
)
warnings.warn(message, UserWarning, stacklevel=stacklevel + 1)
_warn_typed_storage_removal.__dict__["has_warned"] = True
def _reset_warn_typed_storage_removal():
_warn_typed_storage_removal.__dict__["has_warned"] = False
def _get_device_from_module(module: str):
last_part = module.rsplit(".", 1)[-1]
if last_part in ["cuda", torch._C._get_privateuse1_backend_name(), "hpu"]:
return last_part
else:
return "cpu"
| UntypedStorage |
python | scikit-learn__scikit-learn | sklearn/linear_model/_quantile.py | {
"start": 611,
"end": 10527
} | class ____(LinearModel, RegressorMixin, BaseEstimator):
"""Linear regression model that predicts conditional quantiles.
The linear :class:`QuantileRegressor` optimizes the pinball loss for a
desired `quantile` and is robust to outliers.
This model uses an L1 regularization like
:class:`~sklearn.linear_model.Lasso`.
Read more in the :ref:`User Guide <quantile_regression>`.
.. versionadded:: 1.0
Parameters
----------
quantile : float, default=0.5
The quantile that the model tries to predict. It must be strictly
between 0 and 1. If 0.5 (default), the model predicts the 50%
quantile, i.e. the median.
alpha : float, default=1.0
Regularization constant that multiplies the L1 penalty term.
fit_intercept : bool, default=True
Whether or not to fit the intercept.
solver : {'highs-ds', 'highs-ipm', 'highs', 'interior-point', \
'revised simplex'}, default='highs'
Method used by :func:`scipy.optimize.linprog` to solve the linear
programming formulation.
It is recommended to use the highs methods because
they are the fastest ones. Solvers "highs-ds", "highs-ipm" and "highs"
support sparse input data and, in fact, always convert to sparse csc.
From `scipy>=1.11.0`, "interior-point" is not available anymore.
.. versionchanged:: 1.4
The default of `solver` changed to `"highs"` in version 1.4.
solver_options : dict, default=None
Additional parameters passed to :func:`scipy.optimize.linprog` as
options. If `None` and if `solver='interior-point'`, then
`{"lstsq": True}` is passed to :func:`scipy.optimize.linprog` for the
sake of stability.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the features.
intercept_ : float
The intercept of the model, aka bias term.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
The actual number of iterations performed by the solver.
See Also
--------
Lasso : The Lasso is a linear model that estimates sparse coefficients
with l1 regularization.
HuberRegressor : Linear regression model that is robust to outliers.
Examples
--------
>>> from sklearn.linear_model import QuantileRegressor
>>> import numpy as np
>>> n_samples, n_features = 10, 2
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> # the two following lines are optional in practice
>>> from sklearn.utils.fixes import sp_version, parse_version
>>> reg = QuantileRegressor(quantile=0.8).fit(X, y)
>>> np.mean(y <= reg.predict(X))
np.float64(0.8)
"""
_parameter_constraints: dict = {
"quantile": [Interval(Real, 0, 1, closed="neither")],
"alpha": [Interval(Real, 0, None, closed="left")],
"fit_intercept": ["boolean"],
"solver": [
StrOptions(
{
"highs-ds",
"highs-ipm",
"highs",
"interior-point",
"revised simplex",
}
),
],
"solver_options": [dict, None],
}
def __init__(
self,
*,
quantile=0.5,
alpha=1.0,
fit_intercept=True,
solver="highs",
solver_options=None,
):
self.quantile = quantile
self.alpha = alpha
self.fit_intercept = fit_intercept
self.solver = solver
self.solver_options = solver_options
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
self : object
Returns self.
"""
X, y = validate_data(
self,
X,
y,
accept_sparse=["csc", "csr", "coo"],
y_numeric=True,
multi_output=False,
)
sample_weight = _check_sample_weight(sample_weight, X)
n_features = X.shape[1]
n_params = n_features
if self.fit_intercept:
n_params += 1
# Note that centering y and X with _preprocess_data does not work
# for quantile regression.
# The objective is defined as 1/n * sum(pinball loss) + alpha * L1.
# So we rescale the penalty term, which is equivalent.
alpha = np.sum(sample_weight) * self.alpha
if self.solver == "interior-point" and sp_version >= parse_version("1.11.0"):
raise ValueError(
f"Solver {self.solver} is not anymore available in SciPy >= 1.11.0."
)
if sparse.issparse(X) and self.solver not in ["highs", "highs-ds", "highs-ipm"]:
raise ValueError(
f"Solver {self.solver} does not support sparse X. "
"Use solver 'highs' for example."
)
# make default solver more stable
if self.solver_options is None and self.solver == "interior-point":
solver_options = {"lstsq": True}
else:
solver_options = self.solver_options
# After rescaling alpha, the minimization problem is
# min sum(pinball loss) + alpha * L1
# Use linear programming formulation of quantile regression
# min_x c x
# A_eq x = b_eq
# 0 <= x
# x = (s0, s, t0, t, u, v) = slack variables >= 0
# intercept = s0 - t0
# coef = s - t
# c = (0, alpha * 1_p, 0, alpha * 1_p, quantile * 1_n, (1-quantile) * 1_n)
# residual = y - X@coef - intercept = u - v
# A_eq = (1_n, X, -1_n, -X, diag(1_n), -diag(1_n))
# b_eq = y
# p = n_features
# n = n_samples
# 1_n = vector of length n with entries equal one
# see https://stats.stackexchange.com/questions/384909/
#
# Filtering out zero sample weights from the beginning makes life
# easier for the linprog solver.
indices = np.nonzero(sample_weight)[0]
n_indices = len(indices) # use n_mask instead of n_samples
if n_indices < len(sample_weight):
sample_weight = sample_weight[indices]
X = _safe_indexing(X, indices)
y = _safe_indexing(y, indices)
c = np.concatenate(
[
np.full(2 * n_params, fill_value=alpha),
sample_weight * self.quantile,
sample_weight * (1 - self.quantile),
]
)
if self.fit_intercept:
# do not penalize the intercept
c[0] = 0
c[n_params] = 0
if self.solver in ["highs", "highs-ds", "highs-ipm"]:
# Note that highs methods always use a sparse CSC memory layout internally,
# even for optimization problems parametrized using dense numpy arrays.
# Therefore, we work with CSC matrices as early as possible to limit
# unnecessary repeated memory copies.
eye = sparse.eye(n_indices, dtype=X.dtype, format="csc")
if self.fit_intercept:
ones = sparse.csc_matrix(np.ones(shape=(n_indices, 1), dtype=X.dtype))
A_eq = sparse.hstack([ones, X, -ones, -X, eye, -eye], format="csc")
else:
A_eq = sparse.hstack([X, -X, eye, -eye], format="csc")
else:
eye = np.eye(n_indices)
if self.fit_intercept:
ones = np.ones((n_indices, 1))
A_eq = np.concatenate([ones, X, -ones, -X, eye, -eye], axis=1)
else:
A_eq = np.concatenate([X, -X, eye, -eye], axis=1)
b_eq = y
result = linprog(
c=c,
A_eq=A_eq,
b_eq=b_eq,
method=self.solver,
options=solver_options,
)
solution = result.x
if not result.success:
failure = {
1: "Iteration limit reached.",
2: "Problem appears to be infeasible.",
3: "Problem appears to be unbounded.",
4: "Numerical difficulties encountered.",
}
warnings.warn(
"Linear programming for QuantileRegressor did not succeed.\n"
f"Status is {result.status}: "
+ failure.setdefault(result.status, "unknown reason")
+ "\n"
+ "Result message of linprog:\n"
+ result.message,
ConvergenceWarning,
)
# positive slack - negative slack
# solution is an array with (params_pos, params_neg, u, v)
params = solution[:n_params] - solution[n_params : 2 * n_params]
self.n_iter_ = result.nit
if self.fit_intercept:
self.coef_ = params[1:]
self.intercept_ = params[0]
else:
self.coef_ = params
self.intercept_ = 0.0
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
| QuantileRegressor |
python | Netflix__metaflow | metaflow/plugins/azure/azure_tail.py | {
"start": 332,
"end": 3375
} | class ____(object):
def __init__(self, blob_full_uri):
"""Location should be something like <container_name>/blob"""
container_name, blob_name = parse_azure_full_path(blob_full_uri)
if not blob_name:
raise MetaflowException(
msg="Failed to parse blob_full_uri into <container_name>/<blob_name> (got %s)"
% blob_full_uri
)
service = get_azure_blob_service_client()
container = service.get_container_client(container_name)
self._blob_client = container.get_blob_client(blob_name)
self._pos = 0
self._tail = b""
def __iter__(self):
buf = self._fill_buf()
if buf is not None:
# If there are no line breaks in the entries
# file, then we will yield nothing, ever.
#
# This apes S3 tail. We can fix it here and in S3
# if/when this becomes an issue. It boils down to
# knowing when to give up waiting on partial lines
# to become full lines (tricky, need more info).
#
# Likely this has been OK because we control the
# line-break presence in the objects we tail.
for line in buf:
if line.endswith(b"\n"):
yield line
else:
self._tail = line
break
def _make_range_request(self):
try:
# Yes we read to the end... memory blow up is possible. We can improve by specifying length param
return self._blob_client.download_blob(offset=self._pos).readall()
except ResourceNotFoundError:
# Maybe the log hasn't been uploaded yet, but will be soon.
return None
except HttpResponseError as e:
# be silent on range errors - it means log did not advance
if e.status_code != 416:
print(
"Failed to tail log from step (status code = %d)" % (e.status_code,)
)
return None
except Exception as e:
print("Failed to tail log from step (%s)" % type(e))
return None
def _fill_buf(self):
data = self._make_range_request()
if data is None:
return None
if data:
buf = BytesIO(self._tail + data)
self._pos += len(data)
self._tail = b""
return buf
else:
return None
if __name__ == "__main__":
# This main program is for debugging and testing purposes
import argparse
parser = argparse.ArgumentParser(
description="Tail an Azure Blob. Must specify METAFLOW_AZURE_STORAGE_BLOB_SERVICE_ENDPOINT in environment."
)
parser.add_argument(
"blob_full_uri", help="The blob to tail. Format is <container_name>/<blob>"
)
args = parser.parse_args()
az_tail = AzureTail(args.blob_full_uri)
for line in az_tail:
print(line.strip().decode("utf-8"))
| AzureTail |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 1469,
"end": 1517
} | class ____(Class5[Sequence[T]]): ...
| Class5_Child3 |
python | walkccc__LeetCode | solutions/1814. Count Nice Pairs in an Array/1814.py | {
"start": 0,
"end": 222
} | class ____:
def countNicePairs(self, nums: list[int]) -> int:
freqs = collections.Counter(num - int(str(num)[::-1]) for num in nums)
return sum(freq * (freq - 1) // 2 for freq in freqs.values()) % 1000000007
| Solution |
python | mwaskom__seaborn | tests/_stats/test_order.py | {
"start": 556,
"end": 2648
} | class ____(Fixtures):
def test_int_k(self, df):
ori = "x"
gb = self.get_groupby(df, ori)
res = Perc(3)(df, gb, ori, {})
percentiles = [0, 50, 100]
assert_array_equal(res["percentile"], percentiles)
assert_array_equal(res["y"], np.percentile(df["y"], percentiles))
def test_list_k(self, df):
ori = "x"
gb = self.get_groupby(df, ori)
percentiles = [0, 20, 100]
res = Perc(k=percentiles)(df, gb, ori, {})
assert_array_equal(res["percentile"], percentiles)
assert_array_equal(res["y"], np.percentile(df["y"], percentiles))
def test_orientation(self, df):
df = df.rename(columns={"x": "y", "y": "x"})
ori = "y"
gb = self.get_groupby(df, ori)
res = Perc(k=3)(df, gb, ori, {})
assert_array_equal(res["x"], np.percentile(df["x"], [0, 50, 100]))
def test_method(self, df):
ori = "x"
gb = self.get_groupby(df, ori)
method = "nearest"
res = Perc(k=5, method=method)(df, gb, ori, {})
percentiles = [0, 25, 50, 75, 100]
if _version_predates(np, "1.22.0"):
expected = np.percentile(df["y"], percentiles, interpolation=method)
else:
expected = np.percentile(df["y"], percentiles, method=method)
assert_array_equal(res["y"], expected)
def test_grouped(self, df, rng):
ori = "x"
df = df.assign(x=rng.choice(["a", "b", "c"], len(df)))
gb = self.get_groupby(df, ori)
k = [10, 90]
res = Perc(k)(df, gb, ori, {})
for x, res_x in res.groupby("x"):
assert_array_equal(res_x["percentile"], k)
expected = np.percentile(df.loc[df["x"] == x, "y"], k)
assert_array_equal(res_x["y"], expected)
def test_with_na(self, df):
ori = "x"
df.loc[:5, "y"] = np.nan
gb = self.get_groupby(df, ori)
k = [10, 90]
res = Perc(k)(df, gb, ori, {})
expected = np.percentile(df["y"].dropna(), k)
assert_array_equal(res["y"], expected)
| TestPerc |
python | getsentry__sentry | src/sentry/replays/usecases/query/conditions/selector.py | {
"start": 7322,
"end": 7720
} | class ____(ComputedBase):
"""Streaming click selector composite condition class."""
@staticmethod
def visit_eq(value: list[QueryType]) -> Condition:
return contains(ClickSelectorComposite.visit_eq(value))
@staticmethod
def visit_neq(value: list[QueryType]) -> Condition:
return does_not_contain(ClickSelectorComposite.visit_eq(value))
| SumOfClickSelectorComposite |
python | PyCQA__pylint | tests/functional/r/regression/regression_no_member_1078.py | {
"start": 205,
"end": 325
} | class ____:
def test(self):
"a"
test.__doc__ += "b"
print(Cls().test.__doc__)
print(Cls.test.__doc__)
| Cls |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.