language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__warehouse | warehouse/accounts/forms.py | {
"start": 3631,
"end": 4164
} | class ____:
recovery_code_value = wtforms.StringField(
validators=[
wtforms.validators.InputRequired(),
PreventNullBytesValidator(),
wtforms.validators.Regexp(
rf"^ *([0-9a-f] *){{{2 * RECOVERY_CODE_BYTES}}}$",
message=_(
"Recovery Codes must be ${recovery_code_length} characters.",
mapping={"recovery_code_length": 2 * RECOVERY_CODE_BYTES},
),
),
]
)
| RecoveryCodeValueMixin |
python | doocs__leetcode | solution/0100-0199/0144.Binary Tree Preorder Traversal/Solution3.py | {
"start": 192,
"end": 840
} | class ____:
def preorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
ans = []
while root:
if root.left is None:
ans.append(root.val)
root = root.right
else:
prev = root.left
while prev.right and prev.right != root:
prev = prev.right
if prev.right is None:
ans.append(root.val)
prev.right = root
root = root.left
else:
prev.right = None
root = root.right
return ans
| Solution |
python | sympy__sympy | sympy/stats/rv.py | {
"start": 4343,
"end": 5698
} | class ____(Basic):
"""
A Probability Space.
Explanation
===========
Probability Spaces encode processes that equal different values
probabilistically. These underly Random Symbols which occur in SymPy
expressions and contain the mechanics to evaluate statistical statements.
See Also
========
sympy.stats.crv.ContinuousPSpace
sympy.stats.frv.FinitePSpace
"""
is_Finite: bool | None = None # Fails test if not set to None
is_Continuous: bool | None = None # Fails test if not set to None
is_Discrete: bool | None = None # Fails test if not set to None
is_real: bool | None
@property
def domain(self):
return self.args[0]
@property
def density(self):
return self.args[1]
@property
def values(self):
return frozenset(RandomSymbol(sym, self) for sym in self.symbols)
@property
def symbols(self):
return self.domain.symbols
def where(self, condition):
raise NotImplementedError()
def compute_density(self, expr):
raise NotImplementedError()
def sample(self, size=(), library='scipy', seed=None):
raise NotImplementedError()
def probability(self, condition):
raise NotImplementedError()
def compute_expectation(self, expr):
raise NotImplementedError()
| PSpace |
python | HIPS__autograd | examples/convnet.py | {
"start": 4899,
"end": 7083
} | class ____(full_layer):
def nonlinearity(self, x):
return x - logsumexp(x, axis=1, keepdims=True)
if __name__ == "__main__":
# Network parameters
L2_reg = 1.0
input_shape = (1, 28, 28)
layer_specs = [
conv_layer((5, 5), 6),
maxpool_layer((2, 2)),
conv_layer((5, 5), 16),
maxpool_layer((2, 2)),
tanh_layer(120),
tanh_layer(84),
softmax_layer(10),
]
# Training parameters
param_scale = 0.1
learning_rate = 1e-3
momentum = 0.9
batch_size = 256
num_epochs = 50
# Load and process MNIST data
print("Loading training data...")
add_color_channel = lambda x: x.reshape((x.shape[0], 1, x.shape[1], x.shape[2]))
one_hot = lambda x, K: np.array(x[:, None] == np.arange(K)[None, :], dtype=int)
train_images, train_labels, test_images, test_labels = data_mnist.mnist()
train_images = add_color_channel(train_images) / 255.0
test_images = add_color_channel(test_images) / 255.0
train_labels = one_hot(train_labels, 10)
test_labels = one_hot(test_labels, 10)
N_data = train_images.shape[0]
# Make neural net functions
N_weights, pred_fun, loss_fun, frac_err = make_nn_funs(input_shape, layer_specs, L2_reg)
loss_grad = grad(loss_fun)
# Initialize weights
rs = npr.RandomState()
W = rs.randn(N_weights) * param_scale
# Check the gradients numerically, just to be safe
# quick_grad_check(loss_fun, W, (train_images[:50], train_labels[:50]))
print(" Epoch | Train err | Test error ")
def print_perf(epoch, W):
test_perf = frac_err(W, test_images, test_labels)
train_perf = frac_err(W, train_images, train_labels)
print(f"{epoch:15}|{train_perf:15}|{test_perf:15}")
# Train with sgd
batch_idxs = make_batches(N_data, batch_size)
cur_dir = np.zeros(N_weights)
for epoch in range(num_epochs):
print_perf(epoch, W)
for idxs in batch_idxs:
grad_W = loss_grad(W, train_images[idxs], train_labels[idxs])
cur_dir = momentum * cur_dir + (1.0 - momentum) * grad_W
W -= learning_rate * cur_dir
| softmax_layer |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_default_format03.py | {
"start": 315,
"end": 1074
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("default_format03.xlsx")
def test_create_file(self):
"""Test the creation of a file with user defined default format"""
workbook = Workbook(
self.got_filename,
{
"default_format_properties": {"font_name": "Arial", "font_size": 11},
"default_row_height": 19,
"default_column_width": 72,
},
)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/maximum-difference-between-node-and-ancestor.py | {
"start": 818,
"end": 1412
} | class ____(object):
def maxAncestorDiff(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def maxAncestorDiffHelper(node, mx, mn):
if not node:
return 0
result = max(mx-node.val, node.val-mn)
mx = max(mx, node.val)
mn = min(mn, node.val)
result = max(result, maxAncestorDiffHelper(node.left, mx, mn))
result = max(result, maxAncestorDiffHelper(node.right, mx, mn))
return result
return maxAncestorDiffHelper(root, 0, float("inf"))
| Solution2 |
python | sympy__sympy | sympy/physics/mechanics/joint.py | {
"start": 20934,
"end": 31429
} | class ____(Joint):
"""Pin (Revolute) Joint.
.. raw:: html
:file: ../../../doc/src/explanation/modules/physics/mechanics/PinJoint.svg
Explanation
===========
A pin joint is defined such that the joint rotation axis is fixed in both
the child and parent and the location of the joint is relative to the mass
center of each body. The child rotates an angle, θ, from the parent about
the rotation axis and has a simple angular speed, ω, relative to the
parent. The direction cosine matrix between the child interframe and
parent interframe is formed using a simple rotation about the joint axis.
The page on the joints framework gives a more detailed explanation of the
intermediate frames.
Parameters
==========
name : string
A unique name for the joint.
parent : Particle or RigidBody
The parent body of joint.
child : Particle or RigidBody
The child body of joint.
coordinates : dynamicsymbol, optional
Generalized coordinates of the joint.
speeds : dynamicsymbol, optional
Generalized speeds of joint.
parent_point : Point or Vector, optional
Attachment point where the joint is fixed to the parent body. If a
vector is provided, then the attachment point is computed by adding the
vector to the body's mass center. The default value is the parent's mass
center.
child_point : Point or Vector, optional
Attachment point where the joint is fixed to the child body. If a
vector is provided, then the attachment point is computed by adding the
vector to the body's mass center. The default value is the child's mass
center.
parent_axis : Vector, optional
.. deprecated:: 1.12
Axis fixed in the parent body which aligns with an axis fixed in the
child body. The default is the x axis of parent's reference frame.
For more information on this deprecation, see
:ref:`deprecated-mechanics-joint-axis`.
child_axis : Vector, optional
.. deprecated:: 1.12
Axis fixed in the child body which aligns with an axis fixed in the
parent body. The default is the x axis of child's reference frame.
For more information on this deprecation, see
:ref:`deprecated-mechanics-joint-axis`.
parent_interframe : ReferenceFrame, optional
Intermediate frame of the parent body with respect to which the joint
transformation is formulated. If a Vector is provided then an interframe
is created which aligns its X axis with the given vector. The default
value is the parent's own frame.
child_interframe : ReferenceFrame, optional
Intermediate frame of the child body with respect to which the joint
transformation is formulated. If a Vector is provided then an interframe
is created which aligns its X axis with the given vector. The default
value is the child's own frame.
joint_axis : Vector
The axis about which the rotation occurs. Note that the components
of this axis are the same in the parent_interframe and child_interframe.
parent_joint_pos : Point or Vector, optional
.. deprecated:: 1.12
This argument is replaced by parent_point and will be removed in a
future version.
See :ref:`deprecated-mechanics-joint-pos` for more information.
child_joint_pos : Point or Vector, optional
.. deprecated:: 1.12
This argument is replaced by child_point and will be removed in a
future version.
See :ref:`deprecated-mechanics-joint-pos` for more information.
Attributes
==========
name : string
The joint's name.
parent : Particle or RigidBody
The joint's parent body.
child : Particle or RigidBody
The joint's child body.
coordinates : Matrix
Matrix of the joint's generalized coordinates. The default value is
``dynamicsymbols(f'q_{joint.name}')``.
speeds : Matrix
Matrix of the joint's generalized speeds. The default value is
``dynamicsymbols(f'u_{joint.name}')``.
parent_point : Point
Attachment point where the joint is fixed to the parent body.
child_point : Point
Attachment point where the joint is fixed to the child body.
parent_axis : Vector
The axis fixed in the parent frame that represents the joint.
child_axis : Vector
The axis fixed in the child frame that represents the joint.
parent_interframe : ReferenceFrame
Intermediate frame of the parent body with respect to which the joint
transformation is formulated.
child_interframe : ReferenceFrame
Intermediate frame of the child body with respect to which the joint
transformation is formulated.
joint_axis : Vector
The axis about which the rotation occurs. Note that the components of
this axis are the same in the parent_interframe and child_interframe.
kdes : Matrix
Kinematical differential equations of the joint.
Examples
=========
A single pin joint is created from two bodies and has the following basic
attributes:
>>> from sympy.physics.mechanics import RigidBody, PinJoint
>>> parent = RigidBody('P')
>>> parent
P
>>> child = RigidBody('C')
>>> child
C
>>> joint = PinJoint('PC', parent, child)
>>> joint
PinJoint: PC parent: P child: C
>>> joint.name
'PC'
>>> joint.parent
P
>>> joint.child
C
>>> joint.parent_point
P_masscenter
>>> joint.child_point
C_masscenter
>>> joint.parent_axis
P_frame.x
>>> joint.child_axis
C_frame.x
>>> joint.coordinates
Matrix([[q_PC(t)]])
>>> joint.speeds
Matrix([[u_PC(t)]])
>>> child.frame.ang_vel_in(parent.frame)
u_PC(t)*P_frame.x
>>> child.frame.dcm(parent.frame)
Matrix([
[1, 0, 0],
[0, cos(q_PC(t)), sin(q_PC(t))],
[0, -sin(q_PC(t)), cos(q_PC(t))]])
>>> joint.child_point.pos_from(joint.parent_point)
0
To further demonstrate the use of the pin joint, the kinematics of simple
double pendulum that rotates about the Z axis of each connected body can be
created as follows.
>>> from sympy import symbols, trigsimp
>>> from sympy.physics.mechanics import RigidBody, PinJoint
>>> l1, l2 = symbols('l1 l2')
First create bodies to represent the fixed ceiling and one to represent
each pendulum bob.
>>> ceiling = RigidBody('C')
>>> upper_bob = RigidBody('U')
>>> lower_bob = RigidBody('L')
The first joint will connect the upper bob to the ceiling by a distance of
``l1`` and the joint axis will be about the Z axis for each body.
>>> ceiling_joint = PinJoint('P1', ceiling, upper_bob,
... child_point=-l1*upper_bob.frame.x,
... joint_axis=ceiling.frame.z)
The second joint will connect the lower bob to the upper bob by a distance
of ``l2`` and the joint axis will also be about the Z axis for each body.
>>> pendulum_joint = PinJoint('P2', upper_bob, lower_bob,
... child_point=-l2*lower_bob.frame.x,
... joint_axis=upper_bob.frame.z)
Once the joints are established the kinematics of the connected bodies can
be accessed. First the direction cosine matrices of pendulum link relative
to the ceiling are found:
>>> upper_bob.frame.dcm(ceiling.frame)
Matrix([
[ cos(q_P1(t)), sin(q_P1(t)), 0],
[-sin(q_P1(t)), cos(q_P1(t)), 0],
[ 0, 0, 1]])
>>> trigsimp(lower_bob.frame.dcm(ceiling.frame))
Matrix([
[ cos(q_P1(t) + q_P2(t)), sin(q_P1(t) + q_P2(t)), 0],
[-sin(q_P1(t) + q_P2(t)), cos(q_P1(t) + q_P2(t)), 0],
[ 0, 0, 1]])
The position of the lower bob's masscenter is found with:
>>> lower_bob.masscenter.pos_from(ceiling.masscenter)
l1*U_frame.x + l2*L_frame.x
The angular velocities of the two pendulum links can be computed with
respect to the ceiling.
>>> upper_bob.frame.ang_vel_in(ceiling.frame)
u_P1(t)*C_frame.z
>>> lower_bob.frame.ang_vel_in(ceiling.frame)
u_P1(t)*C_frame.z + u_P2(t)*U_frame.z
And finally, the linear velocities of the two pendulum bobs can be computed
with respect to the ceiling.
>>> upper_bob.masscenter.vel(ceiling.frame)
l1*u_P1(t)*U_frame.y
>>> lower_bob.masscenter.vel(ceiling.frame)
l1*u_P1(t)*U_frame.y + l2*(u_P1(t) + u_P2(t))*L_frame.y
"""
def __init__(self, name, parent, child, coordinates=None, speeds=None,
parent_point=None, child_point=None, parent_interframe=None,
child_interframe=None, parent_axis=None, child_axis=None,
joint_axis=None, parent_joint_pos=None, child_joint_pos=None):
self._joint_axis = joint_axis
super().__init__(name, parent, child, coordinates, speeds, parent_point,
child_point, parent_interframe, child_interframe,
parent_axis, child_axis, parent_joint_pos,
child_joint_pos)
def __str__(self):
return (f'PinJoint: {self.name} parent: {self.parent} '
f'child: {self.child}')
@property
def joint_axis(self):
"""Axis about which the child rotates with respect to the parent."""
return self._joint_axis
def _generate_coordinates(self, coordinate):
return self._fill_coordinate_list(coordinate, 1, 'q')
def _generate_speeds(self, speed):
return self._fill_coordinate_list(speed, 1, 'u')
def _orient_frames(self):
self._joint_axis = self._axis(self.joint_axis, self.parent_interframe)
self.child_interframe.orient_axis(
self.parent_interframe, self.joint_axis, self.coordinates[0])
def _set_angular_velocity(self):
self.child_interframe.set_ang_vel(self.parent_interframe, self.speeds[
0] * self.joint_axis.normalize())
def _set_linear_velocity(self):
self.child_point.set_pos(self.parent_point, 0)
self.parent_point.set_vel(self._parent_frame, 0)
self.child_point.set_vel(self._child_frame, 0)
self.child.masscenter.v2pt_theory(self.parent_point,
self._parent_frame, self._child_frame)
| PinJoint |
python | apache__airflow | airflow-core/src/airflow/timetables/trigger.py | {
"start": 10050,
"end": 14487
} | class ____(Timetable):
"""
Timetable that triggers DAG runs according to multiple cron expressions.
This combines multiple ``CronTriggerTimetable`` instances underneath, and
triggers a DAG run whenever one of the timetables want to trigger a run.
Only at most one run is triggered for any given time, even if more than one
timetable fires at the same time.
"""
def __init__(
self,
*crons: str,
timezone: str | Timezone | FixedTimezone,
interval: datetime.timedelta | relativedelta = datetime.timedelta(),
run_immediately: bool | datetime.timedelta = False,
) -> None:
if not crons:
raise ValueError("cron expression required")
self._timetables = [
CronTriggerTimetable(cron, timezone=timezone, interval=interval, run_immediately=run_immediately)
for cron in crons
]
self.description = ", ".join(t.description for t in self._timetables)
@classmethod
def deserialize(cls, data: dict[str, Any]) -> Timetable:
from airflow.serialization.serialized_objects import decode_timezone
return cls(
*data["expressions"],
timezone=decode_timezone(data["timezone"]),
interval=_deserialize_interval(data["interval"]),
run_immediately=_deserialize_run_immediately(data["run_immediately"]),
)
def serialize(self) -> dict[str, Any]:
from airflow.serialization.serialized_objects import encode_timezone
# All timetables share the same timezone, interval, and run_immediately
# values, so we can just use the first to represent them.
timetable = self._timetables[0]
return {
"expressions": [t._expression for t in self._timetables],
"timezone": encode_timezone(timetable._timezone),
"interval": _serialize_interval(timetable._interval),
"run_immediately": _serialize_run_immediately(timetable._run_immediately),
}
@property
def summary(self) -> str:
return ", ".join(t.summary for t in self._timetables)
def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
return min(
(t.infer_manual_data_interval(run_after=run_after) for t in self._timetables),
key=operator.attrgetter("start"),
)
def next_dagrun_info(
self,
*,
last_automated_data_interval: DataInterval | None,
restriction: TimeRestriction,
) -> DagRunInfo | None:
infos = (
timetable.next_dagrun_info(
last_automated_data_interval=last_automated_data_interval,
restriction=restriction,
)
for timetable in self._timetables
)
if restriction.catchup:
select_key = self._dagrun_info_sort_key_catchup
else:
select_key = functools.partial(self._dagrun_info_sort_key_no_catchup, now=time.time())
return min(infos, key=select_key)
@staticmethod
def _dagrun_info_sort_key_catchup(info: DagRunInfo | None) -> float:
"""
Sort key for DagRunInfo values when catchup=True.
This is passed as the sort key to ``min`` in ``next_dagrun_info`` to
find the next closest run, ordered by logical date.
The sort is done by simply returning the logical date converted to a
Unix timestamp. If the input is *None* (no next run), *inf* is returned
so it's selected last.
"""
if info is None:
return math.inf
return info.logical_date.timestamp()
@staticmethod
def _dagrun_info_sort_key_no_catchup(info: DagRunInfo | None, *, now: float) -> float:
"""
Sort key for DagRunInfo values when catchup=False.
When catchup is disabled, we want to ignore as many runs as possible
without going over the current time, but if no runs should happen right
now, we want to choose the earliest opportunity.
Combining with the ``min`` sorter in ``next_dagrun_info``, we should
order values by ``-logical_date`` if they are earlier than or at current
time, but ``+logical_date`` if later.
"""
if info is None:
return math.inf
if (ts := info.logical_date.timestamp()) <= now:
return -ts
return ts
| MultipleCronTriggerTimetable |
python | davidhalter__parso | test/normalizer_issue_files/E30not.py | {
"start": 87,
"end": 167
} | class ____:
pass
# Okay
# -*- coding: utf-8 -*-
def foo():
pass
# Okay
| X |
python | weaviate__weaviate-python-client | weaviate/collections/classes/generative.py | {
"start": 1732,
"end": 2873
} | class ____(_GenerativeConfigRuntime):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.ANTHROPIC, frozen=True, exclude=True
)
base_url: Optional[AnyHttpUrl]
max_tokens: Optional[int]
model: Optional[str]
temperature: Optional[float]
top_k: Optional[int]
top_p: Optional[float]
stop_sequences: Optional[List[str]]
def _to_grpc(self, opts: _GenerativeConfigRuntimeOptions) -> generative_pb2.GenerativeProvider:
return generative_pb2.GenerativeProvider(
return_metadata=opts.return_metadata,
anthropic=generative_pb2.GenerativeAnthropic(
base_url=_parse_anyhttpurl(self.base_url),
max_tokens=self.max_tokens,
model=self.model,
temperature=self.temperature,
top_k=self.top_k,
top_p=self.top_p,
stop_sequences=_to_text_array(self.stop_sequences),
images=_to_text_array(opts.images),
image_properties=_to_text_array(opts.image_properties),
),
)
| _GenerativeAnthropic |
python | kamyu104__LeetCode-Solutions | Python/mice-and-cheese.py | {
"start": 68,
"end": 1588
} | class ____(object):
def miceAndCheese(self, reward1, reward2, k):
"""
:type reward1: List[int]
:type reward2: List[int]
:type k: int
:rtype: int
"""
def nth_element(nums, n, left=0, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
right = len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
for i in xrange(len(reward1)):
reward1[i] -= reward2[i]
nth_element(reward1, k-1, compare=lambda a, b: a > b)
return sum(reward2)+sum(reward1[i] for i in xrange(k))
| Solution |
python | google__jax | jax/_src/effects.py | {
"start": 2591,
"end": 2660
} | class ____:
"""A generic side-effect."""
Effects = Set[Effect]
| Effect |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydoclint/DOC502_numpy.py | {
"start": 0,
"end": 2999
} | class ____(Exception):
...
# DOC502
def calculate_speed(distance: float, time: float) -> float:
"""
Calculate speed as distance divided by time.
Parameters
----------
distance : float
Distance traveled.
time : float
Time spent traveling.
Returns
-------
float
Speed as distance divided by time.
Raises
------
FasterThanLightError
If speed is greater than the speed of light.
"""
return distance / time
# DOC502
def calculate_speed(distance: float, time: float) -> float:
"""
Calculate speed as distance divided by time.
Parameters
----------
distance : float
Distance traveled.
time : float
Time spent traveling.
Returns
-------
float
Speed as distance divided by time.
Raises
------
FasterThanLightError
If speed is greater than the speed of light.
DivisionByZero
If attempting to divide by zero.
"""
return distance / time
# DOC502
def calculate_speed(distance: float, time: float) -> float:
"""
Calculate speed as distance divided by time.
Parameters
----------
distance : float
Distance traveled.
time : float
Time spent traveling.
Returns
-------
float
Speed as distance divided by time.
Raises
------
FasterThanLightError
If speed is greater than the speed of light.
DivisionByZero
If attempting to divide by zero.
"""
try:
return distance / time
except ZeroDivisionError as exc:
raise FasterThanLightError from exc
# This is fine
def calculate_speed(distance: float, time: float) -> float:
"""Calculate speed as distance divided by time.
ACalculate speed as distance divided by time.
Parameters
----------
distance : float
Distance traveled.
time : float
Time spent traveling.
Returns
-------
float
Speed as distance divided by time.
Raises
------
TypeError
If one or both of the parameters is not a number.
ZeroDivisionError
If attempting to divide by zero.
"""
try:
return distance / time
except ZeroDivisionError:
print("Oh no, why would you divide something by zero?")
raise
except TypeError:
print("Not a number? Shame on you!")
raise
# DOC502 regression for Sphinx directive after Raises (issue #18959)
def foo():
"""First line.
Raises
------
ValueError
some text
.. versionadded:: 0.7.0
The ``init_kwargs`` argument.
"""
raise ValueError
# Make sure we don't bail out on a Sphinx directive in the description of one
# of the exceptions
def foo():
"""First line.
Raises
------
ValueError
some text
.. math:: e^{xception}
ZeroDivisionError
Will not be raised, DOC502
"""
raise ValueError
| FasterThanLightError |
python | getsentry__sentry | tests/sentry/notifications/platform/discord/test_provider.py | {
"start": 1715,
"end": 10320
} | class ____(TestCase):
def test_default_renderer(self) -> None:
data = MockNotification(message="test")
template = MockNotificationTemplate()
rendered_template = template.render(data)
renderer = DiscordNotificationProvider.get_renderer(
data=data, category=NotificationCategory.DEBUG
)
renderable = renderer.render(data=data, rendered_template=rendered_template)
# Test basic structure
assert "content" in renderable
assert "embeds" in renderable
assert "components" in renderable
# Test embed content
embeds = renderable["embeds"]
assert len(embeds) == 1
embed = embeds[0]
description = embed["description"]
assert description == "\ntest"
assert embed["title"] == "Mock Notification"
assert embed["footer"]["text"] == "This is a mock footer"
assert (
embed["image"]["url"]
== "https://raw.githubusercontent.com/knobiknows/all-the-bufo/main/all-the-bufo/bufo-pog.png"
)
# Test components (action buttons)
components = renderable["components"]
assert len(components) == 1
action_row = components[0]
assert is_action_row(action_row)
assert len(action_row["components"]) == 1
button = action_row["components"][0]
assert_button_properties(button, "Visit Sentry", "https://www.sentry.io")
def test_renderer_without_chart(self) -> None:
"""Test rendering when no chart is provided"""
from sentry.notifications.platform.types import (
NotificationBodyFormattingBlockType,
NotificationBodyTextBlockType,
NotificationRenderedAction,
NotificationRenderedTemplate,
ParagraphBlock,
PlainTextBlock,
)
rendered_template = NotificationRenderedTemplate(
subject="Test Without Chart",
body=[
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text="test without chart",
)
],
)
],
actions=[
NotificationRenderedAction(label="Visit Sentry", link="https://www.sentry.io")
],
footer="Test footer",
chart=None, # No chart
)
data = MockNotification(message="test without chart")
renderer = DiscordNotificationProvider.get_renderer(
data=data, category=NotificationCategory.DEBUG
)
renderable = renderer.render(data=data, rendered_template=rendered_template)
embed = renderable["embeds"][0]
assert "image" not in embed or embed.get("image") is None
def test_renderer_without_footer(self) -> None:
"""Test rendering when no footer is provided"""
from sentry.notifications.platform.types import (
NotificationBodyFormattingBlockType,
NotificationBodyTextBlockType,
NotificationRenderedAction,
NotificationRenderedImage,
NotificationRenderedTemplate,
ParagraphBlock,
PlainTextBlock,
)
rendered_template = NotificationRenderedTemplate(
subject="Test Without Footer",
body=[
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text="test without footer",
)
],
)
],
actions=[
NotificationRenderedAction(label="Visit Sentry", link="https://www.sentry.io")
],
footer=None, # No footer
chart=NotificationRenderedImage(
url="https://example.com/chart.png",
alt_text="Test Chart",
),
)
data = MockNotification(message="test without footer")
renderer = DiscordNotificationProvider.get_renderer(
data=data, category=NotificationCategory.DEBUG
)
renderable = renderer.render(data=data, rendered_template=rendered_template)
embed = renderable["embeds"][0]
assert "footer" not in embed or embed.get("footer") is None
def test_renderer_without_actions(self) -> None:
"""Test rendering when no actions are provided"""
from sentry.notifications.platform.types import (
NotificationBodyFormattingBlockType,
NotificationBodyTextBlockType,
NotificationRenderedImage,
NotificationRenderedTemplate,
ParagraphBlock,
PlainTextBlock,
)
rendered_template = NotificationRenderedTemplate(
subject="Test Without Actions",
body=[
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text="test without actions",
)
],
)
],
actions=[], # No actions
footer="Test footer",
chart=NotificationRenderedImage(
url="https://example.com/chart.png",
alt_text="Test Chart",
),
)
data = MockNotification(message="test without actions")
renderer = DiscordNotificationProvider.get_renderer(
data=data, category=NotificationCategory.DEBUG
)
renderable = renderer.render(data=data, rendered_template=rendered_template)
# Should have no components when no actions
components = renderable["components"]
assert len(components) == 0
def test_renderer_multiple_actions(self) -> None:
"""Test rendering with multiple action buttons"""
from sentry.notifications.platform.types import (
NotificationBodyFormattingBlockType,
NotificationBodyTextBlockType,
NotificationRenderedAction,
NotificationRenderedImage,
NotificationRenderedTemplate,
ParagraphBlock,
PlainTextBlock,
)
actions = [
NotificationRenderedAction(label="Action 1", link="https://example1.com"),
NotificationRenderedAction(label="Action 2", link="https://example2.com"),
NotificationRenderedAction(label="Complex Action Name", link="https://example3.com"),
]
# Create a custom rendered template with multiple actions
rendered_template = NotificationRenderedTemplate(
subject="Test Multiple Actions",
body=[
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text="test with multiple actions",
)
],
)
],
actions=actions,
footer="Test footer",
chart=NotificationRenderedImage(
url="https://example.com/chart.png",
alt_text="Test Chart",
),
)
data = MockNotification(message="test with multiple actions")
renderer = DiscordNotificationProvider.get_renderer(
data=data, category=NotificationCategory.DEBUG
)
renderable = renderer.render(data=data, rendered_template=rendered_template)
components = renderable["components"]
assert len(components) == 1
action_row = components[0]
assert is_action_row(action_row) # Type guard
buttons = action_row["components"]
assert len(buttons) == 3
# Test button properties
assert_button_properties(buttons[0], "Action 1", "https://example1.com")
assert_button_properties(buttons[1], "Action 2", "https://example2.com")
assert_button_properties(buttons[2], "Complex Action Name", "https://example3.com")
| DiscordRendererTest |
python | docker__docker-py | tests/unit/api_test.py | {
"start": 11276,
"end": 14613
} | class ____(unittest.TestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.daemon = True
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except OSError:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
@pytest.mark.skipif(
docker.constants.IS_WINDOWS_PLATFORM, reason='Unix only'
)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [f'{len(line):x}'.encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with APIClient(
base_url=f"http+unix://{self.socket_file}",
version=DEFAULT_DOCKER_API_VERSION) as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
assert list(stream) == [
str(i).encode() for i in range(50)
]
| UnixSocketStreamTest |
python | huggingface__transformers | src/transformers/models/owlv2/modeling_owlv2.py | {
"start": 42670,
"end": 51385
} | class ____(Owlv2PreTrainedModel):
config: Owlv2Config
def __init__(self, config: Owlv2Config):
super().__init__(config)
if not isinstance(config.text_config, Owlv2TextConfig):
raise TypeError(
"config.text_config is expected to be of type Owlv2TextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, Owlv2VisionConfig):
raise TypeError(
"config.vision_config is expected to be of type Owlv2VisionConfig but is of type"
f" {type(config.vision_config)}."
)
text_config = config.text_config
vision_config = config.vision_config
self.projection_dim = config.projection_dim
self.text_embed_dim = text_config.hidden_size
self.vision_embed_dim = vision_config.hidden_size
self.text_model = Owlv2TextTransformer(text_config)
self.vision_model = Owlv2VisionTransformer(vision_config)
self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
self.logit_scale = nn.Parameter(torch.tensor(config.logit_scale_init_value))
# Initialize weights and apply final processing
self.post_init()
@filter_out_non_signature_kwargs()
@auto_docstring
def get_text_features(
self,
input_ids: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.FloatTensor:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
IDs?](../glossary#input-ids)
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`Owlv2TextModel`].
Examples:
```python
>>> import torch
>>> from transformers import AutoProcessor, Owlv2Model
>>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble")
>>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
>>> inputs = processor(
... text=[["a photo of a cat", "a photo of a dog"], ["photo of a astranaut"]], return_tensors="pt"
... )
>>> with torch.inference_mode():
... text_features = model.get_text_features(**inputs)
```"""
# Get embeddings for all text queries in all batch samples
text_outputs: BaseModelOutputWithPooling = self.text_model(input_ids=input_ids, attention_mask=attention_mask)
text_features = self.text_projection(text_outputs.pooler_output)
return text_features
@filter_out_non_signature_kwargs()
@auto_docstring
def get_image_features(
self,
pixel_values: torch.Tensor,
interpolate_pos_encoding: bool = False,
) -> torch.FloatTensor:
r"""
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
applying the projection layer to the pooled output of [`Owlv2VisionModel`].
Examples:
```python
>>> import torch
>>> from transformers.image_utils import load_image
>>> from transformers import AutoProcessor, Owlv2Model
>>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble")
>>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = load_image(url)
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.inference_mode():
... image_features = model.get_image_features(**inputs)
```"""
vision_outputs: BaseModelOutputWithPooling = self.vision_model(
pixel_values=pixel_values,
interpolate_pos_encoding=interpolate_pos_encoding,
)
image_features = self.visual_projection(vision_outputs.pooler_output)
return image_features
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
return_loss: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_base_image_embeds: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, Owlv2Output]:
r"""
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
return_base_image_embeds (`bool`, *optional*):
Whether or not to return the base image embeddings.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Owlv2Model
>>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble")
>>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
# Use OWLv2 model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
# Get embeddings for all text queries in all batch samples
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
text_embeds = text_outputs[1]
text_embeds = self.text_projection(text_embeds)
image_embeds = vision_outputs[1]
image_embeds = self.visual_projection(image_embeds)
# normalized features
image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
text_embeds_norm = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)
# cosine similarity as logits and set it on the correct device
logit_scale = self.logit_scale.exp().to(image_embeds.device)
logits_per_text = torch.matmul(text_embeds_norm, image_embeds.t()) * logit_scale
logits_per_image = logits_per_text.t()
loss = None
if return_loss:
loss = owlv2_loss(logits_per_text)
text_embeds = text_embeds_norm
if not return_dict:
output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
return ((loss,) + output) if loss is not None else output
return Owlv2Output(
loss=loss,
logits_per_image=logits_per_image,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
image_embeds=image_embeds,
text_model_output=text_outputs,
vision_model_output=vision_outputs,
)
# Copied from transformers.models.owlvit.modeling_owlvit.OwlViTBoxPredictionHead with OwlViT->Owlv2
| Owlv2Model |
python | wandb__wandb | wandb/sdk/wandb_summary.py | {
"start": 214,
"end": 1856
} | class ____(metaclass=abc.ABCMeta):
"""dict-like wrapper for the nested dictionaries in a SummarySubDict.
Triggers self._root._callback on property changes.
"""
@abc.abstractmethod
def _as_dict(self):
raise NotImplementedError
@abc.abstractmethod
def _update(self, record: SummaryRecord):
raise NotImplementedError
def keys(self):
return [k for k in self._as_dict().keys() if k != "_wandb"]
def get(self, key, default=None):
return self._as_dict().get(key, default)
def __getitem__(self, key):
item = self._as_dict()[key]
if isinstance(item, dict):
# this nested dict needs to be wrapped:
wrapped_item = SummarySubDict()
object.__setattr__(wrapped_item, "_items", item)
object.__setattr__(wrapped_item, "_parent", self)
object.__setattr__(wrapped_item, "_parent_key", key)
return wrapped_item
# this item isn't a nested dict
return item
__getattr__ = __getitem__
def __setitem__(self, key, val):
self.update({key: val})
__setattr__ = __setitem__
def __delattr__(self, key):
record = SummaryRecord()
item = SummaryItem()
item.key = (key,)
record.remove = (item,)
self._update(record)
__delitem__ = __delattr__
def update(self, d: t.Dict):
record = SummaryRecord()
for key, value in d.items():
item = SummaryItem()
item.key = (key,)
item.value = value
record.update.append(item)
self._update(record)
| SummaryDict |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 12180,
"end": 12249
} | class ____(UnaryOp):
pass
@infer_global(operator.not_)
| UnaryPositive |
python | pallets__werkzeug | src/werkzeug/routing/rules.py | {
"start": 3326,
"end": 3788
} | class ____:
"""As soon as you have more complex URL setups it's a good idea to use rule
factories to avoid repetitive tasks. Some of them are builtin, others can
be added by subclassing `RuleFactory` and overriding `get_rules`.
"""
def get_rules(self, map: Map) -> t.Iterable[Rule]:
"""Subclasses of `RuleFactory` have to override this method and return
an iterable of rules."""
raise NotImplementedError()
| RuleFactory |
python | openai__gym | gym/utils/play.py | {
"start": 826,
"end": 956
} | class ____(Exception):
"""Raised when the environment does not have a default ``keys_to_action`` mapping."""
| MissingKeysToAction |
python | weaviate__weaviate-python-client | weaviate/collections/queries/near_image/query/executor.py | {
"start": 946,
"end": 16027
} | class ____(
Generic[ConnectionType, Properties, References], _BaseExecutor[ConnectionType]
):
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[QueryReturn[Properties, References]]: ...
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[QueryReturn[Properties, CrossReferences]]: ...
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[QueryReturn[Properties, TReferences]]: ...
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[QueryReturn[TProperties, References]]: ...
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[QueryReturn[TProperties, CrossReferences]]: ...
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[QueryReturn[TProperties, TReferences]]: ...
### GroupBy ###
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[GroupByReturn[Properties, References]]: ...
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[GroupByReturn[Properties, CrossReferences]]: ...
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[GroupByReturn[Properties, TReferences]]: ...
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[GroupByReturn[TProperties, References]]: ...
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[GroupByReturn[TProperties, CrossReferences]]: ...
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[GroupByReturn[TProperties, TReferences]]: ...
### DEFAULT ###
@overload
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[GroupBy] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[
QuerySearchReturnType[Properties, References, TProperties, TReferences]
]: ...
def near_image(
self,
near_image: BLOB_INPUT,
*,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[GroupBy] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[QuerySearchReturnType[Properties, References, TProperties, TReferences]]:
"""Search for objects by image in this collection using an image-capable vectorization module and vector-based similarity search.
See the [docs](https://weaviate.io/developers/weaviate/search/image) for a more detailed explanation.
NOTE:
You must have an image-capable vectorization module installed in order to use this method, e.g. `img2vec-neural`, `multi2vec-clip`, or `multi2vec-bind.
Args:
near_image: The image file to search on, REQUIRED. This can be a base64 encoded string of the binary, a path to the file, or a file-like object.
certainty: The minimum similarity score to return. If not specified, the default certainty specified by the server is used.
distance: The maximum distance to search. If not specified, the default distance specified by the server is used.
limit: The maximum number of results to return. If not specified, the default limit specified by the server is returned.
offset: The offset to start from. If not specified, the retrieval begins from the first object in the server.
auto_limit: The maximum number of [autocut](https://weaviate.io/developers/weaviate/api/graphql/additional-operators#autocut) results to return. If not specified, no limit is applied.
filters: The filters to apply to the search.
group_by: How the results should be grouped by a specific property.
rerank: How the results should be reranked. NOTE: A `rerank-*` module must be enabled for this functionality to work.
target_vector: The name of the vector space to search in for named vector configurations. Required if multiple spaces are configured.
include_vector: Whether to include the vector in the results. If not specified, this is set to False.
return_metadata: The metadata to return for each object, defaults to `None`.
return_properties: The properties to return for each object.
return_references: The references to return for each object.
NOTE:
- If `return_properties` is not provided then all properties are returned except for blob properties.
- If `return_metadata` is not provided then no metadata is provided. Use MetadataQuery.full() to retrieve all metadata.
- If `return_references` is not provided then no references are provided.
Returns:
A `QueryReturn` or `GroupByReturn` object that includes the searched objects.
If `group_by` is provided then a `GroupByReturn` object is returned, otherwise a `QueryReturn` object is returned.
Raises:
weaviate.exceptions.WeaviateQueryError: If the request to the Weaviate server fails.
"""
def resp(
res: search_get_pb2.SearchReply,
) -> QuerySearchReturnType[Properties, References, TProperties, TReferences]:
return cast(
Any,
self._result_to_query_or_groupby_return(
res,
_QueryOptions.from_input(
return_metadata,
return_properties,
include_vector,
self._references,
return_references,
rerank,
group_by,
),
),
)
request = self._query.near_media(
media=parse_blob(near_image),
type_=NearMediaType.IMAGE.value,
certainty=certainty,
distance=distance,
filters=filters,
group_by=_GroupBy.from_input(group_by),
rerank=rerank,
target_vector=target_vector,
limit=limit,
offset=offset,
autocut=auto_limit,
return_metadata=self._parse_return_metadata(return_metadata, include_vector),
return_properties=self._parse_return_properties(return_properties),
return_references=self._parse_return_references(return_references),
)
return executor.execute(
response_callback=resp,
method=self._connection.grpc_search,
request=request,
)
| _NearImageQueryExecutor |
python | PrefectHQ__prefect | src/prefect/context.py | {
"start": 18002,
"end": 25966
} | class ____(ContextModel):
"""
The asset context for a materializing task run. Contains all asset-related information needed
for asset event emission and downstream asset dependency propagation.
Attributes:
direct_asset_dependencies: Assets that this task directly depends on (from task.asset_deps)
downstream_assets: Assets that this task will create/materialize (from MaterializingTask.assets)
upstream_assets: Assets from upstream task dependencies
materialized_by: Tool that materialized the assets (from MaterializingTask.materialized_by)
task_run_id: ID of the associated task run
materialization_metadata: Metadata for materialized assets
"""
direct_asset_dependencies: set[Asset] = Field(default_factory=set)
downstream_assets: set[Asset] = Field(default_factory=set)
upstream_assets: set[Asset] = Field(default_factory=set)
materialized_by: Optional[str] = None
task_run_id: Optional[UUID] = None
materialization_metadata: dict[str, dict[str, Any]] = Field(default_factory=dict)
copy_to_child_ctx: bool = False
__var__: ClassVar[ContextVar[Self]] = ContextVar("asset_context")
@classmethod
def from_task_and_inputs(
cls,
task: "Task[Any, Any]",
task_run_id: UUID,
task_inputs: Optional[dict[str, set[Any]]] = None,
copy_to_child_ctx: bool = False,
) -> "AssetContext":
"""
Create an AssetContext from a task and its resolved inputs.
Args:
task: The task instance
task_run_id: The task run ID
task_inputs: The resolved task inputs (TaskRunResult objects)
copy_to_child_ctx: Whether this context should be copied on a child AssetContext
Returns:
Configured AssetContext
"""
from prefect.client.schemas import TaskRunResult
from prefect.tasks import MaterializingTask
upstream_assets: set[Asset] = set()
flow_ctx = FlowRunContext.get()
if task_inputs and flow_ctx:
for name, inputs in task_inputs.items():
# Parent task runs are not dependencies
# that we want to track
if name == "__parents__":
continue
for task_input in inputs:
if isinstance(task_input, TaskRunResult):
task_assets = flow_ctx.task_run_assets.get(task_input.id)
if task_assets:
upstream_assets.update(task_assets)
ctx = cls(
direct_asset_dependencies=set(task.asset_deps)
if task.asset_deps
else set(),
downstream_assets=set(task.assets)
if isinstance(task, MaterializingTask) and task.assets
else set(),
upstream_assets=upstream_assets,
materialized_by=task.materialized_by
if isinstance(task, MaterializingTask)
else None,
task_run_id=task_run_id,
copy_to_child_ctx=copy_to_child_ctx,
)
ctx.update_tracked_assets()
return ctx
def add_asset_metadata(self, asset_key: str, metadata: dict[str, Any]) -> None:
"""
Add metadata for a materialized asset.
Args:
asset_key: The asset key
metadata: Metadata dictionary to add
Raises:
ValueError: If asset_key is not in downstream_assets
"""
downstream_keys = {asset.key for asset in self.downstream_assets}
if asset_key not in downstream_keys:
raise ValueError(
"Can only add metadata to assets that are arguments to @materialize"
)
existing = self.materialization_metadata.get(asset_key, {})
self.materialization_metadata[asset_key] = existing | metadata
@staticmethod
def asset_as_resource(asset: Asset) -> dict[str, str]:
"""Convert Asset to event resource format."""
resource = {"prefect.resource.id": asset.key}
if asset.properties:
properties_dict = asset.properties.model_dump(exclude_unset=True)
if "name" in properties_dict:
resource["prefect.resource.name"] = properties_dict["name"]
if "description" in properties_dict:
resource["prefect.asset.description"] = properties_dict["description"]
if "url" in properties_dict:
resource["prefect.asset.url"] = properties_dict["url"]
if "owners" in properties_dict:
resource["prefect.asset.owners"] = json.dumps(properties_dict["owners"])
return resource
@staticmethod
def asset_as_related(asset: Asset) -> dict[str, str]:
"""Convert Asset to event related format."""
return {
"prefect.resource.id": asset.key,
"prefect.resource.role": "asset",
}
@staticmethod
def related_materialized_by(by: str) -> dict[str, str]:
"""Create a related resource for the tool that performed the materialization"""
return {
"prefect.resource.id": by,
"prefect.resource.role": "asset-materialized-by",
}
def emit_events(self, state: State) -> None:
"""
Emit asset events
"""
from prefect.events import emit_event
if state.name == "Cached":
return
elif state.is_failed():
event_status = "failed"
elif state.is_completed():
event_status = "succeeded"
else:
return
# If we have no downstream assets, this not a materialization
if not self.downstream_assets:
return
# Emit reference events for all upstream assets (direct + inherited)
all_upstream_assets = self.upstream_assets | self.direct_asset_dependencies
for asset in all_upstream_assets:
emit_event(
event="prefect.asset.referenced",
resource=self.asset_as_resource(asset),
related=[],
)
# Emit materialization events for downstream assets
upstream_related = [self.asset_as_related(a) for a in all_upstream_assets]
if self.materialized_by:
upstream_related.append(self.related_materialized_by(self.materialized_by))
for asset in self.downstream_assets:
emit_event(
event=f"prefect.asset.materialization.{event_status}",
resource=self.asset_as_resource(asset),
related=upstream_related,
payload=self.materialization_metadata.get(asset.key),
)
def update_tracked_assets(self) -> None:
"""
Update the flow run context with assets that should be propagated downstream.
"""
if not (flow_run_context := FlowRunContext.get()):
return
if not self.task_run_id:
return
if self.downstream_assets:
# MaterializingTask: propagate the downstream assets (what we create)
assets_for_downstream = set(self.downstream_assets)
else:
# Regular task: propagate upstream assets + direct dependencies
assets_for_downstream = set(
self.upstream_assets | self.direct_asset_dependencies
)
flow_run_context.task_run_assets[self.task_run_id] = assets_for_downstream
def serialize(self: Self, include_secrets: bool = True) -> dict[str, Any]:
"""Serialize the AssetContext for distributed execution."""
return self.model_dump(
# use json serialization so fields that are
# sets of pydantic models are serialized
mode="json",
exclude_unset=True,
serialize_as_any=True,
context={"include_secrets": include_secrets},
)
| AssetContext |
python | chroma-core__chroma | chromadb/test/test_api.py | {
"start": 97961,
"end": 100179
} | class ____:
"""Test Limit.from_dict() conversion."""
def test_limit_only(self):
"""Test limit without offset."""
from chromadb.execution.expression.operator import Limit
limit = Limit.from_dict({"limit": 20})
assert limit.limit == 20
assert limit.offset == 0 # default
def test_offset_only(self):
"""Test offset without limit."""
from chromadb.execution.expression.operator import Limit
limit = Limit.from_dict({"offset": 10})
assert limit.offset == 10
assert limit.limit is None
def test_limit_and_offset(self):
"""Test both limit and offset."""
from chromadb.execution.expression.operator import Limit
limit = Limit.from_dict({"limit": 20, "offset": 10})
assert limit.limit == 20
assert limit.offset == 10
def test_validation(self):
"""Test Limit validation."""
import pytest
from chromadb.execution.expression.operator import Limit
# Negative limit
with pytest.raises(ValueError, match="must be positive"):
Limit.from_dict({"limit": -1})
# Zero limit
with pytest.raises(ValueError, match="must be positive"):
Limit.from_dict({"limit": 0})
# Negative offset
with pytest.raises(ValueError, match="must be non-negative"):
Limit.from_dict({"offset": -1})
def test_invalid_types(self):
"""Test type validation."""
import pytest
from chromadb.execution.expression.operator import Limit
with pytest.raises(TypeError, match="Expected dict"):
Limit.from_dict("not a dict")
with pytest.raises(TypeError, match="must be an integer"):
Limit.from_dict({"limit": "20"})
with pytest.raises(TypeError, match="must be an integer"):
Limit.from_dict({"offset": 10.5})
def test_unexpected_keys(self):
"""Test rejection of unexpected keys."""
import pytest
from chromadb.execution.expression.operator import Limit
with pytest.raises(ValueError, match="Unexpected keys"):
Limit.from_dict({"limit": 10, "invalid": "key"})
| TestLimitFromDict |
python | PrefectHQ__prefect | tests/server/schemas/test_schedules.py | {
"start": 21306,
"end": 25445
} | class ____:
"""
Tests that DST boundaries are respected
"""
async def test_cron_schedule_hourly_daylight_savings_time_forward(self):
"""
On 3/11/2018, at 2am, America/New_York switched clocks forward an hour.
"""
dt = datetime(2018, 3, 10, 23, tzinfo=ZoneInfo("America/New_York"))
s = CronSchedule(cron="0 * * * *", timezone="America/New_York")
dates = await s.get_dates(n=5, start=dt)
# skip 2am
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
23,
0,
1,
3,
4,
]
# constant hourly clock in utc time
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [4, 5, 6, 7, 8]
async def test_cron_schedule_hourly_daylight_savings_time_backward(self):
"""
11/4/2018, at 2am, America/New_York switched clocks back an hour.
"""
dt = datetime(2018, 11, 3, 23, tzinfo=ZoneInfo("America/New_York"))
s = CronSchedule(cron="0 * * * *", timezone="America/New_York")
dates = await s.get_dates(n=5, start=dt)
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
23,
0,
1,
2,
3,
]
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [3, 4, 5, 7, 8]
async def test_cron_schedule_daily_start_daylight_savings_time_forward(self):
"""
On 3/11/2018, at 2am, America/New_York switched clocks forward an hour.
Confirm that a clock for 9am America/New_York stays 9am through the switch.
"""
dt = datetime(2018, 3, 8, 9, tzinfo=ZoneInfo("America/New_York"))
s = CronSchedule(cron="0 9 * * *", timezone="America/New_York")
dates = await s.get_dates(n=5, start=dt)
# constant 9am start
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
9,
9,
9,
9,
9,
]
# utc time shifts
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [
14,
14,
14,
13,
13,
]
async def test_cron_schedule_daily_start_daylight_savings_time_backward(self):
"""
On 11/4/2018, at 2am, America/New_York switched clocks back an hour.
Confirm that a clock for 9am America/New_York stays 9am through the switch.
"""
dt = datetime(2018, 11, 1, 9, tzinfo=ZoneInfo("America/New_York"))
s = CronSchedule(cron="0 9 * * *", timezone="America/New_York")
dates = await s.get_dates(n=5, start=dt)
# constant 9am start
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
9,
9,
9,
9,
9,
]
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [
13,
13,
13,
14,
14,
]
async def test_cron_schedule_handles_scheduling_near_dst_boundary(self):
"""
Regression test for https://github.com/PrefectHQ/nebula/issues/4048
`croniter` does not generate expected schedules when given a start
time on the day DST occurs but before the time shift actually happens.
Daylight savings occurs at 2023-03-12T02:00:00-05:00 and clocks jump
ahead to 2023-03-12T03:00:00-04:00. The timestamp below is in the 2-hour
window where it is 2023-03-12, but the DST shift has not yet occurred.
"""
dt = datetime(2023, 3, 12, 5, 10, 2, tzinfo=ZoneInfo("UTC"))
s = CronSchedule(cron="10 0 * * *", timezone="America/Montreal")
dates = await s.get_dates(n=5, start=dt)
assert [d.astimezone(ZoneInfo("America/New_York")).hour for d in dates] == [
0,
0,
0,
0,
0,
]
assert [d.astimezone(ZoneInfo("UTC")).hour for d in dates] == [4, 4, 4, 4, 4]
| TestCronScheduleDaylightSavingsTime |
python | fsspec__filesystem_spec | fsspec/implementations/tests/local/local_test.py | {
"start": 119,
"end": 194
} | class ____(abstract.AbstractCopyTests, LocalFixtures):
pass
| TestLocalCopy |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 224410,
"end": 225121
} | class ____:
def test_sf(self):
# Expected value computed with mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 40
# >>> float(mpmath.mp.one - mpmath.exp(-mpmath.exp(-50)))
# 1.9287498479639178e-22
assert_allclose(stats.gumbel_r.sf(50), 1.9287498479639178e-22,
rtol=1e-14)
def test_isf(self):
# Expected value computed with mpmath:
# >>> import mpmath
# >>> mpmath.mp.dps = 40
# >>> float(-mpmath.log(-mpmath.log(mpmath.mp.one - 1e-17)))
# 39.14394658089878
assert_allclose(stats.gumbel_r.isf(1e-17), 39.14394658089878,
rtol=1e-14)
| TestGumbelR |
python | getsentry__sentry | tests/sentry/api/endpoints/test_prompts_activity.py | {
"start": 82,
"end": 7918
} | class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user, name="baz")
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(
organization=self.org, teams=[self.team], name="Bengal-Elephant-Giraffe-Tree-House"
)
self.path = reverse("sentry-api-0-organization-prompts-activity", args=[self.org.slug])
def test_organization_permissions(self) -> None:
new_org = self.create_organization()
self.path = reverse("sentry-api-0-organization-prompts-activity", args=[new_org.slug])
resp = self.client.put(
self.path,
{
"organization_id": new_org.id,
"project_id": self.project.id,
"feature": "releases",
"status": "dismissed",
},
)
assert resp.status_code == 403
def test_organization_id_mismatch(self) -> None:
new_org = self.create_organization()
resp = self.client.put(
self.path,
{
"organization_id": new_org.id,
"project_id": self.project.id,
"feature": "releases",
"status": "dismissed",
},
)
assert resp.status_code == 400
assert resp.data["detail"] == "Organization missing or mismatched"
def test_invalid_feature(self) -> None:
# Invalid feature prompt name
resp = self.client.put(
self.path,
{
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "gibberish",
"status": "dismissed",
},
)
assert resp.status_code == 400
def test_batched_invalid_feature(self) -> None:
# Invalid feature prompt name
resp = self.client.put(
self.path,
{
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": ["releases", "gibberish"],
"status": "dismissed",
},
)
assert resp.status_code == 400
def test_invalid_project(self) -> None:
# Invalid project id
data = {
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
}
resp = self.client.get(self.path, data)
assert resp.status_code == 200
self.project.delete()
# project doesn't exist
resp = self.client.put(
self.path,
{
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
"status": "dismissed",
},
)
assert resp.status_code == 400
def test_dismiss(self) -> None:
data = {
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
}
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert resp.data.get("data", None) is None
self.client.put(
self.path,
{
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
"status": "dismissed",
},
)
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert "data" in resp.data
assert "dismissed_ts" in resp.data["data"]
def test_snooze(self) -> None:
data = {
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
}
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert resp.data.get("data", None) is None
self.client.put(
self.path,
{
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
"status": "snoozed",
},
)
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert "data" in resp.data
assert "snoozed_ts" in resp.data["data"]
def test_visible(self) -> None:
data = {
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
}
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert resp.data.get("data", None) is None
self.client.put(
self.path,
{
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
"status": "visible",
},
)
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert "data" in resp.data
assert resp.data["data"].get("dismissed_ts") is None
assert resp.data["data"].get("snoozed_ts") is None
def test_visible_after_dismiss(self) -> None:
data = {
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
}
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert resp.data.get("data", None) is None
self.client.put(
self.path,
{
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
"status": "dismiss",
},
)
self.client.put(
self.path,
{
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
"status": "visible",
},
)
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert "data" in resp.data
assert resp.data["data"].get("dismissed_ts") is None
assert resp.data["data"].get("snoozed_ts") is None
def test_batched(self) -> None:
data = {
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": ["releases", "alert_stream"],
}
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert resp.data["features"].get("releases", None) is None
assert resp.data["features"].get("alert_stream", None) is None
self.client.put(
self.path,
{
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "releases",
"status": "dismissed",
},
)
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert "dismissed_ts" in resp.data["features"]["releases"]
assert resp.data["features"].get("alert_stream", None) is None
self.client.put(
self.path,
{
"organization_id": self.org.id,
"project_id": self.project.id,
"feature": "alert_stream",
"status": "snoozed",
},
)
resp = self.client.get(self.path, data)
assert resp.status_code == 200
assert "dismissed_ts" in resp.data["features"]["releases"]
assert "snoozed_ts" in resp.data["features"]["alert_stream"]
| PromptsActivityTest |
python | zarr-developers__zarr-python | src/zarr/codecs/numcodecs/_codecs.py | {
"start": 7568,
"end": 7637
} | class ____(_NumcodecsBytesBytesCodec, codec_name="zlib"):
pass
| Zlib |
python | davidhalter__jedi | test/static_analysis/descriptors.py | {
"start": 14,
"end": 245
} | class ____():
@classmethod
def open(cls, name, **kwargs):
return cls.taropen(name, **kwargs)
@classmethod
def taropen(cls, name, **kwargs):
return name
# should just work
TarFile.open('hallo')
| TarFile |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/tasks.py | {
"start": 5905,
"end": 10054
} | class ____(GoogleCloudBaseOperator):
"""
Updates a queue in Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueueUpdateOperator`
:param task_queue: The task queue to update.
This method creates the queue if it does not exist and updates the queue if
it does exist. The queue's name must be specified.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: (Optional) The location name in which the queue will be updated.
If provided, it will be used to construct the full queue path.
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:param update_mask: A mast used to specify which fields of the queue are being updated.
If empty, then all fields will be updated.
If a dict is provided, it must be of the same form as the protobuf message.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"task_queue",
"project_id",
"location",
"queue_name",
"update_mask",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
task_queue: Queue,
project_id: str = PROVIDE_PROJECT_ID,
location: str | None = None,
queue_name: str | None = None,
update_mask: FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.task_queue = task_queue
self.project_id = project_id
self.location = location
self.queue_name = queue_name
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
queue = hook.update_queue(
task_queue=self.task_queue,
project_id=self.project_id,
location=self.location,
queue_name=self.queue_name,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
context=context,
queue_name=queue.name,
)
return Queue.to_dict(queue)
| CloudTasksQueueUpdateOperator |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-equalize-array.py | {
"start": 37,
"end": 229
} | class ____(object):
def minOperations(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return 0 if all(x == nums[0] for x in nums) else 1
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataform.py | {
"start": 7576,
"end": 11737
} | class ____(GoogleCloudBaseOperator):
"""
Creates a new WorkflowInvocation in a given Repository.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation: Required. The workflow invocation resource to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param asynchronous: Flag to return workflow_invocation_id from the Dataform API.
This is useful for submitting long-running workflows and
waiting on them asynchronously using the DataformWorkflowInvocationStateSensor
:param wait_time: Number of seconds between checks
"""
template_fields = ("project_id", "region", "repository_id", "workflow_invocation", "impersonation_chain")
operator_extra_links = (DataformWorkflowInvocationLink(),)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation: WorkflowInvocation | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: int | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
asynchronous: bool = False,
wait_time: int = 10,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workflow_invocation = workflow_invocation
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.asynchronous = asynchronous
self.wait_time = wait_time
def execute(self, context: Context):
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.create_workflow_invocation(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workflow_invocation=self.workflow_invocation,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
workflow_invocation_id = result.name.split("/")[-1]
DataformWorkflowInvocationLink.persist(
context=context,
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workflow_invocation_id=workflow_invocation_id,
)
if not self.asynchronous:
hook.wait_for_workflow_invocation(
workflow_invocation_id=workflow_invocation_id,
repository_id=self.repository_id,
project_id=self.project_id,
region=self.region,
timeout=self.timeout,
wait_time=self.wait_time,
)
return WorkflowInvocation.to_dict(result)
| DataformCreateWorkflowInvocationOperator |
python | walkccc__LeetCode | solutions/230. Kth Smallest Element in a BST/230-3.py | {
"start": 0,
"end": 332
} | class ____:
def kthSmallest(self, root: TreeNode | None, k: int) -> int:
stack = []
while root:
stack.append(root)
root = root.left
for _ in range(k - 1):
root = stack.pop()
root = root.right
while root:
stack.append(root)
root = root.left
return stack[-1].val
| Solution |
python | coleifer__peewee | tests/cockroachdb.py | {
"start": 12063,
"end": 12174
} | class ____(TestModel):
content = TextField()
timestamp = DateTimeField(default=datetime.datetime.now)
| Post |
python | altair-viz__altair | altair/vegalite/v6/api.py | {
"start": 24813,
"end": 29685
} | class ____(TypedDict, closed=True, total=False): # type: ignore[call-arg]
# https://peps.python.org/pep-0728/
value: Required[Any]
__extra_items__: Any
def _reveal_parsed_shorthand(obj: Map, /) -> dict[str, Any]:
# Helper for producing error message on multiple field collision.
return {k: v for k, v in obj.items() if k in utils.SHORTHAND_KEYS}
def _is_extra(*objs: Any, kwds: Map) -> Iterator[bool]:
for el in objs:
if isinstance(el, (SchemaBase, Mapping)):
item = el.to_dict(validate=False) if isinstance(el, SchemaBase) else el
yield not (item.keys() - kwds.keys()).isdisjoint(utils.SHORTHAND_KEYS)
else:
continue
def _is_condition_extra(obj: Any, *objs: Any, kwds: Map) -> TypeIs[_Condition]:
# NOTE: Short circuits on the first conflict.
# 1 - Originated from parse_shorthand
# 2 - Used a wrapper or `dict` directly, including `extra_keys`
return isinstance(obj, str) or any(_is_extra(obj, *objs, kwds=kwds))
def _is_condition_closed(obj: Map) -> TypeIs[_ConditionClosed]:
"""Return `True` if ``obj`` can be used in a chained condition."""
return {"empty", "param", "test", "value"} >= obj.keys()
def _parse_when_constraints(
constraints: dict[str, _FieldEqualType], /
) -> Iterator[BinaryExpression]:
"""
Wrap kwargs with `alt.datum`.
```py
# before
alt.when(alt.datum.Origin == "Europe")
# after
alt.when(Origin="Europe")
```
"""
for name, value in constraints.items():
yield _expr_core.GetAttrExpression("datum", name) == value
def _validate_composables(
predicates: Iterable[Any], /
) -> Iterator[_ComposablePredicateType]:
for p in predicates:
if isinstance(p, (_expr_core.OperatorMixin, core.PredicateComposition)):
yield p
else:
msg = (
f"Predicate composition is not permitted for "
f"{type(p).__name__!r}.\n"
f"Try wrapping {p!r} in a `Parameter` first."
)
raise TypeError(msg)
def _parse_when_compose(
predicates: tuple[Any, ...],
constraints: dict[str, _FieldEqualType],
/,
) -> BinaryExpression:
"""
Compose an `&` reduction predicate.
Parameters
----------
predicates
Collected positional arguments.
constraints
Collected keyword arguments.
Raises
------
TypeError
On the first non ``_ComposablePredicateType`` of `predicates`
"""
iters = []
if predicates:
iters.append(_validate_composables(predicates))
if constraints:
iters.append(_parse_when_constraints(constraints))
r = functools.reduce(operator.and_, itertools.chain.from_iterable(iters))
return t.cast("_expr_core.BinaryExpression", r)
def _parse_when(
predicate: Optional[_PredicateType],
*more_predicates: _ComposablePredicateType,
empty: Optional[bool],
**constraints: _FieldEqualType,
) -> _Condition:
composed: _PredicateType
if utils.is_undefined(predicate):
if more_predicates or constraints:
composed = _parse_when_compose(more_predicates, constraints)
else:
msg = (
f"At least one predicate or constraint must be provided, "
f"but got: {predicate=}"
)
raise TypeError(msg)
elif more_predicates or constraints:
predicates = predicate, *more_predicates
composed = _parse_when_compose(predicates, constraints)
else:
composed = predicate
return _predicate_to_condition(composed, empty=empty)
def _parse_literal(val: Any, /) -> dict[str, Any]:
if isinstance(val, str):
return utils.parse_shorthand(val)
else:
msg = (
f"Expected a shorthand `str`, but got: {type(val).__name__!r}\n\n"
f"From `statement={val!r}`."
)
raise TypeError(msg)
def _parse_then(statement: _StatementType, kwds: dict[str, Any], /) -> dict[str, Any]:
if isinstance(statement, SchemaBase):
statement = statement.to_dict()
elif not isinstance(statement, dict):
statement = _parse_literal(statement)
statement.update(kwds)
return statement
def _parse_otherwise(
statement: _StatementType, conditions: _Conditional[Any], kwds: dict[str, Any], /
) -> SchemaBase | _Conditional[Any]:
selection: SchemaBase | _Conditional[Any]
if isinstance(statement, SchemaBase):
selection = statement.copy()
conditions.update(**kwds) # type: ignore[call-arg]
selection.condition = conditions["condition"]
else:
if not isinstance(statement, Mapping):
statement = _parse_literal(statement)
selection = conditions
selection.update(**statement, **kwds) # type: ignore[call-arg]
return selection
| _Value |
python | django__django | tests/model_forms/tests.py | {
"start": 135688,
"end": 137801
} | class ____(TestCase):
def test_unique_constraint_refs_excluded_field(self):
obj = ConstraintsModel.objects.create(name="product", price="1.00")
data = {
"id": "",
"name": obj.name,
"price": "1337.00",
"category": obj.category,
}
ConstraintsModelForm = modelform_factory(ConstraintsModel, fields="__all__")
ExcludeCategoryForm = modelform_factory(ConstraintsModel, exclude=["category"])
full_form = ConstraintsModelForm(data)
exclude_category_form = ExcludeCategoryForm(data)
self.assertTrue(exclude_category_form.is_valid())
self.assertFalse(full_form.is_valid())
self.assertEqual(
full_form.errors, {"__all__": ["This product already exists."]}
)
def test_check_constraint_refs_excluded_field(self):
data = {
"id": "",
"name": "priceless",
"price": "0.00",
"category": "category 1",
}
ConstraintsModelForm = modelform_factory(ConstraintsModel, fields="__all__")
ExcludePriceForm = modelform_factory(ConstraintsModel, exclude=["price"])
full_form = ConstraintsModelForm(data)
exclude_price_form = ExcludePriceForm(data)
self.assertTrue(exclude_price_form.is_valid())
self.assertFalse(full_form.is_valid())
self.assertEqual(
full_form.errors, {"__all__": ["Price must be greater than zero."]}
)
def test_check_constraint_refs_excluded_field_attname(self):
left = AttnameConstraintsModel.objects.create()
instance = AttnameConstraintsModel.objects.create(left=left)
data = {
"left": str(left.id),
"right": "",
}
AttnameConstraintsModelForm = modelform_factory(
AttnameConstraintsModel, fields="__all__"
)
full_form = AttnameConstraintsModelForm(data, instance=instance)
self.assertFalse(full_form.is_valid())
self.assertEqual(full_form.errors, {"right": ["This field is required."]})
| ConstraintValidationTests |
python | facebook__pyre-check | api/query.py | {
"start": 752,
"end": 823
} | class ____(NamedTuple):
name: str
annotation: str
| DefineParameter |
python | pyca__cryptography | src/cryptography/hazmat/primitives/_serialization.py | {
"start": 1554,
"end": 1611
} | class ____(utils.Enum):
PKCS3 = "PKCS3"
| ParameterFormat |
python | joke2k__faker | faker/providers/job/fr_FR/__init__.py | {
"start": 138,
"end": 28496
} | class ____(BaseProvider):
jobs = [
"BIM manager",
"accessoiriste",
"accompagnant éducatif et social éducative et sociale",
"accompagnateur de tourisme équestre",
"accompagnateur de voyages",
"accompagnateur en moyenne montagne",
"acheteur",
"acheteur d'espaces publicitaires",
"actuaire",
"adjoint administratif administrative",
"administrateur de base de données",
"administrateur de biens",
"administrateur de logiciels de laboratoire",
"administrateur de mission humanitaire",
"administrateur de spectacle",
"administrateur judiciaire",
"administrateur réseaux",
"administrateur territorial",
"affûteur",
"agenceur de cuisines et salles de bains",
"agent arboricole",
"agent artistique",
"agent d'escale",
"agent d'exploitation de l'eau",
"agent de constatation des douanes",
"agent de développement des énergies renouvelables",
"agent de développement local",
"agent de développement touristique",
"agent de propreté et d'hygiène",
"agent de propreté urbaine",
"agent de sécurité",
"agent de sûreté ferroviaire",
"agent de transit",
"agent général générale d'assurances",
"agent hydrothermal hydrothermale",
"agent immobilier immobilière",
"agriculteur",
"agronome",
"aide-chimiste",
"aide-soignant",
"ajusteur-monteur",
"ambulancier",
"analyste de crédit",
"analyste financier",
"anatomiste",
"anesthésiste-réanimateur",
"animalier de laboratoire",
"animateur 2D et 3D",
"animateur d'activités physiques et sportives pour tous",
"animateur de bassin versant",
"animateur de radio et de télévision",
"animateur du patrimoine",
"animateur nature nature",
"animateur socioculturel socioculturelle",
"antiquaire",
"apiculteur",
"aquaculteur",
"architecte",
"architecte d'intérieur",
"architecte des systèmes d'information",
"architecte naval",
"architecte produit industriel",
"architecte réseaux",
"architecte web",
"archiviste",
"archéologue",
"art-thérapeute",
"artiste de cirque",
"ascensoriste",
"assistant commercial commerciale",
"assistant de gestion en PME",
"assistant de service social",
"assistant dentaire",
"assistant en architecture",
"assistant en ressources humaines",
"assistant en études de prix",
"assistant maternel maternelle",
"assistant réalisateur réalisatrice",
"astrophysicien",
"attaché commercial commerciale",
"attaché d'administration",
"attaché de presse",
"attaché de recherche clinique (ARC)",
"attaché territorial territoriale",
"audioprothésiste",
"auditeur externe",
"auditeur interne",
"auditeur qualité",
"auteur-compositeur interprète interprète",
"auxiliaire de puériculture",
"auxiliaire spécialisé vétérinaire",
"avocat",
"aérodynamicien",
"bactériologiste",
"barman",
"batelier",
"bibliothécaire",
"bijoutier-joaillier",
"bio-informaticien",
"biologiste en environnement",
"biologiste médical",
"biostatisticien",
"botaniste",
"bottier",
"boucher",
"boulanger",
"brancardier",
"brodeur",
"bronzier",
"cadreur",
"caissier",
"canalisateur",
"carreleur",
"carrossier",
"cartographe",
"chanteur",
"charcutier-traiteur",
"chargé de recherche en recrutement",
"chargé d'affaires en génie climatique",
"chargé d'affaires en génie mécanique",
"chargé d'études en marketing",
"chargé d'études en valorisation agricole des déchets",
"chargé d'études média",
"chargé d'études naturalistes",
"chargé d'études ressources humaines",
"chargé d'études économiques",
"chargé de clientèle banque",
"chargé de communication interne",
"chargé de pharmacovigilance",
"chargé de production",
"chargé de projet événementiel",
"chargé de recherche en acoustique musicale",
"chargé de recherche et développement déchets",
"chargé de référencement web",
"chargé de valorisation de la recherche",
"chargé de veille législative et réglementaire",
"chargé des méthodes outils et qualité en informatique",
"chargé des relations publiques",
"chargé hygiène sécurité environnement (HSE)",
"charpentier bois",
"charpentier métallique",
"chaudronnier",
"chauffeur de taxi",
"chef de projet packaging",
"chef comptable",
"chef d'exploitation d'usine d'incinération",
"chef d'exploitation des remontées mécaniques",
"chef de chantier",
"chef de chantier en installations électriques",
"chef de cultures légumières",
"chef de fabrication des industries graphiques",
"chef de mission humanitaire",
"chef de produit marketing",
"chef de produit technique en informatique",
"chef de produit touristique",
"chef de projet biodiversité",
"chef de projet communication digitale",
"chef de projet démantèlement nucléaire",
"chef de projet informatique",
"chef de projet multimédia",
"chef de projet sites et sols pollués",
"chef de projet web/mobile",
"chef de projet éolien",
"chef de publicité",
"chef de rayon",
"chef de station de traitement des eaux",
"chef des ventes",
"chef monteur monteuse",
"chercheur en biologie",
"chercheur en biologie du sport",
"chercheur en chimie",
"chercheur en physique",
"chirurgien",
"chirurgien-dentiste",
"chocolatier-confiseur",
"clerc d'huissier",
"climatologue",
"coffreur-boiseur",
"cogniticien",
"coiffeur",
"collaborateur de notaire de notaire",
"collecteur de fonds",
"coloriste",
"commercial export",
"commercial à bord des trains",
"commerçant en alimentation",
"commissaire de police",
"commissaire-priseur",
"community manager",
"comptable",
"comédien",
"concepteur de jeux vidéo",
"concepteur de niveaux de jeu web",
"concepteur designer packaging",
"concepteur multimédia",
"concepteur-rédacteur",
"conducteur d'engins de travaux publics",
"conducteur d'engins forestiers de récolte en entreprises de travaux forestiers",
"conducteur de bus ou d'autocar",
"conducteur de ligne de production alimentaire",
"conducteur de machine onduleuse",
"conducteur de machines agricoles",
"conducteur de machines à imprimer",
"conducteur de métro",
"conducteur de train",
"conducteur de travaux",
"conducteur de travaux agricoles",
"conducteur opérateur opératrice de scierie",
"conducteur routier routière",
"conducteur de machines à papier",
"conseiller agricole",
"conseiller d'élevage",
"conseiller en assurances",
"conseiller en environnement",
"conseiller en fusions-acquisitions",
"conseiller en génétique",
"conseiller en insertion sociale et professionnelle",
"conseiller en séjour",
"conseiller en voyages",
"conseiller en économie sociale et familiale",
"conseiller espace info-énergie",
"conseiller principal d'éducation principale d'éducation",
"conseiller pénitentiaire d'insertion et de probation",
"conseiller sportif sportive en salle de remise en forme",
"conservateur du patrimoine",
"conservateur territorial de bibliothèques",
"consignataire de navire",
"constructeur de routes",
"consultant",
"consultant SaaS",
"consultant en conduite de changement",
"consultant en informatique décisionnelle",
"consultant en management de l'innovation",
"consultant en solutions intégrées",
"consultant en systèmes d'information",
"consultant en validation",
"consultant green IT",
"consultant informatique",
"contremaître",
"contrôleur aérien aérienne",
"contrôleur de gestion",
"contrôleur de performance",
"contrôleur des douanes et droits indirects",
"contrôleur technique automobile",
"convoyeur de fonds",
"coordonnateur d'études cliniques",
"cordiste",
"cordonnier",
"correcteur",
"costumier",
"courtier",
"couvreur",
"credit manager",
"critique d'art",
"cryptologue",
"cuisinier",
"céramiste",
"danseur",
"data manager",
"designer d'interaction",
"designer graphique",
"designer industriel industrielle",
"designer sonore",
"dessinateur de BD",
"dessinateur en construction mécanique",
"dessinateur-projeteur",
"diagnostiqueur immobilier",
"directeur artistique",
"directeur d'accueil collectif de mineurs (ACM)",
"directeur d'agence bancaire",
"directeur d'hôpital",
"directeur d'hôtel",
"directeur d'office de tourisme",
"directeur de création",
"directeur de golf",
"directeur de la photographie",
"directeur de magasin à grande surface",
"directeur de restaurant",
"directeur des services pénitentiaires",
"diététicien",
"documentaliste",
"domoticien",
"déclarant en douane",
"décolleteur",
"décorateur",
"démographe",
"déménageur",
"dépanneur en électroménager",
"développeur d'applications mobiles",
"développeur informatique",
"développeur rural rurale humanitaire",
"développeur économique",
"employé d'élevage",
"employé de chai",
"employé de pressing",
"employé de restaurant",
"encadreur",
"enquêteur privé privée",
"enseignant d'art",
"enseignant de la conduite automobile et de la sécurité routière",
"enseignant humanitaire",
"enseignant spécialisé spécialisée",
"enseignant-chercheur",
"entraîneur de chevaux",
"entraîneur sportif sportive",
"ergonome",
"ergothérapeute",
"esthéticien-cosméticien",
"ethnologue",
"expert bilan carbone",
"expert automobile",
"expert en assurances",
"expert en sécurité informatique",
"expert immobilier immobilier",
"expert-comptable",
"facteur",
"facteur d'instruments",
"façadier",
"façonnier des industries graphiques",
"femme de chambre",
"ferronnier d'art",
"fiscaliste",
"fleuriste",
"formateur d'adultes",
"formateur en informatique",
"formateur technique en agroéquipement",
"formulateur",
"garde (chasse, pêche, littoral, rivière, parcs nationaux)",
"garde à cheval",
"gardien de la paix",
"gardien de police municipale",
"garçon de café",
"gendarme",
"gestionnaire actif/passif",
"gestionnaire de contrats d'assurance",
"gestionnaire de contrats informatiques",
"gestionnaire de données cliniques",
"gestionnaire de parc micro-informatique",
"gestionnaire de patrimoine",
"glaciologue",
"gouvernant",
"greffier",
"grutier",
"guichetier",
"guide de haute montagne",
"guide-conférencier",
"généalogiste",
"généticien",
"géochimiste",
"géographe",
"géologue",
"géologue minier",
"géologue modélisateur",
"géomaticien",
"géomètre-topographe",
"géophysicien",
"géotechnicien",
"géothermicien",
"gérant de portefeuille",
"gérant de restauration collective",
"halieute",
"histologiste",
"horloger",
"horticulteur",
"hot liner",
"huissier de justice",
"hydraulicien",
"hydrogéologue",
"hydrologue",
"hôte d'accueil",
"hôtesse de l'air",
"iconographe",
"illustrateur",
"infirmier",
"infirmier humanitaire",
"informaticien industriel industrielle",
"ingénieur RD en énergies renouvelables",
"ingénieur analogicien analogicienne",
"ingénieur analyste de l'air",
"ingénieur aromaticien aromaticienne",
"ingénieur biomédical biomédicale",
"ingénieur brevets",
"ingénieur calcul",
"ingénieur chimiste",
"ingénieur chimiste en développement analytique",
"ingénieur cloud computing",
"ingénieur combustion et brûleurs",
"ingénieur concepteur conceptrice en mécanique",
"ingénieur d'affaires en génie électrique",
"ingénieur d'application",
"ingénieur d'études en sûreté nucléaire",
"ingénieur de la police technique et scientifique",
"ingénieur de maintenance industrielle",
"ingénieur de recherche (papiers cartons)",
"ingénieur de recherche clinique et épidémiologique",
"ingénieur du BTP",
"ingénieur du son",
"ingénieur efficacité énergétique du bâtiment",
"ingénieur en acoustique",
"ingénieur en automatismes",
"ingénieur en aéronautique",
"ingénieur en caractérisation des matériaux",
"ingénieur en chef territorial",
"ingénieur en construction automobile",
"ingénieur en construction navale",
"ingénieur en fonderie",
"ingénieur en génie climatique",
"ingénieur en imagerie médicale",
"ingénieur en mécanique",
"ingénieur en métrologie",
"ingénieur en production et expérimentations végétales",
"ingénieur en électronique numérique",
"ingénieur en énergie solaire",
"ingénieur environnement",
"ingénieur environnement et risques industriels",
"ingénieur essais",
"ingénieur fluides, énergies, réseaux, environnement",
"ingénieur forage",
"ingénieur forestier forestière",
"ingénieur frigoriste",
"ingénieur gaz",
"ingénieur hydrogéomorphologue",
"ingénieur hydroécologue",
"ingénieur intégration satellite",
"ingénieur logiciel",
"ingénieur logistique",
"ingénieur maintenance aéronautique",
"ingénieur mathématicien",
"ingénieur matériaux",
"ingénieur métallurgiste",
"ingénieur méthodes mécaniques",
"ingénieur nucléaire",
"ingénieur opticien opticienne",
"ingénieur papetier papetière",
"ingénieur plasturgiste",
"ingénieur process aval",
"ingénieur procédés en chimie",
"ingénieur production dans les biotechnologies",
"ingénieur production en aéronautique",
"ingénieur production en mécanique",
"ingénieur pétrolier pétrolière",
"ingénieur qualité moteur",
"ingénieur radioprotection",
"ingénieur recherche et développement (R&D) en agroéquipement",
"ingénieur recherche et développement en agroalimentaire",
"ingénieur réservoir",
"ingénieur structures",
"ingénieur support",
"ingénieur système",
"ingénieur systèmes embarqués",
"ingénieur technico-commercial technico-commerciale",
"ingénieur technico-commercial technico-commerciale en chimie",
"ingénieur technico-commercial technico-commerciale en informatique",
"ingénieur technico-commercial technico-commerciale en électronique",
"ingénieur textile",
"ingénieur traitement de l'image",
"ingénieur télécoms et réseaux",
"ingénieur écoconcepteur écoconceptrice",
"ingénieur électricien",
"ingénieur électronicien électronicienne",
"ingénieur électronicien électronicienne des systèmes de la sécurité aérienne (IESSA)",
"ingénieur études et développement en logiciels de simulation",
"inspecteur de banque",
"inspecteur des douanes, des finances publiques ou du travail",
"inspecteur du permis de conduire et de la sécurité routière",
"installateur en télécoms",
"inséminateur",
"intégrateur web",
"journaliste",
"journaliste reporter d'images",
"juge d'instruction",
"juge des contentieux de la protection",
"juge des enfants",
"juriste d'entreprise",
"juriste droit de l'environnement",
"juriste en droit social",
"juriste en propriété intellectuelle",
"lad-jockey, lad-driver",
"libraire",
"linguiste",
"machiniste-constructeur ou plateau",
"magasinier cariste",
"magistrat",
"manager de risques",
"mandataire judiciaire",
"manipulateur en électroradiologie médicale",
"maquettiste",
"maquilleur artistique",
"maraîcher",
"marchandiseur",
"maroquinier",
"maréchal-ferrant",
"masseur",
"matelot de la Marine Nationale",
"matelot de la marine marchande",
"matelot à la pêche",
"maçon",
"maître d'hôtel",
"maître-chien",
"menuisier",
"metteur en scène",
"microbiologiste",
"microtechnicien",
"militaire du rang de l'armée de terre",
"militaire technicien de l'air (MTA)",
"miroitier",
"mixeur son",
"modiste",
"modéliste",
"moniteur d'activités équestres",
"moniteur de ski",
"moniteur-éducateur",
"monteur en installations thermiques et climatiques",
"monteur en réseaux de distribution électrique",
"monteur-câbleur",
"mouleur-noyauteur",
"musicien",
"mécanicien bateaux",
"mécanicien d'entretien d'avion",
"mécanicien et technicien",
"mécanicien marine navigant",
"mécanicien-outilleur",
"mécanicien-réparateur en matériel agricole",
"mécatronicien",
"médecin de secours en montagne",
"médecin généraliste",
"médecin humanitaire",
"médecin spécialiste",
"médiateur familial familiale",
"médiateur scientifique",
"météorologiste",
"neurobiologiste",
"nivoculteur",
"notaire",
"océanologue",
"oenologue",
"officier de gendarmerie",
"officier de l'armée de l'air",
"officier de l'armée de terre",
"officier de la Marine nationale",
"officier de la marine marchande",
"officier de police",
"officier marinier marinière",
"opticien-lunetier",
"optronicien",
"opérateur de fabrication de produits alimentaires",
"opérateur de raffinerie",
"opérateur en traitement des matériaux",
"opérateur prépresse",
"opérateur sur machine à commande numérique",
"orfèvre",
"orthodontiste",
"orthophoniste",
"orthoprothésiste",
"orthoptiste",
"ostéopathe",
"ouvrier agricole",
"ouvrier forestier forestière",
"ouvrier paysagiste",
"ouvrier plasturgiste",
"palefrenier",
"paléontologue",
"parfumeur",
"patron pêcheur",
"paysagiste",
"peintre en bâtiment",
"pharmacien",
"pharmacien dans l'industrie",
"photographe",
"physicien médical médicale",
"pilote d'hélicoptère",
"pilote de ligne",
"pilote de ligne automatisée (chimie - agroalimentaire - industrie pharmaceutique)",
"pisteur secouriste",
"plombier",
"plâtrier",
"podo-orthésiste",
"poissonnier",
"professeur d'éducation physique et sportive (EPS)",
"professeur dans l'enseignement agricole",
"professeur de collège et de lycée",
"professeur de lycée professionnel",
"professeur de mathématiques ou de physique-chimie",
"professeur de musique et de danse",
"professeur des écoles des écoles",
"professeur en activité physique adaptée",
"professeur-documentaliste",
"programmiste",
"projectionniste",
"prothésiste dentaire",
"prototypiste en matériaux souples",
"préparateur en pharmacie",
"psychanalyste",
"psychologue",
"psychologue de l'Éducation nationale spécialité éducation, développement et apprentissages",
"psychologue de l'éducation nationale spécialité éducation, développement et conseil en"
+ " orientation scolaire et professionnelle",
"psychomotricien",
"puériculteur",
"pâtissier",
"pédiatre",
"pédicure-podologue",
"pédologue",
"relieur-doreur",
"reporter-photographe",
"responsable achats en chimie",
"responsable approvisionnement",
"responsable assurance qualité",
"responsable biométrie",
"responsable d'élevage agricole",
"responsable de fabrication en chimie",
"responsable de formation",
"responsable de la collecte des déchets ménagers",
"responsable de la promotion des ventes",
"responsable de laboratoire de contrôle en biologie",
"responsable de laboratoire de contrôle en chimie",
"responsable de laboratoire de recherche",
"responsable de plate-forme biotechnologique",
"responsable de production alimentaire",
"responsable de projets culturels",
"responsable de rémunération",
"responsable de réseau d'assainissement",
"responsable de réseau eau potable",
"responsable de scierie",
"responsable de site de traitement des déchets",
"responsable des ouvrages hydroélectriques",
"responsable des produits structurés actions",
"responsable des ressources humaines",
"responsable du back office",
"responsable du recrutement",
"responsable du service après-vente",
"responsable du soutien logistique intégré",
"responsable e-CRM",
"responsable qualité en agroalimentaire",
"restaurateur d'oeuvres d'art",
"roboticien",
"rudologue",
"réceptionniste",
"rédacteur en chef",
"rédacteur médical médicale",
"rédacteur on line",
"rédacteur territorial territoriale",
"réflexologue",
"régisseur de spectacles",
"régisseur général générale cinéma",
"régisseur lumière",
"régleur",
"sage-femme",
"salesman",
"sapeur-pompier",
"scripte",
"sculpteur sur bois",
"scénariste",
"secrétaire",
"secrétaire administratif",
"secrétaire d'édition",
"secrétaire de rédaction",
"secrétaire des affaires étrangères",
"secrétaire juridique",
"secrétaire médical",
"sellier",
"serrurier dépanneur dépanneuse",
"serrurier-métallier",
"sociologue",
"soigneur d'animaux",
"solier-moquettiste",
"sommelier",
"soudeur",
"souffleur de verre",
"sous-officier de l'armée de l'air",
"sous-officier de l'armée de terre",
"souscripteur",
"sportif de haut niveau",
"spécialiste de l'accessibilité numérique",
"spécialiste des affaires réglementaires en chimie",
"staffeur-ornemaniste",
"statisticien",
"statisticien en analyse sensorielle",
"statisticien en géomarketing",
"statisticien industriel industrielle",
"styliste",
"substitut du procureur",
"surveillant de centre pénitentiaire",
"syndic de copropriété",
"sérigraphe",
"tailleur de pierre",
"tailleur-couturier",
"tapissier d'ameublement",
"technicien automobile",
"technicien biologiste",
"technicien chimiste",
"technicien céramiste",
"technicien d'analyses biomédicales",
"technicien d'essais",
"technicien d'exploitation de l'eau",
"technicien d'exploitation du réseau gaz",
"technicien d'intervention clientèle gaz",
"technicien de contrôle",
"technicien de fabrication de mobilier et de menuiserie",
"technicien de forge",
"technicien de l'intervention sociale et familiale",
"technicien de la circulation ferroviaire SNCF",
"technicien de maintenance en génie climatique",
"technicien de maintenance en informatique",
"technicien de maintenance industrielle",
"technicien de police technique et scientifique",
"technicien des industries du verre",
"technicien démonstrateur démonstratrice en matériel agricole",
"technicien en automatismes",
"technicien en engins de travaux publics",
"technicien en lignes haute tension",
"technicien en métrologie",
"technicien en optique de précision",
"technicien en traitement des déchets",
"technicien en traitement des matériaux",
"technicien forestier forestière",
"technicien logistique",
"technicien packaging",
"technicien paysagiste",
"technicien plasturgiste",
"technicien prototypiste en agroéquipement",
"technicien pétrolier pétrolière",
"technicien qualité",
"technicien radioprotection",
"technicien réalisateur réalisatrice radio",
"technicien thermicien thermicienne",
"technicien télécoms et réseaux",
"technicien électronicien électronicienne",
"technicien électrotechnicien électrotechnicienne",
"technico-commercial en agroalimentaire",
"teinturier blanchisseur",
"testeur",
"toiletteur d'animaux",
"tonnelier",
"trader",
"traducteur technique",
"traducteur-interprète",
"traffic manager",
"télévendeur",
"urbaniste",
"veilleur stratégique",
"vendeur conseil caviste",
"vendeur en animalerie",
"vendeur en magasin",
"vendeur en micro-informatique et multimédia",
"vendeur-conseil en matériel agricole",
"vendeur-magasinier en fournitures automobiles",
"verrier au chalumeau",
"visiteur médical médicale",
"viticulteur",
"vitrailliste",
"volcanologue",
"vétérinaire",
"web-ergonome",
"webdesigner",
"webmestre",
"yield manager",
"zoologiste",
"ébéniste",
"éclairagiste",
"économe de flux",
"économiste de la construction",
"économètre statisticien",
"écrivain",
"éditeur",
"éducateur canin canine",
"éducateur de jeunes enfants",
"éducateur de la protection judiciaire de la jeunesse (PJJ)",
"éducateur sportif sportive",
"éducateur sportif sportive des activités aquatiques et de la natation",
"éducateur spécialisé spécialisée",
"éducateur technique spécialisé technique spécialisée",
"élagueur",
"électricien installateur installatrice",
"électromécanicien",
"électromécanicien en remontées mécaniques",
"électronicien automobile",
"énergéticien",
"étalagiste",
"étanchéiste",
]
| Provider |
python | python__mypy | test-data/unit/plugins/decimal_to_int.py | {
"start": 146,
"end": 568
} | class ____(Plugin):
def get_type_analyze_hook(self, fullname: str) -> Callable[[AnalyzeTypeContext], Type] | None:
if fullname in ("decimal.Decimal", "_decimal.Decimal"):
return decimal_to_int_hook
return None
def decimal_to_int_hook(ctx: AnalyzeTypeContext) -> Type:
return ctx.api.named_type("builtins.int", [])
def plugin(version: str) -> type[MyPlugin]:
return MyPlugin
| MyPlugin |
python | apache__airflow | providers/fab/src/airflow/providers/fab/auth_manager/views/permissions.py | {
"start": 2328,
"end": 2904
} | class ____(ViewMenuModelView):
"""Customize permission names for FAB's builtin ViewMenuModelView."""
class_permission_name = permissions.RESOURCE_RESOURCE
route_base = "/resources"
method_permission_name = {
"list": "read",
}
base_permissions = [
permissions.ACTION_CAN_READ,
]
list_title = lazy_gettext("List Resources")
show_title = lazy_gettext("Show Resource")
add_title = lazy_gettext("Add Resource")
edit_title = lazy_gettext("Edit Resource")
label_columns = {"name": lazy_gettext("Name")}
| ResourceModelView |
python | huggingface__transformers | tests/models/ovis2/test_image_processing_ovis2.py | {
"start": 1163,
"end": 3155
} | class ____(unittest.TestCase):
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
do_pad=False,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
super().__init__()
size = size if size is not None else {"height": 20, "width": 20}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_pad = do_pad
self.do_convert_rgb = do_convert_rgb
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
"do_pad": self.do_pad,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| Ovis2ImageProcessingTester |
python | pytorch__pytorch | torch/_dynamo/variables/user_defined.py | {
"start": 88620,
"end": 90475
} | class ____(UserDefinedObjectVariable):
"""
Represents user defined objects that are subclasses of tuple.
Internally, it uses a TupleVariable to represent the tuple part of the
variable tracker. For everything else, it falls back to
UserDefinedObjectVariable.
"""
def __init__(self, value, tuple_vt=None, init_args=None, **kwargs):
super().__init__(value, init_args=init_args, **kwargs)
self._tuple_vt = tuple_vt
if self._tuple_vt is None:
assert self.source is None, (
"tuple_vt must be constructed by builder.py when source is present"
)
# Emulate `tuple.__new__`
# https://github.com/python/cpython/blob/3.11/Objects/tupleobject.c#L697-L710
#
# TODO this duplicates the logic in `BuiltinVariable(tuple)`
from torch._dynamo.symbolic_convert import InstructionTranslator
tx = InstructionTranslator.current_tx()
elems = init_args[0].force_unpack_var_sequence(tx)
self._tuple_vt = variables.TupleVariable(
elems, mutation_type=ValueMutationNew()
)
def call_method(
self,
tx,
name,
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
assert self._tuple_vt is not None
method = self._maybe_get_baseclass_method(name)
if method in tuple_methods:
return self._tuple_vt.call_method(tx, name, args, kwargs)
return super().call_method(tx, name, args, kwargs)
def unpack_var_sequence(self, tx):
assert self._tuple_vt is not None
if type(self.value).__iter__ is tuple.__iter__:
return self._tuple_vt.unpack_var_sequence(tx)
raise NotImplementedError
| UserDefinedTupleVariable |
python | falconry__falcon | tests/_inspect_fixture.py | {
"start": 87,
"end": 411
} | class ____:
def on_get(self, req, res):
pass
def on_post(self, req, res):
pass
def on_delete(self, req, res):
pass
def on_get_id(self, req, res, id):
pass
def on_put_id(self, req, res, id):
pass
def on_delete_id(self, req, res, id):
pass
| MyResponder |
python | gevent__gevent | src/gevent/libev/watcher.py | {
"start": 6239,
"end": 6291
} | class ____(_base.SignalMixin, watcher):
pass
| signal |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mariadbconnector.py | {
"start": 2045,
"end": 2988
} | class ____(sqltypes.UUID[sqltypes._UUID_RETURN]):
# work around JIRA issue
# https://jira.mariadb.org/browse/CONPY-270. When that issue is fixed,
# this type can be removed.
def result_processor(
self, dialect: Dialect, coltype: object
) -> Optional[_ResultProcessorType[Any]]:
if self.as_uuid:
def process(value: Any) -> Any:
if value is not None:
if hasattr(value, "decode"):
value = value.decode("ascii")
value = _python_UUID(value)
return value
return process
else:
def process(value: Any) -> Any:
if value is not None:
if hasattr(value, "decode"):
value = value.decode("ascii")
value = str(_python_UUID(value))
return value
return process
| _MariaDBUUID |
python | lazyprogrammer__machine_learning_examples | cnn_class2/tf_resnet_first_layers_starter.py | {
"start": 755,
"end": 2344
} | class ____:
def __init__(self):
# TODO
pass
def copyFromKerasLayers(self, layers):
# TODO
pass
def predict(self, X):
# TODO
pass
def set_session(self, session):
self.session = session
# TODO: finish this
def get_params(self):
params = []
# TODO: finish this
if __name__ == '__main__':
# you can also set weights to None, it doesn't matter
resnet = ResNet50(weights='imagenet')
# you can determine the correct layer
# by looking at resnet.layers in the console
partial_model = Model(
inputs=resnet.input,
outputs=resnet.layers[16].output
)
print(partial_model.summary())
# for layer in partial_model.layers:
# layer.trainable = False
my_partial_resnet = PartialResNet()
# make a fake image
X = np.random.random((1, 224, 224, 3))
# get keras output
keras_output = partial_model.predict(X)
# get my model output
init = tf.variables_initializer(my_partial_resnet.get_params())
# note: starting a new session messes up the Keras model
session = keras.backend.get_session()
my_partial_resnet.set_session(session)
session.run(init)
# first, just make sure we can get any output
first_output = my_partial_resnet.predict(X)
print("first_output.shape:", first_output.shape)
# copy params from Keras model
my_partial_resnet.copyFromKerasLayers(partial_model.layers)
# compare the 2 models
output = my_partial_resnet.predict(X)
diff = np.abs(output - keras_output).sum()
if diff < 1e-10:
print("Everything's great!")
else:
print("diff = %s" % diff)
| PartialResNet |
python | astropy__astropy | astropy/visualization/tests/test_interval.py | {
"start": 3557,
"end": 3698
} | class ____(TestInterval):
# Make sure intervals work with 2d arrays
data = np.linspace(-20.0, 60.0, 100).reshape(100, 1)
| TestInterval2D |
python | crytic__slither | slither/printers/guidance/echidna.py | {
"start": 6193,
"end": 16554
} | class ____(NamedTuple): # pylint: disable=inherit-non-class,too-few-public-methods
# Here value should be Union[str, int, bool]
# But the json lib in Echidna does not handle large integer in json
# So we convert everything to string
value: str
type: str
def _extract_constant_from_read(
ir: Operation,
r: SourceMapping,
all_cst_used: List[ConstantValue],
all_cst_used_in_binary: Dict[str, List[ConstantValue]],
context_explored: Set[Node],
) -> None:
var_read = r.points_to_origin if isinstance(r, ReferenceVariable) else r
# Do not report struct_name in a.struct_name
if isinstance(ir, Member):
return
if isinstance(var_read, Variable) and var_read.is_constant:
# In case of type conversion we use the destination type
if isinstance(ir, TypeConversion):
if isinstance(ir.type, TypeAlias):
value_type = ir.type.type
else:
value_type = ir.type
else:
value_type = var_read.type
try:
value = ConstantFolding(var_read.expression, value_type).result()
all_cst_used.append(ConstantValue(str(value), str(value_type)))
except NotConstant:
pass
if isinstance(var_read, Constant):
all_cst_used.append(ConstantValue(str(var_read.value), str(var_read.type)))
if isinstance(var_read, StateVariable):
if var_read.node_initialization:
if var_read.node_initialization.irs:
if var_read.node_initialization in context_explored:
return
context_explored.add(var_read.node_initialization)
_extract_constants_from_irs(
var_read.node_initialization.irs,
all_cst_used,
all_cst_used_in_binary,
context_explored,
)
def _extract_constant_from_binary(
ir: Binary,
all_cst_used: List[ConstantValue],
all_cst_used_in_binary: Dict[str, List[ConstantValue]],
):
for r in ir.read:
if isinstance(r, Constant):
all_cst_used_in_binary[str(ir.type)].append(ConstantValue(str(r.value), str(r.type)))
if isinstance(ir.variable_left, Constant) or isinstance(ir.variable_right, Constant):
if ir.lvalue:
try:
type_ = ir.lvalue.type
cst = ConstantFolding(ir.expression, type_).result()
all_cst_used.append(ConstantValue(str(cst.value), str(type_)))
except NotConstant:
pass
def _extract_constants_from_irs(
irs: List[Operation],
all_cst_used: List[ConstantValue],
all_cst_used_in_binary: Dict[str, List[ConstantValue]],
context_explored: Set[Node],
) -> None:
for ir in irs:
if isinstance(ir, Binary):
_extract_constant_from_binary(ir, all_cst_used, all_cst_used_in_binary)
if isinstance(ir, TypeConversion):
if isinstance(ir.variable, Constant):
if isinstance(ir.type, TypeAlias):
value_type = ir.type.type
else:
value_type = ir.type
all_cst_used.append(ConstantValue(str(ir.variable.value), str(value_type)))
continue
if (
isinstance(ir, Member)
and isinstance(ir.variable_left, Enum)
and isinstance(ir.variable_right, Constant)
):
# enums are constant values
try:
internal_num = ir.variable_left.values.index(ir.variable_right.value)
all_cst_used.append(ConstantValue(str(internal_num), "uint256"))
except ValueError: # index could fail; should never happen in working solidity code
pass
for r in ir.read:
_extract_constant_from_read(
ir, r, all_cst_used, all_cst_used_in_binary, context_explored
)
def _extract_constants(
contracts: List[Contract],
) -> Tuple[Dict[str, Dict[str, List]], Dict[str, Dict[str, Dict]]]:
# contract -> function -> [ {"value": value, "type": type} ]
ret_cst_used: Dict[str, Dict[str, List[ConstantValue]]] = defaultdict(dict)
# contract -> function -> binary_operand -> [ {"value": value, "type": type ]
ret_cst_used_in_binary: Dict[str, Dict[str, Dict[str, List[ConstantValue]]]] = defaultdict(dict)
for contract in contracts:
for function in contract.functions_entry_points:
all_cst_used: List = []
all_cst_used_in_binary: Dict = defaultdict(list)
context_explored = set()
context_explored.add(function)
_extract_constants_from_irs(
function.all_slithir_operations(),
all_cst_used,
all_cst_used_in_binary,
context_explored,
)
# Note: use list(set()) instead of set
# As this is meant to be serialized in JSON, and JSON does not support set
if all_cst_used:
ret_cst_used[contract.name][_get_name(function)] = list(set(all_cst_used))
if all_cst_used_in_binary:
ret_cst_used_in_binary[contract.name][_get_name(function)] = {
k: list(set(v)) for k, v in all_cst_used_in_binary.items()
}
return ret_cst_used, ret_cst_used_in_binary
def _extract_function_relations(
contracts: List[Contract],
) -> Dict[str, Dict[str, Dict[str, List[str]]]]:
# contract -> function -> [functions]
ret: Dict[str, Dict[str, Dict[str, List[str]]]] = defaultdict(dict)
for contract in contracts:
ret[contract.name] = defaultdict(dict)
written = {
_get_name(function): function.all_state_variables_written()
for function in contract.functions_entry_points
}
read = {
_get_name(function): function.all_state_variables_read()
for function in contract.functions_entry_points
}
for function in contract.functions_entry_points:
ret[contract.name][_get_name(function)] = {
"impacts": [],
"is_impacted_by": [],
}
for candidate, varsWritten in written.items():
if any((r in varsWritten for r in function.all_state_variables_read())):
ret[contract.name][_get_name(function)]["is_impacted_by"].append(candidate)
for candidate, varsRead in read.items():
if any((r in varsRead for r in function.all_state_variables_written())):
ret[contract.name][_get_name(function)]["impacts"].append(candidate)
return ret
def _have_external_calls(contracts: List[Contract]) -> Dict[str, List[str]]:
"""
Detect the functions with external calls
:param slither:
:return:
"""
ret: Dict[str, List[str]] = defaultdict(list)
for contract in contracts:
for function in contract.functions_entry_points:
if function.all_high_level_calls() or function.all_low_level_calls():
ret[contract.name].append(_get_name(function))
if contract.name in ret:
ret[contract.name] = list(set(ret[contract.name]))
return ret
def _use_balance(contracts: List[Contract]) -> Dict[str, List[str]]:
"""
Detect the functions with external calls
:param slither:
:return:
"""
ret: Dict[str, List[str]] = defaultdict(list)
for contract in contracts:
for function in contract.functions_entry_points:
for ir in function.all_slithir_operations():
if isinstance(ir, SolidityCall) and ir.function == SolidityFunction(
"balance(address)"
):
ret[contract.name].append(_get_name(function))
if contract.name in ret:
ret[contract.name] = list(set(ret[contract.name]))
return ret
def _with_fallback(contracts: List[Contract]) -> Set[str]:
ret: Set[str] = set()
for contract in contracts:
for function in contract.functions_entry_points:
if function.is_fallback:
ret.add(contract.name)
return ret
def _with_receive(contracts: List[Contract]) -> Set[str]:
ret: Set[str] = set()
for contract in contracts:
for function in contract.functions_entry_points:
if function.is_receive:
ret.add(contract.name)
return ret
def _call_a_parameter(slither: SlitherCore, contracts: List[Contract]) -> Dict[str, List[Dict]]:
"""
Detect the functions with external calls
:param slither:
:return:
"""
# contract -> [ (function, idx, interface_called) ]
ret: Dict[str, List[Dict]] = defaultdict(list)
for contract in contracts: # pylint: disable=too-many-nested-blocks
for function in contract.functions_entry_points:
try:
for ir in function.all_slithir_operations():
if isinstance(ir, HighLevelCall):
for idx, parameter in enumerate(function.parameters):
if is_dependent(ir.destination, parameter, function):
ret[contract.name].append(
{
"function": _get_name(function),
"parameter_idx": idx,
"signature": _get_name(ir.function),
}
)
if isinstance(ir, LowLevelCall):
for idx, parameter in enumerate(function.parameters):
if is_dependent(ir.destination, parameter, function):
ret[contract.name].append(
{
"function": _get_name(function),
"parameter_idx": idx,
"signature": None,
}
)
except Exception as e:
if slither.no_fail:
continue
raise e
return ret
| ConstantValue |
python | facebook__pyre-check | client/commands/expression_level_coverage.py | {
"start": 1768,
"end": 1891
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
ErrorAtPath: ErrorAtPath
@dataclass(frozen=True)
| ErrorAtPathResponse |
python | pallets__werkzeug | examples/simplewiki/database.py | {
"start": 2483,
"end": 2909
} | class ____:
"""
Represents a simple page without any revisions. This is for example
used in the page index where the page contents are not relevant.
"""
query = session.query_property()
def __init__(self, name):
self.name = name
@property
def title(self):
return self.name.replace("_", " ")
def __repr__(self):
return f"<{type(self).__name__} {self.name!r}>"
| Page |
python | neetcode-gh__leetcode | python/2001-number-of-pairs-of-interchangeable-rectangles.py | {
"start": 0,
"end": 373
} | class ____:
def interchangeableRectangles(self, rectangles: List[List[int]]) -> int:
count = {} # { W / H : Count }
res = 0
for w, h in rectangles:
# Increment the count for the ratio
count[w / h] = 1 + count.get(w / h, 0)
for c in count.values():
res += (c * (c - 1)) // 2
return res
| Solution |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_int_literal.py | {
"start": 674,
"end": 1599
} | class ____(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
# Check if the import is the problematic one
if fullname in redirect_imports:
try:
# Attempt to import the standalone module
name = fullname.removeprefix("test.")
r = importlib.import_module(name)
# Redirect the module in sys.modules
sys.modules[fullname] = r
# Return a module spec from the found module
return importlib.util.find_spec(name)
except ImportError:
return None
return None
# Add the custom finder to sys.meta_path
sys.meta_path.insert(0, RedirectImportFinder())
# ======= END DYNAMO PATCH =======
"""Test correct treatment of hex/oct constants.
This is complex because of changes due to PEP 237.
"""
import unittest
| RedirectImportFinder |
python | tensorflow__tensorflow | tensorflow/python/ops/math_ops_test.py | {
"start": 52361,
"end": 52671
} | class ____(test_util.TensorFlowTestCase):
def testErfcinv(self):
values = np.random.uniform(0.1, 1.9, size=int(1e4)).astype(np.float32)
approx_id = math_ops.erfc(math_ops.erfcinv(values))
self.assertAllClose(values, self.evaluate(approx_id))
@test_util.run_all_in_graph_and_eager_modes
| ErfcinvTest |
python | walkccc__LeetCode | solutions/1056. Confusing Number/1056.py | {
"start": 0,
"end": 305
} | class ____:
def confusingNumber(self, n: int) -> bool:
s = str(n)
rotated = {'0': '0', '1': '1', '6': '9', '8': '8', '9': '6'}
rotatedNum = []
for c in s[::-1]:
if c not in rotated:
return False
rotatedNum.append(rotated[c])
return ''.join(rotatedNum) != s
| Solution |
python | numpy__numpy | numpy/_core/tests/test_indexing.py | {
"start": 53401,
"end": 54796
} | class ____:
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
# 0-d arrays don't work:
assert_raises(IndexError, subscript, np.ones(()), 0)
# Out of bound values:
assert_raises(IndexError, subscript, np.ones(10), 11)
assert_raises(IndexError, subscript, np.ones(10), -11)
assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
a = np.arange(10)
assert_array_equal(a[4], subscript(a, 4))
a = a.reshape(5, 2)
assert_array_equal(a[-4], subscript(a, -4))
def test_setitem(self):
assign = functools.partial(array_indexing, 1)
# Deletion is impossible:
assert_raises(ValueError, assign, np.ones(10), 0)
# 0-d arrays don't work:
assert_raises(IndexError, assign, np.ones(()), 0, 0)
# Out of bound values:
assert_raises(IndexError, assign, np.ones(10), 11, 0)
assert_raises(IndexError, assign, np.ones(10), -11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
a = np.arange(10)
assign(a, 4, 10)
assert_(a[4] == 10)
a = a.reshape(5, 2)
assign(a, 4, 10)
assert_array_equal(a[-1], [10, 10])
| TestCApiAccess |
python | doocs__leetcode | solution/1000-1099/1040.Moving Stones Until Consecutive II/Solution.py | {
"start": 0,
"end": 513
} | class ____:
def numMovesStonesII(self, stones: List[int]) -> List[int]:
stones.sort()
mi = n = len(stones)
mx = max(stones[-1] - stones[1] + 1, stones[-2] - stones[0] + 1) - (n - 1)
i = 0
for j, x in enumerate(stones):
while x - stones[i] + 1 > n:
i += 1
if j - i + 1 == n - 1 and x - stones[i] == n - 2:
mi = min(mi, 2)
else:
mi = min(mi, n - (j - i + 1))
return [mi, mx]
| Solution |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 34847,
"end": 38291
} | class ____(ConvBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.convbert = ConvBertModel(config)
self.classifier = ConvBertClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.convbert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| ConvBertForSequenceClassification |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/histogram_ops_test.py | {
"start": 1129,
"end": 3099
} | class ____(test.TestCase, parameterized.TestCase):
def test_empty_input_gives_all_zero_counts(self):
# Bins will be:
# (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
value_range = [0.0, 5.0]
values = []
expected_bins = []
with self.cached_session():
bins = histogram_ops.histogram_fixed_width_bins(
values, value_range, nbins=5)
self.assertEqual(dtypes.int32, bins.dtype)
self.assertAllClose(expected_bins, self.evaluate(bins))
@parameterized.parameters(
np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
)
def test_1d_values_int32_output(self, dtype):
# Bins will be:
# (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
value_range = np.array([0.0, 5.0]).astype(dtype)
values = np.array([-1.0, 0.0, 1.5, 2.0, 5.0, 15]).astype(dtype)
expected_bins = [0, 0, 1, 2, 4, 4]
with self.cached_session():
bins = histogram_ops.histogram_fixed_width_bins(
values, value_range, nbins=5)
self.assertEqual(dtypes.int32, bins.dtype)
self.assertAllClose(expected_bins, self.evaluate(bins))
def test_2d_values(self):
# Bins will be:
# (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
value_range = [0.0, 5.0]
values = constant_op.constant(
[[-1.0, 0.0, 1.5], [2.0, 5.0, 15]], shape=(2, 3))
expected_bins = [[0, 0, 1], [2, 4, 4]]
with self.cached_session():
bins = histogram_ops.histogram_fixed_width_bins(
values, value_range, nbins=5)
self.assertEqual(dtypes.int32, bins.dtype)
self.assertAllClose(expected_bins, self.evaluate(bins))
def test_negative_nbins(self):
value_range = [0.0, 5.0]
values = []
with self.assertRaisesRegex((errors.InvalidArgumentError, ValueError),
"must > 0"):
with self.session():
bins = histogram_ops.histogram_fixed_width_bins(
values, value_range, nbins=-1)
self.evaluate(bins)
| BinValuesFixedWidth |
python | jazzband__django-simple-history | simple_history/tests/tests/test_models.py | {
"start": 102432,
"end": 108772
} | class ____(TestCase):
"""
Tests chasing foreign keys across time points naturally with
HistoricForeignKey.
"""
def test_non_historic_to_historic(self):
"""
Non-historic table foreign key to historic table.
In this case it should simply behave like ForeignKey because
the origin model (this one) cannot be historic, so foreign key
lookups are always "current".
"""
org = TestOrganizationWithHistory.objects.create(name="original")
part = TestParticipantToHistoricOrganization.objects.create(
name="part", organization=org
)
before_mod = timezone.now()
self.assertEqual(part.organization.id, org.id)
self.assertEqual(org.participants.count(), 1)
self.assertEqual(org.participants.all()[0], part)
historg = TestOrganizationWithHistory.history.as_of(before_mod).get(
name="original"
)
self.assertEqual(historg.participants.count(), 1)
self.assertEqual(historg.participants.all()[0], part)
self.assertEqual(org.history.count(), 1)
org.name = "modified"
org.save()
self.assertEqual(org.history.count(), 2)
# drop internal caches, re-select
part = TestParticipantToHistoricOrganization.objects.get(name="part")
self.assertEqual(part.organization.name, "modified")
def test_historic_to_non_historic(self):
"""
Historic table foreign key to non-historic table.
In this case it should simply behave like ForeignKey because
the origin model (this one) can be historic but the target model
is not, so foreign key lookups are always "current".
"""
org = TestOrganization.objects.create(name="org")
part = TestHistoricParticipantToOrganization.objects.create(
name="original", organization=org
)
self.assertEqual(part.organization.id, org.id)
self.assertEqual(org.participants.count(), 1)
self.assertEqual(org.participants.all()[0], part)
histpart = TestHistoricParticipantToOrganization.objects.get(name="original")
self.assertEqual(histpart.organization.id, org.id)
def test_historic_to_historic(self):
"""
Historic table foreign key to historic table.
In this case as_of queries on the origin model (this one)
or on the target model (the other one) will traverse the
foreign key relationship honoring the timepoint of the
original query. This only happens when both tables involved
are historic.
At t1 we have one org, one participant.
At t2 we have one org, two participants, however the org's name has changed.
At t3 we have one org, and one participant has left.
"""
org = TestOrganizationWithHistory.objects.create(name="original")
p1 = TestHistoricParticipanToHistoricOrganization.objects.create(
name="p1", organization=org
)
t1_one_participant = timezone.now()
p2 = TestHistoricParticipanToHistoricOrganization.objects.create(
name="p2", organization=org
)
org.name = "modified"
org.save()
t2_two_participants = timezone.now()
p1.delete()
t3_one_participant = timezone.now()
# forward relationships - see how natural chasing timepoint relations is
p1t1 = TestHistoricParticipanToHistoricOrganization.history.as_of(
t1_one_participant
).get(name="p1")
self.assertEqual(p1t1.organization, org)
self.assertEqual(p1t1.organization.name, "original")
p1t2 = TestHistoricParticipanToHistoricOrganization.history.as_of(
t2_two_participants
).get(name="p1")
self.assertEqual(p1t2.organization, org)
self.assertEqual(p1t2.organization.name, "modified")
p2t2 = TestHistoricParticipanToHistoricOrganization.history.as_of(
t2_two_participants
).get(name="p2")
self.assertEqual(p2t2.organization, org)
self.assertEqual(p2t2.organization.name, "modified")
p2t3 = TestHistoricParticipanToHistoricOrganization.history.as_of(
t3_one_participant
).get(name="p2")
self.assertEqual(p2t3.organization, org)
self.assertEqual(p2t3.organization.name, "modified")
# reverse relationships
# at t1
ot1 = TestOrganizationWithHistory.history.as_of(t1_one_participant).all()[0]
self.assertEqual(ot1.historic_participants.count(), 1)
self.assertEqual(ot1.historic_participants.all()[0].name, p1.name)
# at t2
ot2 = TestOrganizationWithHistory.history.as_of(t2_two_participants).all()[0]
self.assertEqual(ot2.historic_participants.count(), 2)
self.assertIn(p1.name, [item.name for item in ot2.historic_participants.all()])
self.assertIn(p2.name, [item.name for item in ot2.historic_participants.all()])
# at t3
ot3 = TestOrganizationWithHistory.history.as_of(t3_one_participant).all()[0]
self.assertEqual(ot3.historic_participants.count(), 1)
self.assertEqual(ot3.historic_participants.all()[0].name, p2.name)
# current
self.assertEqual(org.historic_participants.count(), 1)
self.assertEqual(org.historic_participants.all()[0].name, p2.name)
self.assertTrue(is_historic(ot1))
self.assertFalse(is_historic(org))
self.assertIsInstance(
to_historic(ot1), TestOrganizationWithHistory.history.model
)
self.assertIsNone(to_historic(org))
# test querying directly from the history table and converting
# to an instance, it should chase the foreign key properly
# in this case if _as_of is not present we use the history_date
# https://github.com/django-commons/django-simple-history/issues/983
pt1h = TestHistoricParticipanToHistoricOrganization.history.all()[0]
pt1i = pt1h.instance
self.assertEqual(pt1i.organization.name, "modified")
pt1h = TestHistoricParticipanToHistoricOrganization.history.all().order_by(
"history_date"
)[0]
pt1i = pt1h.instance
self.assertEqual(pt1i.organization.name, "original")
| HistoricForeignKeyTest |
python | wandb__wandb | wandb/vendor/pygments/lexers/markup.py | {
"start": 15561,
"end": 15994
} | class ____(DelegatingLexer):
"""
Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
`XmlLexer`.
.. versionadded:: 2.0
"""
name = "XUL+mozpreproc"
aliases = ['xul+mozpreproc']
filenames = ['*.xul.in']
mimetypes = []
def __init__(self, **options):
super(MozPreprocXulLexer, self).__init__(
XmlLexer, MozPreprocHashLexer, **options)
| MozPreprocXulLexer |
python | huggingface__transformers | src/transformers/models/sam3_tracker/modular_sam3_tracker.py | {
"start": 1342,
"end": 3123
} | class ____(Sam2PromptEncoderConfig):
r"""
This is the configuration class to store the configuration of a [`Sam3TrackerPromptEncoder`]. The [`Sam3TrackerPromptEncoder`]
module is used to encode the input 2D points and bounding boxes.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 256):
Dimensionality of the hidden states.
image_size (`int`, *optional*, defaults to 1008):
The expected output resolution of the image.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
mask_input_channels (`int`, *optional*, defaults to 16):
The number of channels to be fed to the `MaskDecoder` module.
num_point_embeddings (`int`, *optional*, defaults to 4):
The number of point embeddings to be used.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function in the encoder and pooler.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
scale (`float`, *optional*, defaults to 1):
The scale factor for the prompt encoder.
"""
base_config_key = "prompt_encoder_config"
def __init__(
self,
hidden_size=256,
image_size=1008,
patch_size=14,
mask_input_channels=16,
num_point_embeddings=4,
hidden_act="gelu",
layer_norm_eps=1e-6,
scale=1,
**kwargs,
):
super().__init__(**kwargs)
| Sam3TrackerPromptEncoderConfig |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 66051,
"end": 66475
} | class ____(TypedDict, total=False):
type: Required[Literal['include-exclude-dict']]
include: IncExDict
exclude: IncExDict
def filter_dict_schema(*, include: IncExDict | None = None, exclude: IncExDict | None = None) -> IncExDictSerSchema:
return _dict_not_none(type='include-exclude-dict', include=include, exclude=exclude)
IncExDictOrElseSerSchema = Union[IncExDictSerSchema, SerSchema]
| IncExDictSerSchema |
python | walkccc__LeetCode | solutions/944. Delete Columns to Make Sorted/944.py | {
"start": 0,
"end": 244
} | class ____:
def minDeletionSize(self, strs: list[str]) -> int:
ans = 0
for j in range(len(strs[0])):
for i in range(len(strs) - 1):
if strs[i][j] > strs[i + 1][j]:
ans += 1
break
return ans
| Solution |
python | django-haystack__django-haystack | test_haystack/elasticsearch5_tests/test_backend.py | {
"start": 3356,
"end": 3971
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(
document=True,
use_template=True,
template_name="search/indexes/core/mockmodel_template.txt",
)
author = indexes.CharField(model_attr="author", weight=2.0)
editor = indexes.CharField(model_attr="editor")
pub_date = indexes.DateTimeField(model_attr="pub_date")
def get_model(self):
return AFourthMockModel
def prepare(self, obj):
data = super().prepare(obj)
if obj.pk == 4:
data["boost"] = 5.0
return data
| Elasticsearch5BoostMockSearchIndex |
python | readthedocs__readthedocs.org | readthedocs/invitations/backends.py | {
"start": 2875,
"end": 3208
} | class ____(Backend):
klass = Project
def get_origin_url(self):
return reverse("projects_users", args=[self.object.slug])
def get_object_url(self):
return reverse("projects_detail", args=[self.object.slug])
def redeem(self, user):
self.object.users.add(user)
return True
| ProjectBackend |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 18897,
"end": 20107
} | class ____:
param_names = ["dtype", "method", "with_nans"]
params = [
["float64", "int64", "Float64", "Int64"],
["cummin", "cummax", "cumsum"],
[True, False],
]
def setup(self, dtype, method, with_nans):
if with_nans and dtype == "int64":
raise NotImplementedError("Construction of df would raise")
N = 500_000
keys = np.random.randint(0, 100, size=N)
vals = np.random.randint(-10, 10, (N, 5))
if with_nans:
null_vals = vals.astype(float, copy=True)
null_vals[::2, :] = np.nan
null_vals[::3, :] = np.nan
if dtype in ["Int64", "Float64"]:
null_vals = null_vals.astype(object)
null_vals[::2, :] = NA
null_vals[::3, :] = NA
df = DataFrame(null_vals, columns=list("abcde"), dtype=dtype)
df["key"] = keys
self.df = df
else:
df = DataFrame(vals, columns=list("abcde")).astype(dtype, copy=False)
df["key"] = keys
self.df = df
def time_frame_transform(self, dtype, method, with_nans):
self.df.groupby("key").transform(method)
| Cumulative |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 11742,
"end": 11978
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer1 = torch.nn.Linear(10, 10)
def forward(self, x, offset=1):
x = F.relu(self.layer1(x)) + offset
return x
| IntArg |
python | ray-project__ray | python/ray/tests/test_autoscaler_drain_node_api.py | {
"start": 449,
"end": 799
} | class ____(FakeMultiNodeProvider):
"""FakeMultiNodeProvider, with Ray node process termination mocked out.
Used to check that a Ray node can be terminated by DrainNode API call
from the autoscaler.
"""
def _kill_ray_processes(self, node):
logger.info("Leaving Raylet termination to autoscaler Drain API!")
| MockFakeProvider |
python | huggingface__transformers | src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | {
"start": 118485,
"end": 131770
} | class ____(SeamlessM4TPreTrainedModel, GenerationMixin):
input_modalities = "audio"
_keys_to_ignore_on_load_missing = ["text_encoder", "t2u_model", "vocoder"]
main_input_name = "input_features"
_tied_weights_keys = {
"lm_head.weight": "shared.weight",
"text_decoder.embed_tokens.weight": "shared.weight",
}
def __init__(self, config: SeamlessM4TConfig):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
self.speech_encoder = SeamlessM4TSpeechEncoder(config)
self.text_decoder = SeamlessM4TDecoder(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_encoder(self):
return self.speech_encoder
def get_decoder(self):
return self.text_decoder
def get_input_embeddings(self):
return self.text_decoder.embed_tokens
def set_input_embeddings(self, value):
self.text_decoder.embed_tokens = value
@auto_docstring(custom_args=SEAMLESS_M4T_COMMON_CUSTOM_ARGS)
def forward(
self,
input_features: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.speech_encoder(
input_features=input_features,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
encoder_attention_mask = attention_mask
if attention_mask is not None:
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(attention_mask).to(
encoder_outputs[0].device
)
encoder_attention_mask = _compute_new_attention_mask(
hidden_states=encoder_outputs[0], seq_lens=sub_sampled_lengths
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.text_decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(decoder_outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(lm_logits.device)
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
outputs = decoder_outputs + encoder_outputs
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def generate(
self,
input_features=None,
tgt_lang=None,
generation_config=None,
logits_processor=None,
stopping_criteria=None,
prefix_allowed_tokens_fn=None,
synced_gpus=False,
**kwargs,
):
"""
Generates sequences of token ids.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](./generation_strategies).
</Tip>
Parameters:
input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_banks)`):
Input audio features. This should be returned by the [`SeamlessM4TFeatureExtractor`] class or the
[`SeamlessM4TProcessor`] class. See [`SeamlessM4TFeatureExtractor.__call__`] for details.
tgt_lang (`str`, *optional*):
The language to use as target language for translation.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
`generation_config` is not provided, the default will be used, which had the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and
generation config. If a logit processor is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
stopping_criteria (`StoppingCriteriaList`, *optional*):
Custom stopping criteria that complement the default stopping criteria built from arguments and a
generation config. If a stopping criteria is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], list[int]]`, *optional*):
If provided, this function constraints the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
`input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful
for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
Retrieval](https://huggingface.co/papers/2010.00904).
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed to avoid deadlocking with
`FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
kwargs (`dict[str, Any]`, *optional*):
Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model.
Return:
[`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`. The possible
[`~utils.ModelOutput`] types are:
- [`~generation.GenerateEncoderDecoderOutput`],
- [`~generation.GenerateBeamEncoderDecoderOutput`]
"""
text_decoder_input_ids = kwargs.pop("decoder_input_ids", None)
# overwrite text_decoder_input_ids if tgt_lang is passed. The latter gets priority over decoder_input_ids.
input_features = input_features if input_features is not None else kwargs.pop("inputs")
if tgt_lang is not None:
inputs = kwargs.get("input_embeds") if input_features is None else input_features
inputs = (
inputs
if inputs is not None
else kwargs.get("encoder_outputs", {"last_hidden_state": None})["last_hidden_state"]
)
batch_size = len(inputs)
if hasattr(self.generation_config, "text_decoder_lang_to_code_id"):
# also accept __xxx__
tgt_lang = tgt_lang.replace("__", "")
if tgt_lang not in self.generation_config.text_decoder_lang_to_code_id:
raise ValueError(
f"""`tgt_lang={tgt_lang}` is not supported by this model. Please specify a `tgt_lang` in
{", ".join(self.generation_config.text_decoder_lang_to_code_id.keys())}"""
)
# tgt_lang gets priority over decoder input ids
text_tgt_lang_id = self.generation_config.text_decoder_lang_to_code_id.get(tgt_lang)
text_decoder_input_ids = torch.tensor([[text_tgt_lang_id]] * batch_size, device=self.device)
else:
raise ValueError(
"""This model generation config doesn't have a `text_decoder_lang_to_code_id` key which maps
the target language to the right token id. Make sure to load the right generation config."""
)
else:
# only a warning, otherwise errors appear in the tests
logger.warning(
"""You must either specify a `tgt_lang` or pass a correct `text_decoder_input_ids` to get
a correct generation, otherwise the generation will probably make no sense."""
)
return super().generate(
input_features,
generation_config,
logits_processor,
stopping_criteria,
prefix_allowed_tokens_fn,
synced_gpus,
decoder_input_ids=text_decoder_input_ids,
**kwargs,
)
@auto_docstring(
custom_intro="""
The text-to-speech SeamlessM4T Model transformer which can be used for T2ST.
"""
)
| SeamlessM4TForSpeechToText |
python | python-markdown__markdown | tests/test_syntax/extensions/test_code_hilite.py | {
"start": 14769,
"end": 29557
} | class ____(TestCase):
""" Test codehilite extension. """
def setUp(self):
if has_pygments and pygments.__version__ != required_pygments_version:
self.skipTest(f'Pygments=={required_pygments_version} is required')
# Define a custom Pygments formatter (same example in the documentation)
if has_pygments:
class CustomAddLangHtmlFormatter(pygments.formatters.HtmlFormatter):
def __init__(self, lang_str='', **options):
super().__init__(**options)
self.lang_str = lang_str
def _wrap_code(self, source):
yield 0, f'<code class="{self.lang_str}">'
yield from source
yield 0, '</code>'
else:
CustomAddLangHtmlFormatter = None
self.custom_pygments_formatter = CustomAddLangHtmlFormatter
maxDiff = None
def testBasicCodeHilite(self):
if has_pygments:
# Odd result as no `lang` given and a single comment is not enough for guessing.
expected = (
'<div class="codehilite"><pre><span></span><code><span class="err"># A Code Comment</span>\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code># A Code Comment\n'
'</code></pre>'
)
self.assertMarkdownRenders(
'\t# A Code Comment',
expected,
extensions=['codehilite']
)
def testLinenumsTrue(self):
if has_pygments:
expected = (
'<table class="codehilitetable"><tr>'
'<td class="linenos"><div class="linenodiv"><pre>1</pre></div></td>'
'<td class="code"><div class="codehilite"><pre><span></span>'
'<code><span class="err"># A Code Comment</span>\n'
'</code></pre></div>\n'
'</td></tr></table>'
)
else:
expected = (
'<pre class="codehilite"><code class="linenums"># A Code Comment\n'
'</code></pre>'
)
self.assertMarkdownRenders(
'\t# A Code Comment',
expected,
extensions=[CodeHiliteExtension(linenums=True)]
)
def testLinenumsFalse(self):
if has_pygments:
expected = (
'<div class="codehilite"><pre><span></span><code><span class="c1"># A Code Comment</span>\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code class="language-python"># A Code Comment\n'
'</code></pre>'
)
self.assertMarkdownRenders(
(
'\t#!Python\n'
'\t# A Code Comment'
),
expected,
extensions=[CodeHiliteExtension(linenums=False)]
)
def testLinenumsNone(self):
if has_pygments:
expected = (
'<div class="codehilite"><pre><span></span><code><span class="err"># A Code Comment</span>\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code># A Code Comment\n'
'</code></pre>'
)
self.assertMarkdownRenders(
'\t# A Code Comment',
expected,
extensions=[CodeHiliteExtension(linenums=None)]
)
def testLinenumsNoneWithShebang(self):
if has_pygments:
expected = (
'<table class="codehilitetable"><tr>'
'<td class="linenos"><div class="linenodiv"><pre>1</pre></div></td>'
'<td class="code"><div class="codehilite"><pre><span></span>'
'<code><span class="c1"># A Code Comment</span>\n'
'</code></pre></div>\n'
'</td></tr></table>'
)
else:
expected = (
'<pre class="codehilite"><code class="language-python linenums"># A Code Comment\n'
'</code></pre>'
)
self.assertMarkdownRenders(
(
'\t#!Python\n'
'\t# A Code Comment'
),
expected,
extensions=[CodeHiliteExtension(linenums=None)]
)
def testLinenumsNoneWithColon(self):
if has_pygments:
expected = (
'<div class="codehilite"><pre><span></span><code><span class="c1"># A Code Comment</span>\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code class="language-python"># A Code Comment\n'
'</code></pre>'
)
self.assertMarkdownRenders(
(
'\t:::Python\n'
'\t# A Code Comment'
),
expected,
extensions=[CodeHiliteExtension(linenums=None)]
)
def testHighlightLinesWithColon(self):
if has_pygments:
expected = (
'<div class="codehilite"><pre><span></span><code><span class="hll"><span class="c1">#line 1</span>\n'
'</span><span class="c1">#line 2</span>\n'
'<span class="c1">#line 3</span>\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code class="language-python">#line 1\n'
'#line 2\n'
'#line 3\n'
'</code></pre>'
)
# Double quotes
self.assertMarkdownRenders(
(
'\t:::Python hl_lines="1"\n'
'\t#line 1\n'
'\t#line 2\n'
'\t#line 3'
),
expected,
extensions=['codehilite']
)
# Single quotes
self.assertMarkdownRenders(
(
"\t:::Python hl_lines='1'\n"
'\t#line 1\n'
'\t#line 2\n'
'\t#line 3'
),
expected,
extensions=['codehilite']
)
def testUsePygmentsFalse(self):
self.assertMarkdownRenders(
(
'\t:::Python\n'
'\t# A Code Comment'
),
(
'<pre class="codehilite"><code class="language-python"># A Code Comment\n'
'</code></pre>'
),
extensions=[CodeHiliteExtension(use_pygments=False)]
)
def testLangPrefixEmpty(self):
self.assertMarkdownRenders(
(
'\t:::Python\n'
'\t# A Code Comment'
),
(
'<pre class="codehilite"><code class="python"># A Code Comment\n'
'</code></pre>'
),
extensions=[CodeHiliteExtension(use_pygments=False, lang_prefix='')]
)
def testLangPrefix(self):
self.assertMarkdownRenders(
(
'\t:::Python\n'
'\t# A Code Comment'
),
(
'<pre class="codehilite"><code class="lang-python"># A Code Comment\n'
'</code></pre>'
),
extensions=[CodeHiliteExtension(use_pygments=False, lang_prefix='lang-')]
)
def testDoubleEscape(self):
if has_pygments:
expected = (
'<div class="codehilite"><pre>'
'<span></span>'
'<code><span class="p"><</span><span class="nt">span</span><span class="p">></span>'
'This<span class="ni">&amp;</span>That'
'<span class="p"></</span><span class="nt">span</span><span class="p">></span>'
'\n</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code class="language-html">'
'<span>This&amp;That</span>\n'
'</code></pre>'
)
self.assertMarkdownRenders(
(
'\t:::html\n'
'\t<span>This&That</span>'
),
expected,
extensions=['codehilite']
)
def testEntitiesIntact(self):
if has_pygments:
expected = (
'<div class="codehilite"><pre>'
'<span></span>'
'<code>< &lt; and > &gt;'
'\n</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code class="language-text">'
'< &lt; and > &gt;\n'
'</code></pre>'
)
self.assertMarkdownRenders(
(
'\t:::text\n'
'\t< < and > >'
),
expected,
extensions=['codehilite']
)
def testHighlightAmps(self):
if has_pygments:
expected = (
'<div class="codehilite"><pre><span></span><code>&\n'
'&amp;\n'
'&amp;amp;\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code class="language-text">&\n'
'&amp;\n'
'&amp;amp;\n'
'</code></pre>'
)
self.assertMarkdownRenders(
(
'\t:::text\n'
'\t&\n'
'\t&\n'
'\t&amp;'
),
expected,
extensions=['codehilite']
)
def testUnknownOption(self):
if has_pygments:
# Odd result as no `lang` given and a single comment is not enough for guessing.
expected = (
'<div class="codehilite"><pre><span></span><code><span class="err"># A Code Comment</span>\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code># A Code Comment\n'
'</code></pre>'
)
self.assertMarkdownRenders(
'\t# A Code Comment',
expected,
extensions=[CodeHiliteExtension(unknown='some value')],
)
def testMultipleBlocksSameStyle(self):
if has_pygments:
# See also: https://github.com/Python-Markdown/markdown/issues/1240
expected = (
'<div class="codehilite" style="background: #202020"><pre style="line-height: 125%; margin: 0;">'
'<span></span><code><span style="color: #999999; font-style: italic"># First Code Block</span>\n'
'</code></pre></div>\n\n'
'<p>Normal paragraph</p>\n'
'<div class="codehilite" style="background: #202020"><pre style="line-height: 125%; margin: 0;">'
'<span></span><code><span style="color: #999999; font-style: italic"># Second Code Block</span>\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code class="language-python"># First Code Block\n'
'</code></pre>\n\n'
'<p>Normal paragraph</p>\n'
'<pre class="codehilite"><code class="language-python"># Second Code Block\n'
'</code></pre>'
)
self.assertMarkdownRenders(
(
'\t:::Python\n'
'\t# First Code Block\n\n'
'Normal paragraph\n\n'
'\t:::Python\n'
'\t# Second Code Block'
),
expected,
extensions=[CodeHiliteExtension(pygments_style="native", noclasses=True)]
)
def testFormatterLangStr(self):
if has_pygments:
expected = (
'<div class="codehilite"><pre><span></span><code class="language-python">'
'<span class="c1"># A Code Comment</span>\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code class="language-python"># A Code Comment\n'
'</code></pre>'
)
self.assertMarkdownRenders(
'\t:::Python\n'
'\t# A Code Comment',
expected,
extensions=[
CodeHiliteExtension(
guess_lang=False,
pygments_formatter=self.custom_pygments_formatter
)
]
)
def testFormatterLangStrGuessLang(self):
if has_pygments:
expected = (
'<div class="codehilite"><pre><span></span>'
'<code class="language-js+php"><span class="cp"><?php</span> '
'<span class="k">print</span><span class="p">(</span>'
'<span class="s2">"Hello World"</span>'
'<span class="p">);</span> <span class="cp">?></span>\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code><?php print("Hello World"); ?>\n'
'</code></pre>'
)
# Use PHP as the the starting `<?php` tag ensures an accurate guess.
self.assertMarkdownRenders(
'\t<?php print("Hello World"); ?>',
expected,
extensions=[CodeHiliteExtension(pygments_formatter=self.custom_pygments_formatter)]
)
def testFormatterLangStrEmptyLang(self):
if has_pygments:
expected = (
'<div class="codehilite"><pre><span></span>'
'<code class="language-text"># A Code Comment\n'
'</code></pre></div>'
)
else:
expected = (
'<pre class="codehilite"><code># A Code Comment\n'
'</code></pre>'
)
self.assertMarkdownRenders(
'\t# A Code Comment',
expected,
extensions=[
CodeHiliteExtension(
guess_lang=False,
pygments_formatter=self.custom_pygments_formatter,
)
]
)
def testDoesntCrashWithEmptyCodeTag(self):
expected = '<h1>Hello</h1>\n<pre><code></code></pre>'
self.assertMarkdownRenders(
'# Hello',
expected,
extensions=[CodeHiliteExtension(), _ExtensionThatAddsAnEmptyCodeTag()]
)
| TestCodeHiliteExtension |
python | astropy__astropy | astropy/cosmology/_src/tests/flrw/test_base.py | {
"start": 15675,
"end": 20850
} | class ____(FlatCosmologyMixinTest, ParameterFlatOde0TestMixin):
"""Tests for :class:`astropy.cosmology.FlatFLRWMixin` subclasses.
E.g to use this class::
class TestFlatSomeFLRW(FlatFLRWMixinTest, TestSomeFLRW):
...
"""
def setup_class(self):
"""Setup for testing.
Set up as for regular FLRW test class, but remove dark energy component
since flat cosmologies are forbidden Ode0 as an argument,
see ``test_init_subclass``.
"""
super().setup_class(self)
self._cls_args.pop("Ode0")
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test initializing subclass, mostly that can't have Ode0 in init."""
super().test_init_subclass(cosmo_cls)
with pytest.raises(TypeError, match="subclasses of"):
@dataclass_decorator
class HASOde0SubClass(cosmo_cls):
def __init__(self, Ode0):
pass
_COSMOLOGY_CLASSES.pop(HASOde0SubClass.__qualname__, None)
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
super().test_init(cosmo_cls)
cosmo = cosmo_cls(*self.cls_args, **self.cls_kwargs)
assert cosmo.Ok0 == 0.0
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ok0)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
super().test_Ok0(cosmo_cls, cosmo)
# for flat cosmologies, Ok0 is not *close* to 0, it *is* 0
assert cosmo.Ok0 == 0.0
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`. Should always be 1."""
super().test_Otot0(cosmo)
# for flat cosmologies, Otot0 is not *close* to 1, it *is* 1
assert cosmo.Otot0 == 1.0
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`. Should always be 1."""
super().test_Otot(cosmo, z)
# for flat cosmologies, Otot is 1, within precision.
assert u.allclose(cosmo.Otot(z), 1.0)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize(
"method", sorted(FLRWTest._FLRW_redshift_methods - {"Otot"})
)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
# ---------------------------------------------------------------
def test_clone_to_nonflat_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_to_nonflat_change_param(cosmo)
# change Ode0, without non-flat
msg = "Cannot set 'Ode0' in clone unless 'to_nonflat=True'. "
with pytest.raises(ValueError, match=msg):
cosmo.clone(Ode0=1)
# change to non-flat
nc = cosmo.clone(to_nonflat=True, Ode0=cosmo.Ode0)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
nc = cosmo.clone(to_nonflat=True, Ode0=1)
assert nc.Ode0 == 1.0
assert nc.name == cosmo.name + " (modified)"
# ---------------------------------------------------------------
def test_is_equivalent(self, cosmo, nonflatcosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to TestFLRW
# against non-flat Cosmology
assert not cosmo.is_equivalent(nonflatcosmo)
assert not nonflatcosmo.is_equivalent(cosmo)
# non-flat version of class
nonflat_cosmo_cls = cosmo.__nonflatclass__
# keys check in `test_is_equivalent_nonflat_class_different_params`
# non-flat
nonflat = nonflat_cosmo_cls(*self.cls_args, Ode0=0.9, **self.cls_kwargs)
assert not nonflat.is_equivalent(cosmo)
assert not cosmo.is_equivalent(nonflat)
# Flat, but not FlatFLRWMixin
# This will require forcing flatness by overriding attribute values.
# Since Cosmology is frozen, the easiest way is via __dict__.
flat = nonflat_cosmo_cls(
*self.cls_args,
Ode0=1.0 - cosmo.Om0 - cosmo.Ogamma0 - cosmo.Onu0,
**self.cls_kwargs,
)
flat.__dict__["Ok0"] = 0.0 # manually forcing flatness by setting `Ok0`.
assert flat.is_equivalent(cosmo)
assert cosmo.is_equivalent(flat)
def test_repr(self, cosmo_cls, cosmo):
"""
Test method ``.__repr__()``. Skip non-flat superclass test.
e.g. `TestFlatLambdaCDDM` -> `FlatFLRWMixinTest`
vs `TestFlatLambdaCDDM` -> `TestLambdaCDDM` -> `FlatFLRWMixinTest`
"""
# test eliminated Ode0 from parameters
assert "Ode0" not in repr(cosmo)
| FlatFLRWMixinTest |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 79775,
"end": 79901
} | class ____:
xlLandscape = 2 # from enum XlPageOrientation
xlPortrait = 1 # from enum XlPageOrientation
| PageOrientation |
python | huggingface__transformers | src/transformers/modelcard.py | {
"start": 13621,
"end": 30474
} | class ____:
model_name: str
language: Optional[Union[str, list[str]]] = None
license: Optional[str] = None
tags: Optional[Union[str, list[str]]] = None
finetuned_from: Optional[str] = None
tasks: Optional[Union[str, list[str]]] = None
dataset: Optional[Union[str, list[str]]] = None
dataset_tags: Optional[Union[str, list[str]]] = None
dataset_args: Optional[Union[str, list[str]]] = None
dataset_metadata: Optional[dict[str, Any]] = None
eval_results: Optional[dict[str, float]] = None
eval_lines: Optional[list[str]] = None
hyperparameters: Optional[dict[str, Any]] = None
source: Optional[str] = "trainer"
def __post_init__(self):
# Infer default license from the checkpoint used, if possible.
if (
self.license is None
and not is_offline_mode()
and self.finetuned_from is not None
and len(self.finetuned_from) > 0
):
try:
info = model_info(self.finetuned_from)
for tag in info.tags:
if tag.startswith("license:"):
self.license = tag[8:]
except (httpx.HTTPError, HFValidationError, OfflineModeIsEnabled):
pass
def create_model_index(self, metric_mapping):
model_index = {"name": self.model_name}
# Dataset mapping tag -> name
dataset_names = _listify(self.dataset)
dataset_tags = _listify(self.dataset_tags)
dataset_args = _listify(self.dataset_args)
dataset_metadata = _listify(self.dataset_metadata)
if len(dataset_args) < len(dataset_tags):
dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args))
dataset_mapping = dict(zip(dataset_tags, dataset_names))
dataset_arg_mapping = dict(zip(dataset_tags, dataset_args))
dataset_metadata_mapping = dict(zip(dataset_tags, dataset_metadata))
task_mapping = {
task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING
}
model_index["results"] = []
if len(task_mapping) == 0 and len(dataset_mapping) == 0:
return [model_index]
if len(task_mapping) == 0:
task_mapping = {None: None}
if len(dataset_mapping) == 0:
dataset_mapping = {None: None}
# One entry per dataset and per task
all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping]
for task_tag, ds_tag in all_possibilities:
result = {}
if task_tag is not None:
result["task"] = {"name": task_mapping[task_tag], "type": task_tag}
if ds_tag is not None:
metadata = dataset_metadata_mapping.get(ds_tag, {})
result["dataset"] = {
"name": dataset_mapping[ds_tag],
"type": ds_tag,
**metadata,
}
if dataset_arg_mapping[ds_tag] is not None:
result["dataset"]["args"] = dataset_arg_mapping[ds_tag]
if len(metric_mapping) > 0:
result["metrics"] = []
for metric_tag, metric_name in metric_mapping.items():
result["metrics"].append(
{
"name": metric_name,
"type": metric_tag,
"value": self.eval_results[metric_name],
}
)
# Remove partial results to avoid the model card being rejected.
if "task" in result and "dataset" in result and "metrics" in result:
model_index["results"].append(result)
else:
logger.info(f"Dropping the following result as it does not have all the necessary fields:\n{result}")
return [model_index]
def create_metadata(self):
metric_mapping = infer_metric_tags_from_eval_results(self.eval_results)
metadata = {}
metadata = _insert_value(metadata, "library_name", "transformers")
metadata = _insert_values_as_list(metadata, "language", self.language)
metadata = _insert_value(metadata, "license", self.license)
if self.finetuned_from is not None and isinstance(self.finetuned_from, str) and len(self.finetuned_from) > 0:
metadata = _insert_value(metadata, "base_model", self.finetuned_from)
metadata = _insert_values_as_list(metadata, "tags", self.tags)
metadata = _insert_values_as_list(metadata, "datasets", self.dataset_tags)
metadata = _insert_values_as_list(metadata, "metrics", list(metric_mapping.keys()))
metadata["model-index"] = self.create_model_index(metric_mapping)
return metadata
def to_model_card(self):
model_card = ""
metadata = yaml.dump(self.create_metadata(), sort_keys=False)
if len(metadata) > 0:
model_card = f"---\n{metadata}---\n"
# Now the model card for realsies.
if self.source == "trainer":
model_card += AUTOGENERATED_TRAINER_COMMENT
model_card += f"\n# {self.model_name}\n\n"
if self.finetuned_from is None:
model_card += "This model was trained from scratch on "
else:
model_card += (
"This model is a fine-tuned version of"
f" [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on "
)
if self.dataset is None or (isinstance(self.dataset, list) and len(self.dataset) == 0):
model_card += "an unknown dataset."
else:
if isinstance(self.dataset, str):
model_card += f"the {self.dataset} dataset."
elif isinstance(self.dataset, (tuple, list)) and len(self.dataset) == 1:
model_card += f"the {self.dataset[0]} dataset."
else:
model_card += (
", ".join([f"the {ds}" for ds in self.dataset[:-1]]) + f" and the {self.dataset[-1]} datasets."
)
if self.eval_results is not None:
model_card += "\nIt achieves the following results on the evaluation set:\n"
model_card += "\n".join([f"- {name}: {_maybe_round(value)}" for name, value in self.eval_results.items()])
model_card += "\n"
model_card += "\n## Model description\n\nMore information needed\n"
model_card += "\n## Intended uses & limitations\n\nMore information needed\n"
model_card += "\n## Training and evaluation data\n\nMore information needed\n"
model_card += "\n## Training procedure\n"
model_card += "\n### Training hyperparameters\n"
if self.hyperparameters is not None:
model_card += "\nThe following hyperparameters were used during training:\n"
model_card += "\n".join([f"- {name}: {value}" for name, value in self.hyperparameters.items()])
model_card += "\n"
else:
model_card += "\nMore information needed\n"
if self.eval_lines is not None:
model_card += "\n### Training results\n\n"
model_card += make_markdown_table(self.eval_lines)
model_card += "\n"
model_card += "\n### Framework versions\n\n"
model_card += f"- Transformers {__version__}\n"
if self.source == "trainer" and is_torch_available():
import torch
model_card += f"- Pytorch {torch.__version__}\n"
if is_datasets_available():
import datasets
model_card += f"- Datasets {datasets.__version__}\n"
if is_tokenizers_available():
import tokenizers
model_card += f"- Tokenizers {tokenizers.__version__}\n"
return model_card
@classmethod
def from_trainer(
cls,
trainer,
language=None,
license=None,
tags=None,
model_name=None,
finetuned_from=None,
tasks=None,
dataset_tags=None,
dataset_metadata=None,
dataset=None,
dataset_args=None,
):
# Infer default from dataset
one_dataset = trainer.eval_dataset if trainer.eval_dataset is not None else trainer.train_dataset
if is_hf_dataset(one_dataset) and (dataset_tags is None or dataset_args is None or dataset_metadata is None):
default_tag = one_dataset.builder_name
# Those are not real datasets from the Hub so we exclude them.
if default_tag not in ["csv", "json", "pandas", "parquet", "text"]:
if dataset_metadata is None:
dataset_metadata = [{"config": one_dataset.config_name, "split": str(one_dataset.split)}]
if dataset_tags is None:
dataset_tags = [default_tag]
if dataset_args is None:
dataset_args = [one_dataset.config_name]
if dataset is None and dataset_tags is not None:
dataset = dataset_tags
# Infer default finetuned_from
if (
finetuned_from is None
and hasattr(trainer.model.config, "_name_or_path")
and not os.path.isdir(trainer.model.config._name_or_path)
):
finetuned_from = trainer.model.config._name_or_path
# Infer default task tag:
if tasks is None:
model_class_name = trainer.model.__class__.__name__
for task, mapping in TASK_MAPPING.items():
if model_class_name in _get_mapping_values(mapping):
tasks = task
if model_name is None:
model_name = Path(trainer.args.output_dir).name
if len(model_name) == 0:
model_name = finetuned_from
# Add `generated_from_trainer` to the tags
if tags is None:
tags = ["generated_from_trainer"]
elif isinstance(tags, str) and tags != "generated_from_trainer":
tags = [tags, "generated_from_trainer"]
elif "generated_from_trainer" not in tags:
tags.append("generated_from_trainer")
_, eval_lines, eval_results = parse_log_history(trainer.state.log_history)
hyperparameters = extract_hyperparameters_from_trainer(trainer)
return cls(
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
tasks=tasks,
dataset=dataset,
dataset_tags=dataset_tags,
dataset_args=dataset_args,
dataset_metadata=dataset_metadata,
eval_results=eval_results,
eval_lines=eval_lines,
hyperparameters=hyperparameters,
)
def parse_log_history(log_history):
"""
Parse the `log_history` of a Trainer to get the intermediate and final evaluation results.
"""
idx = 0
while idx < len(log_history) and "train_runtime" not in log_history[idx]:
idx += 1
# If there are no training logs
if idx == len(log_history):
idx -= 1
while idx >= 0 and "eval_loss" not in log_history[idx]:
idx -= 1
if idx >= 0:
return None, None, log_history[idx]
else:
return None, None, None
# From now one we can assume we have training logs:
train_log = log_history[idx]
lines = []
training_loss = "No log"
for i in range(idx):
if "loss" in log_history[i]:
training_loss = log_history[i]["loss"]
if "eval_loss" in log_history[i]:
metrics = log_history[i].copy()
_ = metrics.pop("total_flos", None)
epoch = metrics.pop("epoch", None)
step = metrics.pop("step", None)
_ = metrics.pop("eval_runtime", None)
_ = metrics.pop("eval_samples_per_second", None)
_ = metrics.pop("eval_steps_per_second", None)
values = {"Training Loss": training_loss, "Epoch": epoch, "Step": step}
for k, v in metrics.items():
if k == "eval_loss":
values["Validation Loss"] = v
else:
splits = k.split("_")
name = " ".join([part.capitalize() for part in splits[1:]])
values[name] = v
lines.append(values)
idx = len(log_history) - 1
while idx >= 0 and "eval_loss" not in log_history[idx]:
idx -= 1
if idx > 0:
eval_results = {}
for key, value in log_history[idx].items():
key = key.removeprefix("eval_")
if key not in ["runtime", "samples_per_second", "steps_per_second", "epoch", "step"]:
camel_cased_key = " ".join([part.capitalize() for part in key.split("_")])
eval_results[camel_cased_key] = value
return train_log, lines, eval_results
else:
return train_log, lines, None
def _maybe_round(v, decimals=4):
if isinstance(v, float) and len(str(v).split(".")) > 1 and len(str(v).split(".")[1]) > decimals:
return f"{v:.{decimals}f}"
return str(v)
def _regular_table_line(values, col_widths):
values_with_space = [f"| {v}" + " " * (w - len(v) + 1) for v, w in zip(values, col_widths)]
return "".join(values_with_space) + "|\n"
def _second_table_line(col_widths):
values = ["|:" + "-" * w + ":" for w in col_widths]
return "".join(values) + "|\n"
def make_markdown_table(lines):
"""
Create a nice Markdown table from the results in `lines`.
"""
if lines is None or len(lines) == 0:
return ""
col_widths = {key: len(str(key)) for key in lines[0]}
for line in lines:
for key, value in line.items():
if col_widths[key] < len(_maybe_round(value)):
col_widths[key] = len(_maybe_round(value))
table = _regular_table_line(list(lines[0].keys()), list(col_widths.values()))
table += _second_table_line(list(col_widths.values()))
for line in lines:
table += _regular_table_line([_maybe_round(v) for v in line.values()], list(col_widths.values()))
return table
_TRAINING_ARGS_KEYS = [
"learning_rate",
"train_batch_size",
"eval_batch_size",
"seed",
]
def extract_hyperparameters_from_trainer(trainer):
hyperparameters = {k: getattr(trainer.args, k) for k in _TRAINING_ARGS_KEYS}
if trainer.args.parallel_mode not in [ParallelMode.NOT_PARALLEL, ParallelMode.NOT_DISTRIBUTED]:
hyperparameters["distributed_type"] = (
"multi-GPU" if trainer.args.parallel_mode == ParallelMode.DISTRIBUTED else trainer.args.parallel_mode.value
)
if trainer.args.world_size > 1:
hyperparameters["num_devices"] = trainer.args.world_size
if trainer.args.gradient_accumulation_steps > 1:
hyperparameters["gradient_accumulation_steps"] = trainer.args.gradient_accumulation_steps
total_train_batch_size = (
trainer.args.train_batch_size * trainer.args.world_size * trainer.args.gradient_accumulation_steps
)
if total_train_batch_size != hyperparameters["train_batch_size"]:
hyperparameters["total_train_batch_size"] = total_train_batch_size
total_eval_batch_size = trainer.args.eval_batch_size * trainer.args.world_size
if total_eval_batch_size != hyperparameters["eval_batch_size"]:
hyperparameters["total_eval_batch_size"] = total_eval_batch_size
if trainer.args.optim:
optimizer_name = trainer.args.optim
optimizer_args = trainer.args.optim_args if trainer.args.optim_args else "No additional optimizer arguments"
if "adam" in optimizer_name.lower():
hyperparameters["optimizer"] = (
f"Use {optimizer_name} with betas=({trainer.args.adam_beta1},{trainer.args.adam_beta2}) and"
f" epsilon={trainer.args.adam_epsilon} and optimizer_args={optimizer_args}"
)
else:
hyperparameters["optimizer"] = f"Use {optimizer_name} and the args are:\n{optimizer_args}"
hyperparameters["lr_scheduler_type"] = trainer.args.lr_scheduler_type.value
if trainer.args.warmup_steps != 0.0:
hyperparameters["lr_scheduler_warmup_steps"] = trainer.args.warmup_steps
if trainer.args.max_steps != -1:
hyperparameters["training_steps"] = trainer.args.max_steps
else:
hyperparameters["num_epochs"] = trainer.args.num_train_epochs
if trainer.args.fp16:
hyperparameters["mixed_precision_training"] = "Native AMP"
if trainer.args.label_smoothing_factor != 0.0:
hyperparameters["label_smoothing_factor"] = trainer.args.label_smoothing_factor
return hyperparameters
| TrainingSummary |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 82346,
"end": 84371
} | class ____(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testJ1939Constants(self):
socket.CAN_J1939
socket.J1939_MAX_UNICAST_ADDR
socket.J1939_IDLE_ADDR
socket.J1939_NO_ADDR
socket.J1939_NO_NAME
socket.J1939_PGN_REQUEST
socket.J1939_PGN_ADDRESS_CLAIMED
socket.J1939_PGN_ADDRESS_COMMANDED
socket.J1939_PGN_PDU1_MAX
socket.J1939_PGN_MAX
socket.J1939_NO_PGN
# J1939 socket options
socket.SO_J1939_FILTER
socket.SO_J1939_PROMISC
socket.SO_J1939_SEND_PRIO
socket.SO_J1939_ERRQUEUE
socket.SCM_J1939_DEST_ADDR
socket.SCM_J1939_DEST_NAME
socket.SCM_J1939_PRIO
socket.SCM_J1939_ERRQUEUE
socket.J1939_NLA_PAD
socket.J1939_NLA_BYTES_ACKED
socket.J1939_EE_INFO_NONE
socket.J1939_EE_INFO_TX_ABORT
socket.J1939_FILTER_MAX
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testCreateJ1939Socket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
pass
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
addr = self.interface, socket.J1939_NO_NAME, socket.J1939_NO_PGN, socket.J1939_NO_ADDR
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
| J1939Test |
python | viewflow__viewflow | tests/workflow/test_flow_viewset__flow.py | {
"start": 3578,
"end": 3864
} | class ____(Flow):
start = flow.StartHandle().Next(this.end)
end = flow.End()
urlpatterns = [
path('permitted/', flow.FlowAppViewset(TestFlowViewestFlow).urls),
path('unavailable/', flow.FlowAppViewset(TestUnavailableFlowViewestFlow).urls),
]
| TestUnavailableFlowViewestFlow |
python | mitmproxy__pdoc | test/testdata/demopackage/_child_d.py | {
"start": 24,
"end": 126
} | class ____:
"""The Test class from _child_d."""
def foo(self, a: int):
"""Do foo."""
| Test |
python | dask__dask | dask/dataframe/io/parquet/core.py | {
"start": 2739,
"end": 25389
} | class ____:
"""
Parquet Function-Wrapper Class
Writes a DataFrame partition into a distinct parquet
file. When called, the function also requires the
current block index (via ``blockwise.BlockIndex``).
"""
def __init__(
self,
engine,
path,
fs,
partition_on,
write_metadata_file,
i_offset,
name_function,
kwargs_pass,
):
self.engine = engine
self.path = path
self.fs = fs
self.partition_on = partition_on
self.write_metadata_file = write_metadata_file
self.i_offset = i_offset
self.name_function = name_function
self.kwargs_pass = kwargs_pass
# NOTE: __name__ must be with "to-parquet"
# for the name of the resulting `Blockwise`
# layer to begin with "to-parquet"
self.__name__ = "to-parquet"
def __dask_tokenize__(self):
return (
self.engine,
self.path,
self.fs,
self.partition_on,
self.write_metadata_file,
self.i_offset,
self.name_function,
self.kwargs_pass,
)
def __call__(self, df, block_index: tuple[int]):
# Get partition index from block index tuple
part_i = block_index[0]
filename = (
f"part.{part_i + self.i_offset}.parquet"
if self.name_function is None
else self.name_function(part_i + self.i_offset)
)
# Write out data
return self.engine.write_partition(
df,
self.path,
self.fs,
filename,
self.partition_on,
self.write_metadata_file,
**(dict(self.kwargs_pass, head=True) if part_i == 0 else self.kwargs_pass),
)
def check_multi_support(engine):
# Helper function to check that the engine
# supports a multi-partition read
return hasattr(engine, "multi_support") and engine.multi_support()
def read_parquet_part(fs, engine, meta, part, columns, index, kwargs):
"""Read a part of a parquet dataset
This function is used by `read_parquet`."""
if isinstance(part, list):
if len(part) == 1 or part[0][1] or not check_multi_support(engine):
# Part kwargs expected
func = engine.read_partition
dfs = [
func(
fs,
rg,
columns.copy(),
index,
**toolz.merge(kwargs, kw),
)
for (rg, kw) in part
]
df = concat(dfs, axis=0) if len(dfs) > 1 else dfs[0]
else:
# No part specific kwargs, let engine read
# list of parts at once
df = engine.read_partition(
fs,
[p[0] for p in part],
columns.copy(),
index,
**kwargs,
)
else:
# NOTE: `kwargs` are the same for all parts, while `part_kwargs` may
# be different for each part.
rg, part_kwargs = part
df = engine.read_partition(
fs,
rg,
columns,
index,
**toolz.merge(kwargs, part_kwargs),
)
if meta.columns.name:
df.columns.name = meta.columns.name
columns = columns or []
index = index or []
df = df[[c for c in columns if c not in index]]
if index == [NONE_LABEL]:
df.index.name = None
return df
def create_metadata_file(
paths,
root_dir=None,
out_dir=None,
engine="pyarrow",
storage_options=None,
split_every=32,
compute=True,
compute_kwargs=None,
fs=None,
):
"""Construct a global _metadata file from a list of parquet files.
Dask's read_parquet function is designed to leverage a global
_metadata file whenever one is available. The to_parquet
function will generate this file automatically by default, but it
may not exist if the dataset was generated outside of Dask. This
utility provides a mechanism to generate a _metadata file from a
list of existing parquet files.
Parameters
----------
paths : list(string)
List of files to collect footer metadata from.
root_dir : string, optional
Root directory of dataset. The `file_path` fields in the new
_metadata file will relative to this directory. If None, a common
root directory will be inferred.
out_dir : string or False, optional
Directory location to write the final _metadata file. By default,
this will be set to `root_dir`. If False is specified, the global
metadata will be returned as an in-memory object (and will not be
written to disk).
engine : str or Engine, default 'pyarrow'
Parquet Engine to use. Only 'pyarrow' is supported if a string
is passed.
storage_options : dict, optional
Key/value pairs to be passed on to the file-system backend, if any.
split_every : int, optional
The final metadata object that is written to _metadata can be much
smaller than the list of footer metadata. In order to avoid the
aggregation of all metadata within a single task, a tree reduction
is used. This argument specifies the maximum number of metadata
inputs to be handled by any one task in the tree. Defaults to 32.
compute : bool, optional
If True (default) then the result is computed immediately. If False
then a ``dask.delayed`` object is returned for future computation.
compute_kwargs : dict, optional
Options to be passed in to the compute method
fs : fsspec object, optional
File-system instance to use for file handling. If prefixes have
been removed from the elements of ``paths`` before calling this
function, an ``fs`` argument must be provided to ensure correct
behavior on remote file systems ("naked" paths cannot be used
to infer file-system information).
"""
if isinstance(engine, str):
engine = get_engine(engine)
# Process input path list
if fs is None:
# Only do this if an fsspec file-system object is not
# already defined. The prefixes may already be stripped.
fs, _, paths = get_fs_token_paths(
paths, mode="rb", storage_options=storage_options
)
ap_kwargs = {"root": root_dir} if root_dir else {}
paths, root_dir, fns = _sort_and_analyze_paths(paths, fs, **ap_kwargs)
out_dir = root_dir if out_dir is None else out_dir
# Start constructing a raw graph
dsk = {}
name = "gen-metadata-" + tokenize(paths, fs)
collect_name = f"collect-{name}"
agg_name = f"agg-{name}"
# Define a "collect" task for each file in the input list.
# Each tasks will:
# 1. Extract the footer metadata from a distinct file
# 2. Populate the `file_path` field in the metadata
# 3. Return the extracted/modified metadata
for p, (fn, path) in enumerate(zip(fns, paths)):
key = (collect_name, p, 0)
dsk[key] = (engine.collect_file_metadata, path, fs, fn)
# Build a reduction tree to aggregate all footer metadata
# into a single metadata object. Each task in the tree
# will take in a list of metadata objects as input, and will
# usually output a single (aggregated) metadata object.
# The final task in the tree will write the result to disk
# instead of returning it (this behavior is triggered by
# passing a file path to `engine.aggregate_metadata`).
parts = len(paths)
widths = [parts]
while parts > 1:
parts = math.ceil(parts / split_every)
widths.append(parts)
height = len(widths)
for depth in range(1, height):
for group in range(widths[depth]):
p_max = widths[depth - 1]
lstart = split_every * group
lstop = min(lstart + split_every, p_max)
dep_task_name = collect_name if depth == 1 else agg_name
node_list = [(dep_task_name, p, depth - 1) for p in range(lstart, lstop)]
if depth == height - 1:
assert group == 0
dsk[name] = (engine.aggregate_metadata, node_list, fs, out_dir)
else:
dsk[(agg_name, group, depth)] = (
engine.aggregate_metadata,
node_list,
None,
None,
)
# There will be no aggregation tasks if there is only one file
if len(paths) == 1:
dsk[name] = (engine.aggregate_metadata, [(collect_name, 0, 0)], fs, out_dir)
# Convert the raw graph to a `Delayed` object
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[])
out = Delayed(name, graph)
# Optionally compute the result
if compute:
if compute_kwargs is None:
compute_kwargs = dict()
out = out.compute(**compute_kwargs)
return out
_ENGINES: dict[str, type[Engine]] = {}
def get_engine(engine: Literal["auto", "pyarrow"] | type[Engine]) -> type[Engine]:
"""Get the parquet engine backend implementation.
Parameters
----------
engine : {'auto', 'pyarrow'} or Engine subclass
Parquet library to use. Defaults to 'pyarrow'.
This can be used to inject third-party engine; e.g. from dask_cudf.
"""
if isinstance(engine, type) and issubclass(engine, Engine):
return engine
if engine in ("auto", "pyarrow"):
if "pyarrow" in _ENGINES:
return _ENGINES["pyarrow"]
try:
import_required("pyarrow", "`pyarrow` not installed")
except RuntimeError:
if engine != "auto":
raise
else:
from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine
_ENGINES["pyarrow"] = eng = ArrowDatasetEngine
return eng
if engine == "auto":
raise RuntimeError("`pyarrow` not installed")
raise ValueError(
f'Unsupported engine: "{engine}". Valid choices are "pyarrow" or "auto".'
)
#####################
# Utility Functions #
#####################
def sorted_columns(statistics, columns=None):
"""Find sorted columns given row-group statistics
This finds all columns that are sorted, along with the
appropriate ``divisions`` for those columns. If the (optional)
``columns`` argument is used, the search will be restricted
to the specified column set.
Returns
-------
out: List of {'name': str, 'divisions': List[str]} dictionaries
"""
if not statistics:
return []
out = []
for i, c in enumerate(statistics[0]["columns"]):
if columns and c["name"] not in columns:
continue
if not all(
"min" in s["columns"][i] and "max" in s["columns"][i] for s in statistics
):
continue
divisions = [c["min"]]
max = c["max"]
success = c["min"] is not None
for stats in statistics[1:]:
c = stats["columns"][i]
if c["min"] is None:
success = False
break
if c["min"] >= max:
divisions.append(c["min"])
max = c["max"]
else:
success = False
break
if success:
divisions.append(max)
assert divisions == sorted(divisions)
out.append({"name": c["name"], "divisions": divisions})
return out
def apply_filters(parts, statistics, filters):
"""Apply filters onto parts/statistics pairs
Parameters
----------
parts: list
Tokens corresponding to row groups to read in the future
statistics: List[dict]
List of statistics for each part, including min and max values
filters: Union[List[Tuple[str, str, Any]], List[List[Tuple[str, str, Any]]]]
List of filters to apply, like ``[[('x', '=', 0), ...], ...]``. This
implements partition-level (hive) filtering only, i.e., to prevent the
loading of some row-groups and/or files.
Predicates can be expressed in disjunctive normal form (DNF). This means
that the innermost tuple describes a single column predicate. These
inner predicates are combined with an AND conjunction into a larger
predicate. The outer-most list then combines all of the combined
filters with an OR disjunction.
Predicates can also be expressed as a List[Tuple]. These are evaluated
as an AND conjunction. To express OR in predicates, one must use the
(preferred) List[List[Tuple]] notation.
Returns
-------
parts, statistics: the same as the input, but possibly a subset
"""
# Supported predicate operators
_supported_operators = {
"=",
"==",
"!=",
"<",
"<=",
">",
">=",
"is",
"is not",
"in",
"not in",
}
def apply_conjunction(parts, statistics, conjunction):
for column, operator, value in conjunction:
if operator not in _supported_operators:
# Use same error message as `_filters_to_expression`
raise ValueError(
f'"{(column, operator, value)}" is not a valid operator in predicates.'
)
elif operator in ("in", "not in") and not isinstance(
value, (list, set, tuple)
):
raise TypeError("Value of 'in' filter must be a list, set, or tuple.")
out_parts = []
out_statistics = []
for part, stats in zip(parts, statistics):
if "filter" in stats and stats["filter"]:
continue # Filtered by engine
try:
c = toolz.groupby("name", stats["columns"])[column][0]
min = c["min"]
max = c["max"]
null_count = c.get("null_count", None)
except KeyError:
out_parts.append(part)
out_statistics.append(stats)
else:
if (
# Must allow row-groups with "missing" stats
(min is None and max is None and not null_count)
# Check "is" and "is not" filters first
or operator == "is"
and null_count
or operator == "is not"
and (not pd.isna(min) or not pd.isna(max))
# Allow all-null row-groups if not filtering out nulls
or operator != "is not"
and min is None
and max is None
and null_count
# Start conventional (non-null) filtering
# (main/max cannot be None for remaining checks)
or operator in ("==", "=")
and min <= value <= max
or operator == "!="
and (null_count or min != value or max != value)
or operator == "<"
and min < value
or operator == "<="
and min <= value
or operator == ">"
and max > value
or operator == ">="
and max >= value
or operator == "in"
and any(min <= item <= max for item in value)
or operator == "not in"
and not any(min == max == item for item in value)
):
out_parts.append(part)
out_statistics.append(stats)
parts, statistics = out_parts, out_statistics
return parts, statistics
conjunction, *disjunction = filters if isinstance(filters[0], list) else [filters]
out_parts, out_statistics = apply_conjunction(parts, statistics, conjunction)
for conjunction in disjunction:
for part, stats in zip(*apply_conjunction(parts, statistics, conjunction)):
if part not in out_parts:
out_parts.append(part)
out_statistics.append(stats)
return out_parts, out_statistics
def set_index_columns(meta, index, columns, auto_index_allowed):
"""Handle index/column arguments, and modify `meta`
Used in read_parquet.
"""
ignore_index_column_intersection = False
if columns is None:
# User didn't specify columns, so ignore any intersection
# of auto-detected values with the index (if necessary)
ignore_index_column_intersection = True
# Do not allow "un-named" fields to be read in as columns.
# These were intended to be un-named indices at write time.
_index = index or []
columns = [
c for c in meta.columns if c not in (None, NONE_LABEL) or c in _index
]
if not set(columns).issubset(set(meta.columns)):
raise ValueError(
f"The following columns were not found in the dataset {set(columns) - set(meta.columns)}\n"
f"The following columns were found {meta.columns}"
)
if index:
if isinstance(index, str):
index = [index]
if isinstance(columns, str):
columns = [columns]
if ignore_index_column_intersection:
columns = [col for col in columns if col not in index]
if set(index).intersection(columns):
if auto_index_allowed:
raise ValueError(
"Specified index and column arguments must not intersect"
" (set index=False or remove the detected index from columns).\n"
f"index: {index} | column: {columns}"
)
else:
raise ValueError(
"Specified index and column arguments must not intersect.\n"
f"index: {index} | column: {columns}"
)
return meta[list(columns)], index, columns
def aggregate_row_groups(
parts, stats, blocksize, split_row_groups, fs, aggregation_depth
):
if not stats or not stats[0].get("file_path_0", None):
return parts, stats
parts_agg = []
stats_agg = []
use_row_group_criteria = split_row_groups and int(split_row_groups) > 1
use_blocksize_criteria = bool(blocksize)
if use_blocksize_criteria:
blocksize = parse_bytes(blocksize)
next_part, next_stat = [parts[0].copy()], stats[0].copy()
for i in range(1, len(parts)):
stat, part = stats[i], parts[i]
# Criteria #1 for aggregating parts: parts are within the same file
same_path = stat["file_path_0"] == next_stat["file_path_0"]
multi_path_allowed = False
if aggregation_depth:
# Criteria #2 for aggregating parts: The part does not include
# row-group information, or both parts include the same kind
# of row_group aggregation (all None, or all indices)
multi_path_allowed = len(part["piece"]) == 1
if not (same_path or multi_path_allowed):
rgs = set(list(part["piece"][1]) + list(next_part[-1]["piece"][1]))
multi_path_allowed = (rgs == {None}) or (None not in rgs)
# Criteria #3 for aggregating parts: The parts share a
# directory at the "depth" allowed by `aggregation_depth`
if not same_path and multi_path_allowed:
if aggregation_depth is True:
multi_path_allowed = True
elif isinstance(aggregation_depth, int):
# Make sure files share the same directory
root = stat["file_path_0"].split(fs.sep)[:-aggregation_depth]
next_root = next_stat["file_path_0"].split(fs.sep)[
:-aggregation_depth
]
multi_path_allowed = root == next_root
else:
raise ValueError(
f"{aggregation_depth} not supported for `aggregation_depth`"
)
def _check_row_group_criteria(stat, next_stat):
if use_row_group_criteria:
return (next_stat["num-row-groups"] + stat["num-row-groups"]) <= int(
split_row_groups
)
else:
return False
def _check_blocksize_criteria(stat, next_stat):
if use_blocksize_criteria:
return (
next_stat["total_byte_size"] + stat["total_byte_size"]
) <= blocksize
else:
return False
stat["num-row-groups"] = stat.get("num-row-groups", 1)
next_stat["num-row-groups"] = next_stat.get("num-row-groups", 1)
if (same_path or multi_path_allowed) and (
_check_row_group_criteria(stat, next_stat)
or _check_blocksize_criteria(stat, next_stat)
):
# Update part list
next_piece = next_part[-1]["piece"]
this_piece = part["piece"]
if (
same_path
and len(next_piece) > 1
and next_piece[1] != [None]
and this_piece[1] != [None]
):
next_piece[1].extend(this_piece[1])
else:
next_part.append(part)
# Update Statistics
next_stat["total_byte_size"] += stat["total_byte_size"]
next_stat["num-rows"] += stat["num-rows"]
next_stat["num-row-groups"] += stat["num-row-groups"]
for col, col_add in zip(next_stat["columns"], stat["columns"]):
if col["name"] != col_add["name"]:
raise ValueError("Columns are different!!")
if "min" in col:
col["min"] = min(col["min"], col_add["min"])
if "max" in col:
col["max"] = max(col["max"], col_add["max"])
else:
parts_agg.append(next_part)
stats_agg.append(next_stat)
next_part, next_stat = [part.copy()], stat.copy()
parts_agg.append(next_part)
stats_agg.append(next_stat)
return parts_agg, stats_agg
| ToParquetFunctionWrapper |
python | django__django | django/contrib/sites/models.py | {
"start": 2645,
"end": 3695
} | class ____(models.Model):
domain = models.CharField(
_("domain name"),
max_length=100,
validators=[_simple_domain_name_validator],
unique=True,
)
name = models.CharField(_("display name"), max_length=50)
objects = SiteManager()
class Meta:
db_table = "django_site"
verbose_name = _("site")
verbose_name_plural = _("sites")
ordering = ["domain"]
def __str__(self):
return self.domain
def natural_key(self):
return (self.domain,)
def clear_site_cache(sender, **kwargs):
"""
Clear the cache (if primed) each time a site is saved or deleted.
"""
instance = kwargs["instance"]
using = kwargs["using"]
try:
del SITE_CACHE[instance.pk]
except KeyError:
pass
try:
del SITE_CACHE[Site.objects.using(using).get(pk=instance.pk).domain]
except (KeyError, Site.DoesNotExist):
pass
pre_save.connect(clear_site_cache, sender=Site)
pre_delete.connect(clear_site_cache, sender=Site)
| Site |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 84499,
"end": 87929
} | class ____:
def __init__(self, use_swap_tensors):
self.use_swap_tensors = use_swap_tensors
def __enter__(self):
self.swap_tensors_restore = torch.__future__.get_swap_module_params_on_conversion()
if self.use_swap_tensors is not None:
torch.__future__.set_swap_module_params_on_conversion(self.use_swap_tensors)
def __exit__(self, exception_type, exception_value, traceback):
torch.__future__.set_swap_module_params_on_conversion(self.swap_tensors_restore)
# This decorator can be used for API tests that call
# torch.use_deterministic_algorithms(). When the test is finished, it will
# restore the previous deterministic flag setting.
#
# If CUDA >= 10.2, this will set the environment variable
# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that
# setting is not thrown during the test unless the test changes that variable
# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be
# restored once the test is finished.
#
# Note that if a test requires CUDA to actually register the changed
# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because
# CUDA only checks the variable when the runtime initializes. Tests can be
# run inside a subprocess like so:
#
# import subprocess, sys, os
# script = '''
# # Test code should go here
# '''
# try:
# subprocess.check_output(
# [sys.executable, '-c', script],
# stderr=subprocess.STDOUT,
# cwd=os.path.dirname(os.path.realpath(__file__)),
# env=os.environ.copy())
# except subprocess.CalledProcessError as e:
# error_message = e.output.decode('utf-8')
# # Handle exceptions raised by the subprocess here
#
def wrapDeterministicFlagAPITest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with DeterministicGuard(
torch.are_deterministic_algorithms_enabled(),
warn_only=torch.is_deterministic_algorithms_warn_only_enabled()):
class CuBLASConfigGuard:
cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
def __enter__(self):
self.cublas_config_restore = os.environ.get(self.cublas_var_name)
os.environ[self.cublas_var_name] = ':4096:8'
def __exit__(self, exception_type, exception_value, traceback):
cur_cublas_config = os.environ.get(self.cublas_var_name)
if self.cublas_config_restore is None:
if cur_cublas_config is not None:
del os.environ[self.cublas_var_name]
else:
os.environ[self.cublas_var_name] = self.cublas_config_restore
with CuBLASConfigGuard():
fn(*args, **kwargs)
return wrapper
# This decorator can be used for API tests that want to safely call
# torch.__future__.set_swap_module_params_on_conversion. `swap` can be set to
# True, False or None where None indicates that the context manager does not
# set the flag. When the test is finished, it will restore the previous swap
# flag setting.
def wrapSwapTensorsTest(swap=None):
def dec_fn(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with SwapTensorsGuard(swap):
fn(*args, **kwargs)
return wrapper
return dec_fn
# test parametrizer for swapping
| SwapTensorsGuard |
python | walkccc__LeetCode | solutions/840. Magic Squares In Grid/840.py | {
"start": 0,
"end": 489
} | class ____:
def numMagicSquaresInside(self, grid: list[list[int]]) -> int:
def isMagic(i: int, j: int) -> int:
s = "".join(str(grid[i + num // 3][j + num % 3])
for num in [0, 1, 2, 5, 8, 7, 6, 3])
return s in "43816729" * 2 or s in "43816729"[::-1] * 2
ans = 0
for i in range(len(grid) - 2):
for j in range(len(grid[0]) - 2):
if grid[i][j] % 2 == 0 and grid[i + 1][j + 1] == 5:
ans += isMagic(i, j)
return ans
| Solution |
python | pexpect__pexpect | tests/test_FSM.py | {
"start": 222,
"end": 856
} | class ____(unittest.TestCase):
def test_run_fsm(self):
def _input(prompt):
return "167 3 2 2 * * * 1 - ="
orig_input = getattr(builtins, input_name)
orig_stdout = sys.stdout
setattr(builtins, input_name, _input)
sys.stdout = sio = (io.StringIO if PY3 else io.BytesIO)()
try:
FSM.main()
finally:
setattr(builtins, input_name, orig_input)
sys.stdout = orig_stdout
printed = sio.getvalue()
assert '2003' in printed, printed
if __name__ == '__main__':
unittest.main() | FSMTestCase |
python | MongoEngine__mongoengine | tests/fields/test_fields.py | {
"start": 61269,
"end": 76599
} | class ____(MongoDBTestCase):
def setUp(self):
"""
Create two BlogPost entries in the database, each with
several EmbeddedDocuments.
"""
class Comments(EmbeddedDocument):
author = StringField()
message = StringField()
class BlogPost(Document):
comments = EmbeddedDocumentListField(Comments)
BlogPost.drop_collection()
self.Comments = Comments
self.BlogPost = BlogPost
self.post1 = self.BlogPost(
comments=[
self.Comments(author="user1", message="message1"),
self.Comments(author="user2", message="message1"),
]
).save()
self.post2 = self.BlogPost(
comments=[
self.Comments(author="user2", message="message2"),
self.Comments(author="user2", message="message3"),
self.Comments(author="user3", message="message1"),
]
).save()
def test_fails_upon_validate_if_provide_a_doc_instead_of_a_list_of_doc(self):
# Relates to Issue #1464
comment = self.Comments(author="John")
class Title(Document):
content = StringField()
# Test with an embeddedDocument instead of a list(embeddedDocument)
# It's an edge case but it used to fail with a vague error, making it difficult to troubleshoot it
post = self.BlogPost(comments=comment)
with pytest.raises(ValidationError) as exc_info:
post.validate()
error_msg = str(exc_info.value)
assert "'comments'" in error_msg
assert "Only lists and tuples may be used in a list field" in error_msg
# Test with a Document
post = self.BlogPost(comments=Title(content="garbage"))
with pytest.raises(ValidationError) as exc_info:
post.validate()
error_msg = str(exc_info.value)
assert "'comments'" in error_msg
assert "Only lists and tuples may be used in a list field" in error_msg
def test_no_keyword_filter(self):
"""
Tests the filter method of a List of Embedded Documents
with a no keyword.
"""
filtered = self.post1.comments.filter()
# Ensure nothing was changed
assert filtered == self.post1.comments
def test_single_keyword_filter(self):
"""
Tests the filter method of a List of Embedded Documents
with a single keyword.
"""
filtered = self.post1.comments.filter(author="user1")
# Ensure only 1 entry was returned.
assert len(filtered) == 1
# Ensure the entry returned is the correct entry.
assert filtered[0].author == "user1"
def test_multi_keyword_filter(self):
"""
Tests the filter method of a List of Embedded Documents
with multiple keywords.
"""
filtered = self.post2.comments.filter(author="user2", message="message2")
# Ensure only 1 entry was returned.
assert len(filtered) == 1
# Ensure the entry returned is the correct entry.
assert filtered[0].author == "user2"
assert filtered[0].message == "message2"
def test_chained_filter(self):
"""
Tests chained filter methods of a List of Embedded Documents
"""
filtered = self.post2.comments.filter(author="user2").filter(message="message2")
# Ensure only 1 entry was returned.
assert len(filtered) == 1
# Ensure the entry returned is the correct entry.
assert filtered[0].author == "user2"
assert filtered[0].message == "message2"
def test_unknown_keyword_filter(self):
"""
Tests the filter method of a List of Embedded Documents
when the keyword is not a known keyword.
"""
with pytest.raises(AttributeError):
self.post2.comments.filter(year=2)
def test_no_keyword_exclude(self):
"""
Tests the exclude method of a List of Embedded Documents
with a no keyword.
"""
filtered = self.post1.comments.exclude()
# Ensure everything was removed
assert filtered == []
def test_single_keyword_exclude(self):
"""
Tests the exclude method of a List of Embedded Documents
with a single keyword.
"""
excluded = self.post1.comments.exclude(author="user1")
# Ensure only 1 entry was returned.
assert len(excluded) == 1
# Ensure the entry returned is the correct entry.
assert excluded[0].author == "user2"
def test_multi_keyword_exclude(self):
"""
Tests the exclude method of a List of Embedded Documents
with multiple keywords.
"""
excluded = self.post2.comments.exclude(author="user3", message="message1")
# Ensure only 2 entries were returned.
assert len(excluded) == 2
# Ensure the entries returned are the correct entries.
assert excluded[0].author == "user2"
assert excluded[1].author == "user2"
def test_non_matching_exclude(self):
"""
Tests the exclude method of a List of Embedded Documents
when the keyword does not match any entries.
"""
excluded = self.post2.comments.exclude(author="user4")
# Ensure the 3 entries still exist.
assert len(excluded) == 3
def test_unknown_keyword_exclude(self):
"""
Tests the exclude method of a List of Embedded Documents
when the keyword is not a known keyword.
"""
with pytest.raises(AttributeError):
self.post2.comments.exclude(year=2)
def test_chained_filter_exclude(self):
"""
Tests the exclude method after a filter method of a List of
Embedded Documents.
"""
excluded = self.post2.comments.filter(author="user2").exclude(
message="message2"
)
# Ensure only 1 entry was returned.
assert len(excluded) == 1
# Ensure the entry returned is the correct entry.
assert excluded[0].author == "user2"
assert excluded[0].message == "message3"
def test_count(self):
"""
Tests the count method of a List of Embedded Documents.
"""
assert self.post1.comments.count() == 2
assert self.post1.comments.count() == len(self.post1.comments)
def test_filtered_count(self):
"""
Tests the filter + count method of a List of Embedded Documents.
"""
count = self.post1.comments.filter(author="user1").count()
assert count == 1
def test_single_keyword_get(self):
"""
Tests the get method of a List of Embedded Documents using a
single keyword.
"""
comment = self.post1.comments.get(author="user1")
assert isinstance(comment, self.Comments)
assert comment.author == "user1"
def test_multi_keyword_get(self):
"""
Tests the get method of a List of Embedded Documents using
multiple keywords.
"""
comment = self.post2.comments.get(author="user2", message="message2")
assert isinstance(comment, self.Comments)
assert comment.author == "user2"
assert comment.message == "message2"
def test_no_keyword_multiple_return_get(self):
"""
Tests the get method of a List of Embedded Documents without
a keyword to return multiple documents.
"""
with pytest.raises(MultipleObjectsReturned):
self.post1.comments.get()
def test_keyword_multiple_return_get(self):
"""
Tests the get method of a List of Embedded Documents with a keyword
to return multiple documents.
"""
with pytest.raises(MultipleObjectsReturned):
self.post2.comments.get(author="user2")
def test_unknown_keyword_get(self):
"""
Tests the get method of a List of Embedded Documents with an
unknown keyword.
"""
with pytest.raises(AttributeError):
self.post2.comments.get(year=2020)
def test_no_result_get(self):
"""
Tests the get method of a List of Embedded Documents where get
returns no results.
"""
with pytest.raises(DoesNotExist):
self.post1.comments.get(author="user3")
def test_first(self):
"""
Tests the first method of a List of Embedded Documents to
ensure it returns the first comment.
"""
comment = self.post1.comments.first()
# Ensure a Comment object was returned.
assert isinstance(comment, self.Comments)
assert comment == self.post1.comments[0]
def test_create(self):
"""
Test the create method of a List of Embedded Documents.
"""
comment = self.post1.comments.create(author="user4", message="message1")
self.post1.save()
# Ensure the returned value is the comment object.
assert isinstance(comment, self.Comments)
assert comment.author == "user4"
assert comment.message == "message1"
# Ensure the new comment was actually saved to the database.
assert comment in self.BlogPost.objects(comments__author="user4")[0].comments
def test_filtered_create(self):
"""
Test the create method of a List of Embedded Documents chained
to a call to the filter method. Filtering should have no effect
on creation.
"""
comment = self.post1.comments.filter(author="user1").create(
author="user4", message="message1"
)
self.post1.save()
# Ensure the returned value is the comment object.
assert isinstance(comment, self.Comments)
assert comment.author == "user4"
assert comment.message == "message1"
# Ensure the new comment was actually saved to the database.
assert comment in self.BlogPost.objects(comments__author="user4")[0].comments
def test_no_keyword_update(self):
"""
Tests the update method of a List of Embedded Documents with
no keywords.
"""
original = list(self.post1.comments)
number = self.post1.comments.update()
self.post1.save()
# Ensure that nothing was altered.
assert original[0] in self.BlogPost.objects(id=self.post1.id)[0].comments
assert original[1] in self.BlogPost.objects(id=self.post1.id)[0].comments
# Ensure the method returned 0 as the number of entries
# modified
assert number == 0
def test_single_keyword_update(self):
"""
Tests the update method of a List of Embedded Documents with
a single keyword.
"""
number = self.post1.comments.update(author="user4")
self.post1.save()
comments = self.BlogPost.objects(id=self.post1.id)[0].comments
# Ensure that the database was updated properly.
assert comments[0].author == "user4"
assert comments[1].author == "user4"
# Ensure the method returned 2 as the number of entries
# modified
assert number == 2
def test_unicode(self):
"""
Tests that unicode strings handled correctly
"""
post = self.BlogPost(
comments=[
self.Comments(author="user1", message="сообщение"),
self.Comments(author="user2", message="хабарлама"),
]
).save()
assert post.comments.get(message="сообщение").author == "user1"
def test_save(self):
"""
Tests the save method of a List of Embedded Documents.
"""
comments = self.post1.comments
new_comment = self.Comments(author="user4")
comments.append(new_comment)
comments.save()
# Ensure that the new comment has been added to the database.
assert new_comment in self.BlogPost.objects(id=self.post1.id)[0].comments
def test_delete(self):
"""
Tests the delete method of a List of Embedded Documents.
"""
number = self.post1.comments.delete()
self.post1.save()
# Ensure that all the comments under post1 were deleted in the
# database.
assert self.BlogPost.objects(id=self.post1.id)[0].comments == []
# Ensure that post1 comments were deleted from the list.
assert self.post1.comments == []
# Ensure that comments still returned a EmbeddedDocumentList object.
assert isinstance(self.post1.comments, EmbeddedDocumentList)
# Ensure that the delete method returned 2 as the number of entries
# deleted from the database
assert number == 2
def test_empty_list_embedded_documents_with_unique_field(self):
"""
Tests that only one document with an empty list of embedded documents
that have a unique field can be saved, but if the unique field is
also sparse than multiple documents with an empty list can be saved.
"""
class EmbeddedWithUnique(EmbeddedDocument):
number = IntField(unique=True)
class A(Document):
my_list = ListField(EmbeddedDocumentField(EmbeddedWithUnique))
A(my_list=[]).save()
with pytest.raises(NotUniqueError):
A(my_list=[]).save()
class EmbeddedWithSparseUnique(EmbeddedDocument):
number = IntField(unique=True, sparse=True)
class B(Document):
my_list = ListField(EmbeddedDocumentField(EmbeddedWithSparseUnique))
A.drop_collection()
B.drop_collection()
B(my_list=[]).save()
B(my_list=[]).save()
def test_filtered_delete(self):
"""
Tests the delete method of a List of Embedded Documents
after the filter method has been called.
"""
comment = self.post1.comments[1]
number = self.post1.comments.filter(author="user2").delete()
self.post1.save()
# Ensure that only the user2 comment was deleted.
assert comment not in self.BlogPost.objects(id=self.post1.id)[0].comments
assert len(self.BlogPost.objects(id=self.post1.id)[0].comments) == 1
# Ensure that the user2 comment no longer exists in the list.
assert comment not in self.post1.comments
assert len(self.post1.comments) == 1
# Ensure that the delete method returned 1 as the number of entries
# deleted from the database
assert number == 1
def test_custom_data(self):
"""
Tests that custom data is saved in the field object
and doesn't interfere with the rest of field functionalities.
"""
custom_data = {"a": "a_value", "b": [1, 2]}
class CustomData(Document):
a_field = IntField()
c_field = IntField(custom_data=custom_data)
CustomData.drop_collection()
a1 = CustomData(a_field=1, c_field=2).save()
assert 2 == a1.c_field
assert not hasattr(a1.c_field, "custom_data")
assert hasattr(CustomData.c_field, "custom_data")
assert custom_data["a"] == CustomData.c_field.custom_data["a"]
| TestEmbeddedDocumentListField |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_scale.py | {
"start": 6383,
"end": 11081
} | class ____:
def test_transforms(self):
a0 = 17.0
a = np.linspace(-50, 50, 100)
forward = AsinhTransform(a0)
inverse = forward.inverted()
invinv = inverse.inverted()
a_forward = forward.transform_non_affine(a)
a_inverted = inverse.transform_non_affine(a_forward)
assert_allclose(a_inverted, a)
a_invinv = invinv.transform_non_affine(a)
assert_allclose(a_invinv, a0 * np.arcsinh(a / a0))
def test_init(self):
fig, ax = plt.subplots()
s = AsinhScale(axis=None, linear_width=23.0)
assert s.linear_width == 23
assert s._base == 10
assert s._subs == (2, 5)
tx = s.get_transform()
assert isinstance(tx, AsinhTransform)
assert tx.linear_width == s.linear_width
def test_base_init(self):
fig, ax = plt.subplots()
s3 = AsinhScale(axis=None, base=3)
assert s3._base == 3
assert s3._subs == (2,)
s7 = AsinhScale(axis=None, base=7, subs=(2, 4))
assert s7._base == 7
assert s7._subs == (2, 4)
def test_fmtloc(self):
class DummyAxis:
def __init__(self):
self.fields = {}
def set(self, **kwargs):
self.fields.update(**kwargs)
def set_major_formatter(self, f):
self.fields['major_formatter'] = f
ax0 = DummyAxis()
s0 = AsinhScale(axis=ax0, base=0)
s0.set_default_locators_and_formatters(ax0)
assert isinstance(ax0.fields['major_locator'], AsinhLocator)
assert isinstance(ax0.fields['major_formatter'], str)
ax5 = DummyAxis()
s7 = AsinhScale(axis=ax5, base=5)
s7.set_default_locators_and_formatters(ax5)
assert isinstance(ax5.fields['major_locator'], AsinhLocator)
assert isinstance(ax5.fields['major_formatter'],
LogFormatterSciNotation)
def test_bad_scale(self):
fig, ax = plt.subplots()
with pytest.raises(ValueError):
AsinhScale(axis=None, linear_width=0)
with pytest.raises(ValueError):
AsinhScale(axis=None, linear_width=-1)
s0 = AsinhScale(axis=None, )
s1 = AsinhScale(axis=None, linear_width=3.0)
def test_custom_scale_without_axis():
"""
Test that one can register and use custom scales that don't take an *axis* param.
"""
class CustomTransform(IdentityTransform):
pass
class CustomScale(mscale.ScaleBase):
name = "custom"
# Important: __init__ has no *axis* parameter
def __init__(self):
self._transform = CustomTransform()
def get_transform(self):
return self._transform
def set_default_locators_and_formatters(self, axis):
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_locator(NullLocator())
axis.set_minor_formatter(NullFormatter())
try:
mscale.register_scale(CustomScale)
fig, ax = plt.subplots()
ax.set_xscale('custom')
assert isinstance(ax.xaxis.get_transform(), CustomTransform)
finally:
# cleanup - there's no public unregister_scale()
del mscale._scale_mapping["custom"]
del mscale._scale_has_axis_parameter["custom"]
def test_custom_scale_with_axis():
"""
Test that one can still register and use custom scales with an *axis*
parameter, but that registering issues a pending-deprecation warning.
"""
class CustomTransform(IdentityTransform):
pass
class CustomScale(mscale.ScaleBase):
name = "custom"
# Important: __init__ still has the *axis* parameter
def __init__(self, axis):
self._transform = CustomTransform()
def get_transform(self):
return self._transform
def set_default_locators_and_formatters(self, axis):
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_locator(NullLocator())
axis.set_minor_formatter(NullFormatter())
try:
with pytest.warns(
PendingDeprecationWarning,
match=r"'axis' parameter .* is pending-deprecated"):
mscale.register_scale(CustomScale)
fig, ax = plt.subplots()
ax.set_xscale('custom')
assert isinstance(ax.xaxis.get_transform(), CustomTransform)
finally:
# cleanup - there's no public unregister_scale()
del mscale._scale_mapping["custom"]
del mscale._scale_has_axis_parameter["custom"]
| TestAsinhScale |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 91526,
"end": 91604
} | class ____(BinOpSeries):
operation = M.eq
_operator_repr = "=="
| EQSeries |
python | sqlalchemy__sqlalchemy | test/orm/test_load_on_fks.py | {
"start": 543,
"end": 1895
} | class ____(AssertsExecutionResults, fixtures.TestBase):
def setup_test(self):
global Parent, Child, Base
Base = declarative_base()
class Parent(Base):
__tablename__ = "parent"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
name = Column(String(50), nullable=False)
children = relationship("Child", load_on_pending=True)
class Child(Base):
__tablename__ = "child"
id = Column(
Integer, primary_key=True, test_needs_autoincrement=True
)
parent_id = Column(Integer, ForeignKey("parent.id"))
Base.metadata.create_all(testing.db)
def teardown_test(self):
Base.metadata.drop_all(testing.db)
def test_annoying_autoflush_one(self):
sess = fixture_session()
p1 = Parent()
sess.add(p1)
p1.children = []
def test_annoying_autoflush_two(self):
sess = fixture_session()
p1 = Parent()
sess.add(p1)
assert p1.children == []
def test_dont_load_if_no_keys(self):
sess = fixture_session()
p1 = Parent()
sess.add(p1)
def go():
assert p1.children == []
self.assert_sql_count(testing.db, go, 0)
| FlushOnPendingTest |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/parsers/base_argument_parsers.py | {
"start": 2012,
"end": 2289
} | class ____(CompletionError):
"""Exception raised when controller and target options are specified out-of-order."""
def __init__(self) -> None:
super().__init__('The `--controller` option must be specified before `--target` option(s).')
| ControllerRequiredFirstError |
python | great-expectations__great_expectations | great_expectations/metrics/metric.py | {
"start": 2524,
"end": 6590
} | class ____(BaseModel, Generic[_MetricResult], metaclass=MetaMetric):
"""The abstract base class for defining all metrics.
A Metric represents a measurable property that can be computed over a specific domain
of data (e.g., a column, table, or column pair). All concrete metric implementations
must inherit from this class and specify their domain type as a mixin.
Examples:
A metric for a single column max value:
>>> class ColumnMaxResult(MetricResult[int]): ...
>>>
>>> class ColumnMax(Metric[ColumnMaxResult]):
... ...
A metric for a single batch row count value:
>>> class BatchRowCountResult(MetricResult[int]): ...
>>>
>>> class BatchRowCount(Metric[BatchRowCountResult]):
... ...
Notes:
- The Metric class cannot be instantiated directly - it must be subclassed.
- Once Metrics are instantiated, they are immutable.
See Also:
MetricConfiguration: Configuration class for metric computation
"""
# we wouldn't mind removing this `name` attribute
# it's currently our only hook into the legacy metrics system
name: ClassVar[StrictStr]
# we use this list to build a dictionary of domain-specific fields
# by introspecting inheriting models for these keys
_domain_fields: ClassVar[tuple[str, ...]] = (
"column",
"column_list",
"column_A",
"column_B",
"condition_parser",
"ignore_row_if",
"row_condition",
)
class Config:
arbitrary_types_allowed = True
frozen = True
def __new__(cls, *args, **kwargs):
if cls is Metric:
raise AbstractClassInstantiationError(cls.__name__)
return super().__new__(cls)
def metric_id_for_batch(self, batch_id: str) -> MetricConfigurationID:
return self.config(batch_id=batch_id).id
def config(self, batch_id: str) -> MetricConfiguration:
# This class is frozen so Metric._to_config will always return the same value
# when the same batch_id is passed in.
if not batch_id:
raise EmptyStrError("batch_id")
config = Metric._to_config(
instance_class=self.__class__,
batch_id=batch_id,
metric_value_set=list(self.dict().items()),
)
return config
@classmethod
def get_metric_result_type(cls) -> type[_MetricResult]:
"""Returns the MetricResult type for this Metric."""
return get_args(getattr(cls, "__orig_bases__", [])[0])[0]
@staticmethod
def _to_config(
instance_class: type["Metric"], batch_id: str, metric_value_set: list[tuple[str, Any]]
) -> MetricConfiguration:
"""Returns a MetricConfiguration instance for this Metric."""
metric_domain_kwargs = {}
metric_value_kwargs = {}
metric_values = dict(metric_value_set)
metric_fields = Metric.__fields__
domain_fields = {
field_name: field_info
for field_name, field_info in instance_class.__fields__.items()
if field_name in Metric._domain_fields and field_name not in metric_fields
}
for field_name, field_info in domain_fields.items():
metric_domain_kwargs[field_name] = metric_values.get(field_name, field_info.default)
# Set the batch_id with the passed in value
metric_domain_kwargs["batch_id"] = batch_id
value_fields = {
field_name: field_info
for field_name, field_info in instance_class.__fields__.items()
if field_name not in Metric._domain_fields and field_name not in metric_fields
}
for field_name, field_info in value_fields.items():
metric_value_kwargs[field_name] = metric_values.get(field_name, field_info.default)
return MetricConfiguration(
metric_name=instance_class.name,
metric_domain_kwargs=metric_domain_kwargs,
metric_value_kwargs=metric_value_kwargs,
)
| Metric |
python | huggingface__transformers | src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | {
"start": 15770,
"end": 16454
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.feature_projection_input_dim, eps=config.layer_norm_eps)
self.projection = nn.Linear(config.feature_projection_input_dim, config.hidden_size)
self.dropout = nn.Dropout(config.speech_encoder_dropout)
def forward(self, hidden_states):
# non-projected hidden states are needed for quantization
norm_hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(norm_hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
| SeamlessM4TConformerFeatureProjection |
python | doocs__leetcode | lcof2/剑指 Offer II 041. 滑动窗口的平均值/Solution.py | {
"start": 0,
"end": 471
} | class ____:
def __init__(self, size: int):
self.arr = [0] * size
self.s = 0
self.cnt = 0
def next(self, val: int) -> float:
idx = self.cnt % len(self.arr)
self.s += val - self.arr[idx]
self.arr[idx] = val
self.cnt += 1
return self.s / min(self.cnt, len(self.arr))
# Your MovingAverage object will be instantiated and called as such:
# obj = MovingAverage(size)
# param_1 = obj.next(val)
| MovingAverage |
python | mwaskom__seaborn | tests/test_statistics.py | {
"start": 14023,
"end": 18563
} | class ____:
def test_func_estimator(self, long_df):
func = np.mean
agg = EstimateAggregator(func)
out = agg(long_df, "x")
assert out["x"] == func(long_df["x"])
def test_name_estimator(self, long_df):
agg = EstimateAggregator("mean")
out = agg(long_df, "x")
assert out["x"] == long_df["x"].mean()
def test_custom_func_estimator(self, long_df):
def func(x):
return np.asarray(x).min()
agg = EstimateAggregator(func)
out = agg(long_df, "x")
assert out["x"] == func(long_df["x"])
def test_se_errorbars(self, long_df):
agg = EstimateAggregator("mean", "se")
out = agg(long_df, "x")
assert out["x"] == long_df["x"].mean()
assert out["xmin"] == (long_df["x"].mean() - long_df["x"].sem())
assert out["xmax"] == (long_df["x"].mean() + long_df["x"].sem())
agg = EstimateAggregator("mean", ("se", 2))
out = agg(long_df, "x")
assert out["x"] == long_df["x"].mean()
assert out["xmin"] == (long_df["x"].mean() - 2 * long_df["x"].sem())
assert out["xmax"] == (long_df["x"].mean() + 2 * long_df["x"].sem())
def test_sd_errorbars(self, long_df):
agg = EstimateAggregator("mean", "sd")
out = agg(long_df, "x")
assert out["x"] == long_df["x"].mean()
assert out["xmin"] == (long_df["x"].mean() - long_df["x"].std())
assert out["xmax"] == (long_df["x"].mean() + long_df["x"].std())
agg = EstimateAggregator("mean", ("sd", 2))
out = agg(long_df, "x")
assert out["x"] == long_df["x"].mean()
assert out["xmin"] == (long_df["x"].mean() - 2 * long_df["x"].std())
assert out["xmax"] == (long_df["x"].mean() + 2 * long_df["x"].std())
def test_pi_errorbars(self, long_df):
agg = EstimateAggregator("mean", "pi")
out = agg(long_df, "y")
assert out["ymin"] == np.percentile(long_df["y"], 2.5)
assert out["ymax"] == np.percentile(long_df["y"], 97.5)
agg = EstimateAggregator("mean", ("pi", 50))
out = agg(long_df, "y")
assert out["ymin"] == np.percentile(long_df["y"], 25)
assert out["ymax"] == np.percentile(long_df["y"], 75)
def test_ci_errorbars(self, long_df):
agg = EstimateAggregator("mean", "ci", n_boot=100000, seed=0)
out = agg(long_df, "y")
agg_ref = EstimateAggregator("mean", ("se", 1.96))
out_ref = agg_ref(long_df, "y")
assert out["ymin"] == pytest.approx(out_ref["ymin"], abs=1e-2)
assert out["ymax"] == pytest.approx(out_ref["ymax"], abs=1e-2)
agg = EstimateAggregator("mean", ("ci", 68), n_boot=100000, seed=0)
out = agg(long_df, "y")
agg_ref = EstimateAggregator("mean", ("se", 1))
out_ref = agg_ref(long_df, "y")
assert out["ymin"] == pytest.approx(out_ref["ymin"], abs=1e-2)
assert out["ymax"] == pytest.approx(out_ref["ymax"], abs=1e-2)
agg = EstimateAggregator("mean", "ci", seed=0)
out_orig = agg_ref(long_df, "y")
out_test = agg_ref(long_df, "y")
assert_array_equal(out_orig, out_test)
def test_custom_errorbars(self, long_df):
f = lambda x: (x.min(), x.max()) # noqa: E731
agg = EstimateAggregator("mean", f)
out = agg(long_df, "y")
assert out["ymin"] == long_df["y"].min()
assert out["ymax"] == long_df["y"].max()
def test_singleton_errorbars(self):
agg = EstimateAggregator("mean", "ci")
val = 7
out = agg(pd.DataFrame(dict(y=[val])), "y")
assert out["y"] == val
assert pd.isna(out["ymin"])
assert pd.isna(out["ymax"])
def test_errorbar_validation(self):
method, level = _validate_errorbar_arg(("ci", 99))
assert method == "ci"
assert level == 99
method, level = _validate_errorbar_arg("sd")
assert method == "sd"
assert level == 1
f = lambda x: (x.min(), x.max()) # noqa: E731
method, level = _validate_errorbar_arg(f)
assert method is f
assert level is None
bad_args = [
("sem", ValueError),
(("std", 2), ValueError),
(("pi", 5, 95), ValueError),
(95, TypeError),
(("ci", "large"), TypeError),
]
for arg, exception in bad_args:
with pytest.raises(exception, match="`errorbar` must be"):
_validate_errorbar_arg(arg)
| TestEstimateAggregator |
python | scrapy__scrapy | scrapy/spidermiddlewares/referer.py | {
"start": 3794,
"end": 4746
} | class ____(ReferrerPolicy):
"""
https://www.w3.org/TR/referrer-policy/#referrer-policy-no-referrer-when-downgrade
The "no-referrer-when-downgrade" policy sends a full URL along with requests
from a TLS-protected environment settings object to a potentially trustworthy URL,
and requests from clients which are not TLS-protected to any origin.
Requests from TLS-protected clients to non-potentially trustworthy URLs,
on the other hand, will contain no referrer information.
A Referer HTTP header will not be sent.
This is a user agent's default behavior, if no policy is otherwise specified.
"""
name: str = POLICY_NO_REFERRER_WHEN_DOWNGRADE
def referrer(self, response_url: str, request_url: str) -> str | None:
if not self.tls_protected(response_url) or self.tls_protected(request_url):
return self.stripped_referrer(response_url)
return None
| NoReferrerWhenDowngradePolicy |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/internal_init.py | {
"start": 25,
"end": 790
} | class ____:
"""Marker interface which indicates that this class has an dagster_internal_init method. All classes with this interface
are unit tested to ensure that the signature of their dagster_internal_init method matches the signature of their
__init__ method, and that dagster_internal_init has no defaults.
"""
@staticmethod
def dagster_internal_init(*args: Any, **kwargs: Any) -> object:
"""This method is called by the __init__ method of subclasses of IHasInternalInit. It is not intended to be called
directly.
"""
raise NotImplementedError(
"This method is called by the __init__ method of subclasses of IHasInternalInit. It is not intended to be called directly."
)
| IHasInternalInit |
python | redis__redis-py | redis/commands/json/path.py | {
"start": 0,
"end": 393
} | class ____:
"""This class represents a path in a JSON value."""
strPath = ""
@staticmethod
def root_path():
"""Return the root path's string representation."""
return "."
def __init__(self, path):
"""Make a new path based on the string representation in `path`."""
self.strPath = path
def __repr__(self):
return self.strPath
| Path |
python | scikit-learn__scikit-learn | sklearn/ensemble/_hist_gradient_boosting/binning.py | {
"start": 3082,
"end": 14091
} | class ____(TransformerMixin, BaseEstimator):
"""Transformer that maps a dataset into integer-valued bins.
For continuous features, the bins are created in a feature-wise fashion,
using quantiles so that each bins contains approximately the same number
of samples. For large datasets, quantiles are computed on a subset of the
data to speed-up the binning, but the quantiles should remain stable.
For categorical features, the raw categorical values are expected to be
in [0, 254] (this is not validated here though) and each category
corresponds to a bin. All categorical values must be known at
initialization: transform() doesn't know how to bin unknown categorical
values. Note that transform() is only used on non-training data in the
case of early stopping.
Features with a small number of values may be binned into less than
``n_bins`` bins. The last bin (at index ``n_bins - 1``) is always reserved
for missing values.
Parameters
----------
n_bins : int, default=256
The maximum number of bins to use (including the bin for missing
values). Should be in [3, 256]. Non-missing values are binned on
``max_bins = n_bins - 1`` bins. The last bin is always reserved for
missing values. If for a given feature the number of unique values is
less than ``max_bins``, then those unique values will be used to
compute the bin thresholds, instead of the quantiles. For categorical
features indicated by ``is_categorical``, the docstring for
``is_categorical`` details on this procedure.
subsample : int or None, default=2e5
If ``n_samples > subsample``, then ``sub_samples`` samples will be
randomly chosen to compute the quantiles. If ``None``, the whole data
is used.
is_categorical : ndarray of bool of shape (n_features,), default=None
Indicates categorical features. By default, all features are
considered continuous.
known_categories : list of {ndarray, None} of shape (n_features,), \
default=none
For each categorical feature, the array indicates the set of unique
categorical values. These should be the possible values over all the
data, not just the training data. For continuous features, the
corresponding entry should be None.
random_state: int, RandomState instance or None, default=None
Pseudo-random number generator to control the random sub-sampling.
Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
n_threads : int, default=None
Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
to determine the effective number of threads use, which takes cgroups CPU
quotes into account. See the docstring of `_openmp_effective_n_threads`
for details.
Attributes
----------
bin_thresholds_ : list of ndarray
For each feature, each array indicates how to map a feature into a
binned feature. The semantic and size depends on the nature of the
feature:
- for real-valued features, the array corresponds to the real-valued
bin thresholds (the upper bound of each bin). There are ``max_bins
- 1`` thresholds, where ``max_bins = n_bins - 1`` is the number of
bins used for non-missing values.
- for categorical features, the array is a map from a binned category
value to the raw category value. The size of the array is equal to
``min(max_bins, category_cardinality)`` where we ignore missing
values in the cardinality.
n_bins_non_missing_ : ndarray, dtype=np.uint32
For each feature, gives the number of bins actually used for
non-missing values. For features with a lot of unique values, this is
equal to ``n_bins - 1``.
is_categorical_ : ndarray of shape (n_features,), dtype=np.uint8
Indicator for categorical features.
missing_values_bin_idx_ : np.uint8
The index of the bin where missing values are mapped. This is a
constant across all features. This corresponds to the last bin, and
it is always equal to ``n_bins - 1``. Note that if ``n_bins_non_missing_``
is less than ``n_bins - 1`` for a given feature, then there are
empty (and unused) bins.
"""
def __init__(
self,
n_bins=256,
subsample=int(2e5),
is_categorical=None,
known_categories=None,
random_state=None,
n_threads=None,
):
self.n_bins = n_bins
self.subsample = subsample
self.is_categorical = is_categorical
self.known_categories = known_categories
self.random_state = random_state
self.n_threads = n_threads
def fit(self, X, y=None):
"""Fit data X by computing the binning thresholds.
The last bin is reserved for missing values, whether missing values
are present in the data or not.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to bin.
y: None
Ignored.
Returns
-------
self : object
"""
if not (3 <= self.n_bins <= 256):
# min is 3: at least 2 distinct bins and a missing values bin
raise ValueError(
"n_bins={} should be no smaller than 3 and no larger than 256.".format(
self.n_bins
)
)
X = check_array(X, dtype=[X_DTYPE], ensure_all_finite=False)
max_bins = self.n_bins - 1
rng = check_random_state(self.random_state)
if self.subsample is not None and X.shape[0] > self.subsample:
subset = rng.choice(X.shape[0], self.subsample, replace=False)
X = X.take(subset, axis=0)
if self.is_categorical is None:
self.is_categorical_ = np.zeros(X.shape[1], dtype=np.uint8)
else:
self.is_categorical_ = np.asarray(self.is_categorical, dtype=np.uint8)
n_features = X.shape[1]
known_categories = self.known_categories
if known_categories is None:
known_categories = [None] * n_features
# validate is_categorical and known_categories parameters
for f_idx in range(n_features):
is_categorical = self.is_categorical_[f_idx]
known_cats = known_categories[f_idx]
if is_categorical and known_cats is None:
raise ValueError(
f"Known categories for feature {f_idx} must be provided."
)
if not is_categorical and known_cats is not None:
raise ValueError(
f"Feature {f_idx} isn't marked as a categorical feature, "
"but categories were passed."
)
self.missing_values_bin_idx_ = self.n_bins - 1
self.bin_thresholds_ = [None] * n_features
n_bins_non_missing = [None] * n_features
non_cat_thresholds = Parallel(n_jobs=self.n_threads, backend="threading")(
delayed(_find_binning_thresholds)(X[:, f_idx], max_bins)
for f_idx in range(n_features)
if not self.is_categorical_[f_idx]
)
non_cat_idx = 0
for f_idx in range(n_features):
if self.is_categorical_[f_idx]:
# Since categories are assumed to be encoded in
# [0, n_cats] and since n_cats <= max_bins,
# the thresholds *are* the unique categorical values. This will
# lead to the correct mapping in transform()
thresholds = known_categories[f_idx]
n_bins_non_missing[f_idx] = thresholds.shape[0]
self.bin_thresholds_[f_idx] = thresholds
else:
self.bin_thresholds_[f_idx] = non_cat_thresholds[non_cat_idx]
n_bins_non_missing[f_idx] = self.bin_thresholds_[f_idx].shape[0] + 1
non_cat_idx += 1
self.n_bins_non_missing_ = np.array(n_bins_non_missing, dtype=np.uint32)
return self
def transform(self, X):
"""Bin data X.
Missing values will be mapped to the last bin.
For categorical features, the mapping will be incorrect for unknown
categories. Since the BinMapper is given known_categories of the
entire training data (i.e. before the call to train_test_split() in
case of early-stopping), this never happens.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data to bin.
Returns
-------
X_binned : array-like of shape (n_samples, n_features)
The binned data (fortran-aligned).
"""
X = check_array(X, dtype=[X_DTYPE], ensure_all_finite=False)
check_is_fitted(self)
if X.shape[1] != self.n_bins_non_missing_.shape[0]:
raise ValueError(
"This estimator was fitted with {} features but {} got passed "
"to transform()".format(self.n_bins_non_missing_.shape[0], X.shape[1])
)
n_threads = _openmp_effective_n_threads(self.n_threads)
binned = np.zeros_like(X, dtype=X_BINNED_DTYPE, order="F")
_map_to_bins(
X,
self.bin_thresholds_,
self.is_categorical_,
self.missing_values_bin_idx_,
n_threads,
binned,
)
return binned
def make_known_categories_bitsets(self):
"""Create bitsets of known categories.
Returns
-------
- known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
Array of bitsets of known categories, for each categorical feature.
- f_idx_map : ndarray of shape (n_features,)
Map from original feature index to the corresponding index in the
known_cat_bitsets array.
"""
categorical_features_indices = np.flatnonzero(self.is_categorical_)
n_features = self.is_categorical_.size
n_categorical_features = categorical_features_indices.size
f_idx_map = np.zeros(n_features, dtype=np.uint32)
f_idx_map[categorical_features_indices] = np.arange(
n_categorical_features, dtype=np.uint32
)
known_categories = self.bin_thresholds_
known_cat_bitsets = np.zeros(
(n_categorical_features, 8), dtype=X_BITSET_INNER_DTYPE
)
# TODO: complexity is O(n_categorical_features * 255). Maybe this is
# worth cythonizing
for mapped_f_idx, f_idx in enumerate(categorical_features_indices):
for raw_cat_val in known_categories[f_idx]:
set_bitset_memoryview(known_cat_bitsets[mapped_f_idx], raw_cat_val)
return known_cat_bitsets, f_idx_map
| _BinMapper |
python | jina-ai__jina | jina/jaml/__init__.py | {
"start": 1584,
"end": 1752
} | class ____(string.Template):
delimiter = '$$' # variables that should be substituted with values from the context are internally denoted with '$$'
| ContextVarTemplate |
python | PrefectHQ__prefect | src/prefect/blocks/webhook.py | {
"start": 543,
"end": 2907
} | class ____(Block):
"""
Block that enables calling webhooks.
"""
_block_type_name = "Webhook"
_logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/c7247cb359eb6cf276734d4b1fbf00fb8930e89e-250x250.png" # type: ignore
_documentation_url = HttpUrl(
"https://docs.prefect.io/latest/automate/events/webhook-triggers"
)
method: Literal["GET", "POST", "PUT", "PATCH", "DELETE"] = Field(
default="POST", description="The webhook request method. Defaults to `POST`."
)
url: SecretStr = Field(
default=...,
title="Webhook URL",
description="The webhook URL.",
examples=["https://hooks.slack.com/XXX"],
)
headers: SecretDict = Field(
default_factory=lambda: SecretDict(dict()),
title="Webhook Headers",
description="A dictionary of headers to send with the webhook request.",
)
allow_private_urls: bool = Field(
default=True,
description="Whether to allow notifications to private URLs. Defaults to True.",
)
verify: bool = Field(
default=True,
description="Whether or not to enforce a secure connection to the webhook.",
)
def block_initialization(self) -> None:
if self.verify:
self._client = AsyncClient(transport=_http_transport)
else:
self._client = AsyncClient(transport=_insecure_http_transport)
async def call(self, payload: dict[str, Any] | str | None = None) -> Response:
"""
Call the webhook.
Args:
payload: an optional payload to send when calling the webhook.
"""
if not self.allow_private_urls:
validate_restricted_url(self.url.get_secret_value())
async with self._client:
if isinstance(payload, str):
return await self._client.request(
method=self.method,
url=self.url.get_secret_value(),
headers=self.headers.get_secret_value(),
content=payload,
)
else:
return await self._client.request(
method=self.method,
url=self.url.get_secret_value(),
headers=self.headers.get_secret_value(),
json=payload,
)
| Webhook |
python | pypa__pip | src/pip/_vendor/resolvelib/structs.py | {
"start": 1021,
"end": 3243
} | class ____(Generic[KT]):
"""A graph structure with directed edges."""
def __init__(self) -> None:
self._vertices: set[KT] = set()
self._forwards: dict[KT, set[KT]] = {} # <key> -> Set[<key>]
self._backwards: dict[KT, set[KT]] = {} # <key> -> Set[<key>]
def __iter__(self) -> Iterator[KT]:
return iter(self._vertices)
def __len__(self) -> int:
return len(self._vertices)
def __contains__(self, key: KT) -> bool:
return key in self._vertices
def copy(self) -> DirectedGraph[KT]:
"""Return a shallow copy of this graph."""
other = type(self)()
other._vertices = set(self._vertices)
other._forwards = {k: set(v) for k, v in self._forwards.items()}
other._backwards = {k: set(v) for k, v in self._backwards.items()}
return other
def add(self, key: KT) -> None:
"""Add a new vertex to the graph."""
if key in self._vertices:
raise ValueError("vertex exists")
self._vertices.add(key)
self._forwards[key] = set()
self._backwards[key] = set()
def remove(self, key: KT) -> None:
"""Remove a vertex from the graph, disconnecting all edges from/to it."""
self._vertices.remove(key)
for f in self._forwards.pop(key):
self._backwards[f].remove(key)
for t in self._backwards.pop(key):
self._forwards[t].remove(key)
def connected(self, f: KT, t: KT) -> bool:
return f in self._backwards[t] and t in self._forwards[f]
def connect(self, f: KT, t: KT) -> None:
"""Connect two existing vertices.
Nothing happens if the vertices are already connected.
"""
if t not in self._vertices:
raise KeyError(t)
self._forwards[f].add(t)
self._backwards[t].add(f)
def iter_edges(self) -> Iterator[tuple[KT, KT]]:
for f, children in self._forwards.items():
for t in children:
yield f, t
def iter_children(self, key: KT) -> Iterator[KT]:
return iter(self._forwards[key])
def iter_parents(self, key: KT) -> Iterator[KT]:
return iter(self._backwards[key])
| DirectedGraph |
python | django__django | tests/model_fields/test_jsonfield.py | {
"start": 11504,
"end": 45841
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.primitives = [True, False, "yes", 7, 9.6]
values = [
None,
[],
{},
{"a": "b", "c": 14},
{
"a": "b",
"c": 14,
"d": ["e", {"f": "g"}],
"h": True,
"i": False,
"j": None,
"k": {"l": "m"},
"n": [None, True, False],
"o": '"quoted"',
"p": 4.2,
"r": {"s": True, "t": False},
},
[1, [2]],
{"k": True, "l": False, "foo": "bax"},
{
"foo": "bar",
"baz": {"a": "b", "c": "d"},
"bar": ["foo", "bar"],
"bax": {"foo": "bar"},
},
]
cls.objs = [NullableJSONModel.objects.create(value=value) for value in values]
if connection.features.supports_primitives_in_json_field:
cls.objs.extend(
[
NullableJSONModel.objects.create(value=value)
for value in cls.primitives
]
)
cls.raw_sql = "%s::jsonb" if connection.vendor == "postgresql" else "%s"
def test_exact(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__exact={}),
[self.objs[2]],
)
def test_exact_complex(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__exact={"a": "b", "c": 14}),
[self.objs[3]],
)
def test_icontains(self):
self.assertCountEqual(
NullableJSONModel.objects.filter(value__icontains="BaX"),
self.objs[6:8],
)
def test_isnull(self):
self.assertSequenceEqual(
NullableJSONModel.objects.filter(value__isnull=True),
[self.objs[0]],
)
def test_ordering_by_transform(self):
mariadb = connection.vendor == "mysql" and connection.mysql_is_mariadb
values = [
{"ord": 93, "name": "bar"},
{"ord": 22.1, "name": "foo"},
{"ord": -1, "name": "baz"},
{"ord": 21.931902, "name": "spam"},
{"ord": -100291029, "name": "eggs"},
]
for field_name in ["value", "value_custom"]:
with self.subTest(field=field_name):
objs = [
NullableJSONModel.objects.create(**{field_name: value})
for value in values
]
query = NullableJSONModel.objects.filter(
**{"%s__name__isnull" % field_name: False},
).order_by("%s__ord" % field_name)
expected = [objs[4], objs[2], objs[3], objs[1], objs[0]]
if mariadb or connection.vendor == "oracle":
# MariaDB and Oracle return JSON values as strings.
expected = [objs[2], objs[4], objs[3], objs[1], objs[0]]
self.assertSequenceEqual(query, expected)
def test_ordering_grouping_by_key_transform(self):
    """Key transforms can be used both for ordering and for
    values()/annotate() grouping."""
    base_qs = NullableJSONModel.objects.filter(value__d__0__isnull=False)
    # Ordering via the lookup string and via an explicit KeyTransform
    # annotation must be equivalent.
    for qs in (
        base_qs.order_by("value__d__0"),
        base_qs.annotate(
            key=KeyTransform("0", KeyTransform("d", "value"))
        ).order_by("key"),
    ):
        self.assertSequenceEqual(qs, [self.objs[4]])
    # Some backends store "" as NULL, which changes the grouped key value
    # for rows that lack the path.
    none_val = "" if connection.features.interprets_empty_strings_as_nulls else None
    qs = NullableJSONModel.objects.filter(value__isnull=False)
    self.assertQuerySetEqual(
        qs.filter(value__isnull=False)
        .annotate(key=KT("value__d__1__f"))
        .values("key")
        .annotate(count=Count("key"))
        .order_by("count"),
        [(none_val, 0), ("g", 1)],
        operator.itemgetter("key", "count"),
    )
def test_ordering_grouping_by_count(self):
    """Aggregation can group on a key transform and order by the aggregate."""
    qs = (
        NullableJSONModel.objects.filter(
            value__isnull=False,
        )
        .values("value__d__0")
        .annotate(count=Count("value__d__0"))
        .order_by("count")
    )
    self.assertQuerySetEqual(qs, [0, 1], operator.itemgetter("count"))
def test_order_grouping_custom_decoder(self):
    """Grouping and ordering by a key transform work on a JSONField with a
    custom decoder."""
    NullableJSONModel.objects.create(value_custom={"a": "b"})
    qs = NullableJSONModel.objects.filter(value_custom__isnull=False)
    self.assertSequenceEqual(
        qs.values(
            "value_custom__a",
        )
        .annotate(
            count=Count("id"),
        )
        .order_by("value_custom__a"),
        [{"value_custom__a": "b", "count": 1}],
    )
def test_key_transform_raw_expression(self):
    """KeyTransform can wrap a RawSQL expression as its operand."""
    expr = RawSQL(self.raw_sql, ['{"x": "bar"}'])
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(value__foo=KeyTransform("x", expr)),
        [self.objs[7]],
    )
def test_nested_key_transform_raw_expression(self):
    """Nested KeyTransforms can be applied on top of a RawSQL expression."""
    expr = RawSQL(self.raw_sql, ['{"x": {"y": "bar"}}'])
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(
            value__foo=KeyTransform("y", KeyTransform("x", expr))
        ),
        [self.objs[7]],
    )
def test_key_transform_expression(self):
    """A chained KeyTransform equals the same transform applied to the
    annotation cast back to JSONField."""
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(value__d__0__isnull=False)
        .annotate(
            key=KeyTransform("d", "value"),
            chain=KeyTransform("0", "key"),
            expr=KeyTransform("0", Cast("key", models.JSONField())),
        )
        .filter(chain=F("expr")),
        [self.objs[4]],
    )
def test_key_transform_annotation_expression(self):
    """F() expressions over annotations support key/index transforms."""
    # d has two equal entries so d[0] == Cast(d)[1].
    obj = NullableJSONModel.objects.create(value={"d": ["e", "e"]})
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(value__d__0__isnull=False)
        .annotate(
            key=F("value__d"),
            chain=F("key__0"),
            expr=Cast("key", models.JSONField()),
        )
        .filter(chain=F("expr__1")),
        [obj],
    )
def test_nested_key_transform_expression(self):
    """Nested KeyTransform chains survive a Cast back to JSONField."""
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(value__d__0__isnull=False)
        .annotate(
            key=KeyTransform("d", "value"),
            chain=KeyTransform("f", KeyTransform("1", "key")),
            expr=KeyTransform(
                "f", KeyTransform("1", Cast("key", models.JSONField()))
            ),
        )
        .filter(chain=F("expr")),
        [self.objs[4]],
    )
def test_nested_key_transform_annotation_expression(self):
    """Nested F() transforms over annotations match the equivalent Cast
    expression."""
    # d[1] and d[2] are identical so key__1__f == Cast(key)[2]["f"].
    obj = NullableJSONModel.objects.create(
        value={"d": ["e", {"f": "g"}, {"f": "g"}]},
    )
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(value__d__0__isnull=False)
        .annotate(
            key=F("value__d"),
            chain=F("key__1__f"),
            expr=Cast("key", models.JSONField()),
        )
        .filter(chain=F("expr__2__f")),
        [obj],
    )
def test_nested_key_transform_on_subquery(self):
    """KeyTransforms can be chained on top of a Subquery annotation."""
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(value__d__0__isnull=False)
        .annotate(
            subquery_value=Subquery(
                NullableJSONModel.objects.filter(pk=OuterRef("pk")).values("value")
            ),
            key=KeyTransform("d", "subquery_value"),
            chain=KeyTransform("f", KeyTransform("1", "key")),
        )
        .filter(chain="g"),
        [self.objs[4]],
    )
def test_key_text_transform_char_lookup(self):
    """KeyTextTransform yields text, so string lookups (startswith) apply."""
    qs = NullableJSONModel.objects.annotate(
        char_value=KeyTextTransform("foo", "value"),
    ).filter(char_value__startswith="bar")
    self.assertSequenceEqual(qs, [self.objs[7]])
    # Integer keys address array indexes ("bar" -> ["foo", "bar"]).
    qs = NullableJSONModel.objects.annotate(
        char_value=KeyTextTransform(1, KeyTextTransform("bar", "value")),
    ).filter(char_value__startswith="bar")
    self.assertSequenceEqual(qs, [self.objs[7]])
def test_expression_wrapper_key_transform(self):
    """A KeyTransform wrapped in ExpressionWrapper honors its explicit
    output_field."""
    self.assertCountEqual(
        NullableJSONModel.objects.annotate(
            expr=ExpressionWrapper(
                KeyTransform("c", "value"),
                output_field=IntegerField(),
            ),
        ).filter(expr__isnull=False),
        self.objs[3:5],
    )
def test_has_key(self):
    """has_key matches every object containing the given top-level key."""
    matches = NullableJSONModel.objects.filter(value__has_key="a")
    self.assertCountEqual(matches, [self.objs[3], self.objs[4]])
def test_has_key_null_value(self):
    """has_key matches a key even when its value is JSON null."""
    matches = NullableJSONModel.objects.filter(value__has_key="j")
    self.assertSequenceEqual(matches, [self.objs[4]])
def test_has_key_deep(self):
    """has_key works on nested paths, expressed either as chained lookup
    strings, explicit KeyTransforms, or F() references."""
    tests = [
        (Q(value__baz__has_key="a"), self.objs[7]),
        (
            Q(value__has_key=KeyTransform("a", KeyTransform("baz", "value"))),
            self.objs[7],
        ),
        (Q(value__has_key=F("value__baz__a")), self.objs[7]),
        (
            Q(value__has_key=KeyTransform("c", KeyTransform("baz", "value"))),
            self.objs[7],
        ),
        (Q(value__has_key=F("value__baz__c")), self.objs[7]),
        (Q(value__d__1__has_key="f"), self.objs[4]),
        (
            Q(
                value__has_key=KeyTransform(
                    "f", KeyTransform("1", KeyTransform("d", "value"))
                )
            ),
            self.objs[4],
        ),
        (Q(value__has_key=F("value__d__1__f")), self.objs[4]),
    ]
    for condition, expected in tests:
        with self.subTest(condition=condition):
            self.assertSequenceEqual(
                NullableJSONModel.objects.filter(condition),
                [expected],
            )
def test_has_key_literal_lookup(self):
    """HasKey can be used standalone on a literal JSON Value; the condition
    holds for every row, so all objects match."""
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(
            HasKey(Value({"foo": "bar"}, JSONField()), "foo")
        ).order_by("id"),
        self.objs,
    )
def test_has_key_list(self):
    """has_key works on objects nested inside a top-level array; integer and
    string indexes are equivalent."""
    obj = NullableJSONModel.objects.create(value=[{"a": 1}, {"b": "x"}])
    tests = [
        Q(value__1__has_key="b"),
        Q(value__has_key=KeyTransform("b", KeyTransform(1, "value"))),
        Q(value__has_key=KeyTransform("b", KeyTransform("1", "value"))),
        Q(value__has_key=F("value__1__b")),
    ]
    for condition in tests:
        with self.subTest(condition=condition):
            self.assertSequenceEqual(
                NullableJSONModel.objects.filter(condition),
                [obj],
            )
def test_has_keys(self):
    """has_keys requires every listed key to be present."""
    matches = NullableJSONModel.objects.filter(value__has_keys=["a", "c", "h"])
    self.assertSequenceEqual(matches, [self.objs[4]])
def test_has_any_keys(self):
    """has_any_keys requires at least one of the listed keys."""
    matches = NullableJSONModel.objects.filter(value__has_any_keys=["c", "l"])
    self.assertCountEqual(matches, [self.objs[3], self.objs[4], self.objs[6]])
def test_has_key_number(self):
    """Numeric-looking strings are treated as object keys (not array
    indexes) by has_key/has_keys/has_any_keys, at any nesting level."""
    obj = NullableJSONModel.objects.create(
        value={
            "123": "value",
            "nested": {"456": "bar", "lorem": "abc", "999": True},
            "array": [{"789": "baz", "777": "def", "ipsum": 200}],
            "000": "val",
        }
    )
    tests = [
        Q(value__has_key="123"),
        Q(value__nested__has_key="456"),
        Q(value__array__0__has_key="789"),
        Q(value__has_keys=["nested", "123", "array", "000"]),
        Q(value__nested__has_keys=["lorem", "999", "456"]),
        Q(value__array__0__has_keys=["789", "ipsum", "777"]),
        Q(value__has_any_keys=["000", "nonexistent"]),
        Q(value__nested__has_any_keys=["999", "nonexistent"]),
        Q(value__array__0__has_any_keys=["777", "nonexistent"]),
    ]
    for condition in tests:
        with self.subTest(condition=condition):
            self.assertSequenceEqual(
                NullableJSONModel.objects.filter(condition),
                [obj],
            )
@skipUnlessDBFeature("supports_json_field_contains")
def test_contains(self):
    """contains matches documents that are supersets of the given value,
    including partial objects, partial arrays, and null-valued keys."""
    tests = [
        ({}, self.objs[2:5] + self.objs[6:8]),
        ({"baz": {"a": "b", "c": "d"}}, [self.objs[7]]),
        ({"baz": {"a": "b"}}, [self.objs[7]]),
        ({"baz": {"c": "d"}}, [self.objs[7]]),
        ({"k": True, "l": False}, [self.objs[6]]),
        ({"d": ["e", {"f": "g"}]}, [self.objs[4]]),
        ({"d": ["e"]}, [self.objs[4]]),
        ({"d": [{"f": "g"}]}, [self.objs[4]]),
        ([1, [2]], [self.objs[5]]),
        ([1], [self.objs[5]]),
        ([[2]], [self.objs[5]]),
        ({"n": [None, True, False]}, [self.objs[4]]),
        ({"j": None}, [self.objs[4]]),
    ]
    for value, expected in tests:
        with self.subTest(value=value):
            qs = NullableJSONModel.objects.filter(value__contains=value)
            self.assertCountEqual(qs, expected)
@skipIfDBFeature("supports_json_field_contains")
def test_contains_unsupported(self):
    """Backends without JSON containment raise NotSupportedError for
    contains."""
    msg = "contains lookup is not supported on this database backend."
    with self.assertRaisesMessage(NotSupportedError, msg):
        NullableJSONModel.objects.filter(
            value__contains={"baz": {"a": "b", "c": "d"}},
        ).get()
@skipUnlessDBFeature(
    "supports_primitives_in_json_field",
    "supports_json_field_contains",
)
def test_contains_primitives(self):
    """Each stored JSON primitive contains itself."""
    for value in self.primitives:
        with self.subTest(value=value):
            qs = NullableJSONModel.objects.filter(value__contains=value)
            self.assertIs(qs.exists(), True)
@skipUnlessDBFeature("supports_json_field_contains")
def test_contained_by(self):
    """contained_by matches documents that are subsets of the given value."""
    superset = {"a": "b", "c": 14, "h": True}
    matches = NullableJSONModel.objects.filter(value__contained_by=superset)
    self.assertCountEqual(matches, self.objs[2:4])
@skipIfDBFeature("supports_json_field_contains")
def test_contained_by_unsupported(self):
    """Backends without JSON containment raise NotSupportedError for
    contained_by."""
    msg = "contained_by lookup is not supported on this database backend."
    with self.assertRaisesMessage(NotSupportedError, msg):
        NullableJSONModel.objects.filter(value__contained_by={"a": "b"}).get()
def test_deep_values(self):
    """values_list() on a nested key yields None for rows lacking the path."""
    qs = NullableJSONModel.objects.values_list("value__k__l").order_by("pk")
    expected = [(None,)] * len(self.objs)
    expected[4] = ("m",)  # only objs[4] has value["k"]["l"]
    self.assertSequenceEqual(qs, expected)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_deep_distinct(self):
    """distinct() can be applied to a nested key transform."""
    query = NullableJSONModel.objects.distinct("value__k__l").values_list(
        "value__k__l"
    )
    expected = [("m",), (None,)]
    # NULL ordering determines which distinct value comes first.
    if not connection.features.nulls_order_largest:
        expected.reverse()
    self.assertSequenceEqual(query, expected)
def test_isnull_key(self):
    """key__isnull tracks key absence: a key set to JSON null (objs[4]'s
    "j") still counts as present."""
    # key__isnull=False works the same as has_key='key'.
    self.assertCountEqual(
        NullableJSONModel.objects.filter(value__a__isnull=True),
        self.objs[:3] + self.objs[5:],
    )
    self.assertCountEqual(
        NullableJSONModel.objects.filter(value__j__isnull=True),
        self.objs[:4] + self.objs[5:],
    )
    self.assertCountEqual(
        NullableJSONModel.objects.filter(value__a__isnull=False),
        [self.objs[3], self.objs[4]],
    )
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(value__j__isnull=False),
        [self.objs[4]],
    )
def test_isnull_key_or_none(self):
    """Combining key__isnull=True with key=None catches both missing keys
    and keys stored as JSON null."""
    obj = NullableJSONModel.objects.create(value={"a": None})
    self.assertCountEqual(
        NullableJSONModel.objects.filter(
            Q(value__a__isnull=True) | Q(value__a=None)
        ),
        self.objs[:3] + self.objs[5:] + [obj],
    )
def test_none_key(self):
    """key=None matches a key whose stored value is JSON null."""
    matches = NullableJSONModel.objects.filter(value__j=None)
    self.assertSequenceEqual(matches, [self.objs[4]])
def test_none_key_exclude(self):
    """exclude(key=None) keeps only rows where the key exists with a
    non-null value (backend-dependent for Oracle)."""
    obj = NullableJSONModel.objects.create(value={"j": 1})
    if connection.vendor == "oracle":
        # Oracle supports filtering JSON objects with NULL keys, but the
        # current implementation doesn't support it.
        self.assertSequenceEqual(
            NullableJSONModel.objects.exclude(value__j=None),
            self.objs[1:4] + self.objs[5:] + [obj],
        )
    else:
        self.assertSequenceEqual(
            NullableJSONModel.objects.exclude(value__j=None), [obj]
        )
def test_shallow_list_lookup(self):
    """An integer transform indexes into a top-level JSON array."""
    matches = NullableJSONModel.objects.filter(value__0=1)
    self.assertSequenceEqual(matches, [self.objs[5]])
@skipIfDBFeature("supports_json_negative_indexing")
def test_unsupported_negative_lookup(self):
    """Backends without negative JSON indexing raise NotSupportedError."""
    msg = (
        "Using negative JSON array indices is not supported on this database "
        "backend."
    )
    with self.assertRaisesMessage(NotSupportedError, msg):
        NullableJSONModel.objects.filter(**{"value__-2": 1}).get()
@skipUnlessDBFeature("supports_json_negative_indexing")
def test_shallow_list_negative_lookup(self):
    """Negative indexes count from the end of a top-level array."""
    matches = NullableJSONModel.objects.filter(**{"value__-2": 1})
    self.assertSequenceEqual(matches, [self.objs[5]])
def test_shallow_obj_lookup(self):
    """A single key transform matches on a top-level object key."""
    matches = NullableJSONModel.objects.filter(value__a="b")
    self.assertCountEqual(matches, [self.objs[3], self.objs[4]])
def test_obj_subquery_lookup(self):
    """Key transforms can be applied to a Subquery annotation."""
    qs = NullableJSONModel.objects.annotate(
        field=Subquery(
            NullableJSONModel.objects.filter(pk=OuterRef("pk")).values("value")
        ),
    ).filter(field__a="b")
    self.assertCountEqual(qs, [self.objs[3], self.objs[4]])
def test_deep_lookup_objs(self):
    """Chained key transforms descend into nested objects."""
    matches = NullableJSONModel.objects.filter(value__k__l="m")
    self.assertSequenceEqual(matches, [self.objs[4]])
def test_shallow_lookup_obj_target(self):
    """A key transform can be compared against a whole JSON object."""
    matches = NullableJSONModel.objects.filter(value__k={"l": "m"})
    self.assertSequenceEqual(matches, [self.objs[4]])
def test_deep_lookup_array(self):
    """Index transforms chain through nested arrays."""
    matches = NullableJSONModel.objects.filter(value__1__0=2)
    self.assertSequenceEqual(matches, [self.objs[5]])
@skipUnlessDBFeature("supports_json_negative_indexing")
def test_deep_negative_lookup_array(self):
    """A negative index composes with a nested positive index."""
    matches = NullableJSONModel.objects.filter(**{"value__-1__0": 2})
    self.assertSequenceEqual(matches, [self.objs[5]])
def test_deep_lookup_mixed(self):
    """Key and index transforms can be mixed in one path."""
    matches = NullableJSONModel.objects.filter(value__d__1__f="g")
    self.assertSequenceEqual(matches, [self.objs[4]])
@skipUnlessDBFeature("supports_json_negative_indexing")
def test_deep_negative_lookup_mixed(self):
    """Negative array indexes compose with object-key transforms."""
    matches = NullableJSONModel.objects.filter(**{"value__d__-1__f": "g"})
    self.assertSequenceEqual(matches, [self.objs[4]])
def test_deep_lookup_transform(self):
    """gt/lt on a key value compares numerically (int and float operands)."""
    self.assertCountEqual(
        NullableJSONModel.objects.filter(value__c__gt=2),
        [self.objs[3], self.objs[4]],
    )
    self.assertCountEqual(
        NullableJSONModel.objects.filter(value__c__gt=2.33),
        [self.objs[3], self.objs[4]],
    )
    self.assertIs(NullableJSONModel.objects.filter(value__c__lt=5).exists(), False)
def test_lookups_special_chars(self):
    """Keys containing quoting/SQL/JSON special characters survive storage
    and every key lookup (has_key, has_keys, has_any_keys, exact)."""
    test_keys = [
        "CONTROL",
        "single'",
        "dollar$",
        "dot.dot",
        "with space",
        "back\\slash",
        "question?mark",
        "user@name",
        "emo🤡'ji",
        "com,ma",
        "curly{{{brace}}}s",
        "escape\uffff'seq'\uffffue\uffff'nce",
    ]
    json_value = {key: "some value" for key in test_keys}
    obj = NullableJSONModel.objects.create(value=json_value)
    obj.refresh_from_db()
    # Round-trip through the database preserves the keys verbatim.
    self.assertEqual(obj.value, json_value)
    for key in test_keys:
        lookups = {
            "has_key": Q(value__has_key=key),
            "has_keys": Q(value__has_keys=[key, "CONTROL"]),
            "has_any_keys": Q(value__has_any_keys=[key, "does_not_exist"]),
            "exact": Q(**{f"value__{key}": "some value"}),
        }
        for lookup, condition in lookups.items():
            results = NullableJSONModel.objects.filter(condition)
            with self.subTest(key=key, lookup=lookup):
                self.assertSequenceEqual(results, [obj])
def test_lookups_special_chars_double_quotes(self):
    """Keys containing double quotes (mixed with other special characters)
    work with has_key/has_any_keys/exact lookups."""
    test_keys = [
        'double"',
        "m\\i@x. m🤡'a,t{{{ch}}}e?d$\"'es\uffff'ca\uffff'pe",
    ]
    json_value = {key: "some value" for key in test_keys}
    obj = NullableJSONModel.objects.create(value=json_value)
    obj.refresh_from_db()
    self.assertEqual(obj.value, json_value)
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(value__has_keys=test_keys), [obj]
    )
    for key in test_keys:
        with self.subTest(key=key):
            results = NullableJSONModel.objects.filter(
                Q(value__has_key=key),
                Q(value__has_any_keys=[key, "does_not_exist"]),
                Q(**{f"value__{key}": "some value"}),
            )
            self.assertSequenceEqual(results, [obj])
def test_lookup_exclude(self):
    """exclude(condition) and filter(~condition) are equivalent for JSON key
    lookups and only match rows that have the key with a different value."""
    tests = [
        (Q(value__a="b"), [self.objs[0]]),
        (Q(value__foo="bax"), [self.objs[0], self.objs[7]]),
    ]
    for condition, expected in tests:
        # subTest() keeps failures attributable to a condition and lets the
        # remaining cases run, consistent with the other parametrized loops
        # in this test class.
        with self.subTest(condition=condition):
            self.assertCountEqual(
                NullableJSONModel.objects.exclude(condition),
                expected,
            )
            self.assertCountEqual(
                NullableJSONModel.objects.filter(~condition),
                expected,
            )
def test_lookup_exclude_nonexistent_key(self):
    """exclude()/negation on a key lookup ignores rows that lack the key
    entirely; only __isnull makes the partition exhaustive."""
    # Values without the key are ignored.
    condition = Q(value__foo="bax")
    objs_with_value = [self.objs[6]]
    objs_with_different_value = [self.objs[0], self.objs[7]]
    self.assertCountEqual(
        NullableJSONModel.objects.exclude(condition),
        objs_with_different_value,
    )
    self.assertSequenceEqual(
        NullableJSONModel.objects.exclude(~condition),
        objs_with_value,
    )
    # condition | ~condition is NOT a tautology here: rows without the key
    # match neither side.
    self.assertCountEqual(
        NullableJSONModel.objects.filter(condition | ~condition),
        objs_with_value + objs_with_different_value,
    )
    self.assertCountEqual(
        NullableJSONModel.objects.exclude(condition & ~condition),
        objs_with_value + objs_with_different_value,
    )
    # Add the __isnull lookup to get an exhaustive set.
    self.assertCountEqual(
        NullableJSONModel.objects.exclude(condition & Q(value__foo__isnull=False)),
        self.objs[0:6] + self.objs[7:],
    )
    self.assertSequenceEqual(
        NullableJSONModel.objects.filter(condition & Q(value__foo__isnull=False)),
        objs_with_value,
    )
def test_usage_in_subquery(self):
    """JSON key lookups work inside an id__in subquery."""
    inner = NullableJSONModel.objects.filter(value__c=14)
    matches = NullableJSONModel.objects.filter(id__in=inner)
    self.assertCountEqual(matches, self.objs[3:5])
@skipUnlessDBFeature("supports_json_field_contains")
def test_array_key_contains(self):
    """contains on an array-valued key matches whole elements, not
    substrings ("ar" matches nothing)."""
    tests = [
        ([], [self.objs[7]]),
        ("bar", [self.objs[7]]),
        (["bar"], [self.objs[7]]),
        ("ar", []),
    ]
    for value, expected in tests:
        with self.subTest(value=value):
            self.assertSequenceEqual(
                NullableJSONModel.objects.filter(value__bar__contains=value),
                expected,
            )
def test_key_iexact(self):
    """iexact compares against the unquoted string value of a key."""
    matched = NullableJSONModel.objects.filter(value__foo__iexact="BaR")
    self.assertIs(matched.exists(), True)
    # A JSON-quoted operand does not match the bare string value.
    quoted = NullableJSONModel.objects.filter(value__foo__iexact='"BaR"')
    self.assertIs(quoted.exists(), False)
def test_key_in(self):
    """__in on key/index transforms accepts literals, nested containers,
    booleans, KeyTransforms, and F() expressions as candidates."""
    tests = [
        ("value__c__in", [14], self.objs[3:5]),
        ("value__c__in", [14, 15], self.objs[3:5]),
        ("value__0__in", [1], [self.objs[5]]),
        ("value__0__in", [1, 3], [self.objs[5]]),
        ("value__foo__in", ["bar"], [self.objs[7]]),
        (
            "value__foo__in",
            [KeyTransform("foo", KeyTransform("bax", "value"))],
            [self.objs[7]],
        ),
        ("value__foo__in", [F("value__bax__foo")], [self.objs[7]]),
        (
            "value__foo__in",
            [KeyTransform("foo", KeyTransform("bax", "value")), "baz"],
            [self.objs[7]],
        ),
        ("value__foo__in", [F("value__bax__foo"), "baz"], [self.objs[7]]),
        ("value__foo__in", ["bar", "baz"], [self.objs[7]]),
        ("value__bar__in", [["foo", "bar"]], [self.objs[7]]),
        ("value__bar__in", [["foo", "bar"], ["a"]], [self.objs[7]]),
        ("value__bax__in", [{"foo": "bar"}, {"a": "b"}], [self.objs[7]]),
        ("value__h__in", [True, "foo"], [self.objs[4]]),
        ("value__i__in", [False, "foo"], [self.objs[4]]),
    ]
    for lookup, value, expected in tests:
        # NOTE(review): atomic() presumably isolates each subtest so a
        # backend error in one lookup doesn't poison the transaction for
        # the following cases — confirm against the backends exercised.
        with self.subTest(lookup=lookup, value=value), transaction.atomic():
            self.assertCountEqual(
                NullableJSONModel.objects.filter(**{lookup: value}),
                expected,
            )
def test_key_values(self):
    """values_list() on a key transform returns the decoded Python value
    for every JSON type (string, int, list, bool, null, dict, float)."""
    qs = NullableJSONModel.objects.filter(value__h=True)
    tests = [
        ("value__a", "b"),
        ("value__c", 14),
        ("value__d", ["e", {"f": "g"}]),
        ("value__h", True),
        ("value__i", False),
        ("value__j", None),
        ("value__k", {"l": "m"}),
        ("value__n", [None, True, False]),
        ("value__p", 4.2),
        ("value__r", {"s": True, "t": False}),
    ]
    for lookup, expected in tests:
        with self.subTest(lookup=lookup):
            self.assertEqual(qs.values_list(lookup, flat=True).get(), expected)
def test_key_values_boolean(self):
    """JSON booleans round-trip as real True/False objects — assertIs
    checks identity, not just equality with 1/0."""
    qs = NullableJSONModel.objects.filter(value__h=True, value__i=False)
    tests = [
        ("value__h", True),
        ("value__i", False),
    ]
    for lookup, expected in tests:
        with self.subTest(lookup=lookup):
            self.assertIs(qs.values_list(lookup, flat=True).get(), expected)
@skipUnlessDBFeature("supports_json_field_contains")
def test_key_contains(self):
    """contains on a key value matches the whole value, not substrings."""
    partial = NullableJSONModel.objects.filter(value__foo__contains="ar")
    self.assertIs(partial.exists(), False)
    full = NullableJSONModel.objects.filter(value__foo__contains="bar")
    self.assertIs(full.exists(), True)
def test_key_icontains(self):
    """icontains on a key value is a case-insensitive substring match."""
    matches = NullableJSONModel.objects.filter(value__foo__icontains="Ar")
    self.assertIs(matches.exists(), True)
def test_key_startswith(self):
    """startswith applies to the text form of a key value."""
    matches = NullableJSONModel.objects.filter(value__foo__startswith="b")
    self.assertIs(matches.exists(), True)
def test_key_istartswith(self):
    """istartswith applies case-insensitively to a key value."""
    matches = NullableJSONModel.objects.filter(value__foo__istartswith="B")
    self.assertIs(matches.exists(), True)
def test_key_endswith(self):
    """endswith applies to the text form of a key value."""
    matches = NullableJSONModel.objects.filter(value__foo__endswith="r")
    self.assertIs(matches.exists(), True)
def test_key_iendswith(self):
    """iendswith applies case-insensitively to a key value."""
    matches = NullableJSONModel.objects.filter(value__foo__iendswith="R")
    self.assertIs(matches.exists(), True)
def test_key_regex(self):
    """regex matches against a key value."""
    matches = NullableJSONModel.objects.filter(value__foo__regex=r"^bar$")
    self.assertIs(matches.exists(), True)
def test_key_iregex(self):
    """iregex matches case-insensitively against a key value."""
    matches = NullableJSONModel.objects.filter(value__foo__iregex=r"^bAr$")
    self.assertIs(matches.exists(), True)
def test_key_quoted_string(self):
    """A JSON string value containing quotes can be matched exactly."""
    matched = NullableJSONModel.objects.filter(value__o='"quoted"').get()
    self.assertEqual(matched, self.objs[4])
@skipUnlessDBFeature("has_json_operators")
def test_key_sql_injection(self):
    """A malicious key name is quoted, not spliced, into the native JSON
    operator SQL."""
    with CaptureQueriesContext(connection) as queries:
        self.assertIs(
            NullableJSONModel.objects.filter(
                **{
                    """value__test' = '"a"') OR 1 = 1 OR ('d""": "x",
                }
            ).exists(),
            False,
        )
    # The quotes in the key are doubled in the emitted SQL.
    self.assertIn(
        """."value" -> 'test'' = ''"a"'') OR 1 = 1 OR (''d') = '"x"'""",
        queries[0]["sql"],
    )
@skipIfDBFeature("has_json_operators")
def test_key_sql_injection_escape(self):
    """Without native JSON operators, malicious key characters are escaped
    inside the JSON path string."""
    query = str(
        JSONModel.objects.filter(
            **{
                """value__test") = '"a"' OR 1 = 1 OR ("d""": "x",
            }
        ).query
    )
    self.assertIn('"test\\"', query)
    self.assertIn('\\"d', query)
def test_key_escape(self):
    """A key containing the SQL wildcard '%' round-trips through a lookup."""
    obj = NullableJSONModel.objects.create(value={"%total": 10})
    matched = NullableJSONModel.objects.filter(**{"value__%total": 10}).get()
    self.assertEqual(matched, obj)
def test_none_key_and_exact_lookup(self):
    """An exact key lookup combines with a JSON-null key lookup."""
    matches = NullableJSONModel.objects.filter(value__a="b", value__j=None)
    self.assertSequenceEqual(matches, [self.objs[4]])
def test_lookups_with_key_transform(self):
    """has_key/has_keys/has_any_keys compose with key transforms, and
    has_key accepts a KeyTextTransform as the key argument."""
    tests = (
        ("value__baz__has_key", "c"),
        ("value__baz__has_keys", ["a", "c"]),
        ("value__baz__has_any_keys", ["a", "x"]),
        ("value__has_key", KeyTextTransform("foo", "value")),
    )
    for lookup, value in tests:
        with self.subTest(lookup=lookup):
            self.assertIs(
                NullableJSONModel.objects.filter(
                    **{lookup: value},
                ).exists(),
                True,
            )
def test_cast_with_key_text_transform(self):
    """Key lookups work on a Cast() of a literal JSON Value annotation."""
    obj = NullableJSONModel.objects.annotate(
        json_data=Cast(Value({"foo": "bar"}, JSONField()), JSONField())
    ).get(pk=self.objs[0].pk, json_data__foo__icontains="bar")
    self.assertEqual(obj, self.objs[0])
@skipUnlessDBFeature("supports_json_field_contains")
def test_contains_contained_by_with_key_transform(self):
    """contains/contained_by accept key transforms, F() expressions, and
    RawSQL-derived transforms as either side of the comparison."""
    tests = [
        ("value__d__contains", "e"),
        ("value__d__contains", [{"f": "g"}]),
        ("value__contains", KeyTransform("bax", "value")),
        ("value__contains", F("value__bax")),
        ("value__baz__contains", {"a": "b"}),
        ("value__baz__contained_by", {"a": "b", "c": "d", "e": "f"}),
        (
            "value__contained_by",
            KeyTransform(
                "x",
                RawSQL(
                    self.raw_sql,
                    ['{"x": {"a": "b", "c": 1, "d": "e"}}'],
                ),
            ),
        ),
    ]
    # For databases where {'f': 'g'} (without surrounding []) matches
    # [{'f': 'g'}].
    if not connection.features.json_key_contains_list_matching_requires_list:
        tests.append(("value__d__contains", {"f": "g"}))
    for lookup, value in tests:
        with self.subTest(lookup=lookup, value=value):
            self.assertIs(
                NullableJSONModel.objects.filter(
                    **{lookup: value},
                ).exists(),
                True,
            )
def test_join_key_transform_annotation_expression(self):
    """Key transforms on F() annotations can reference a joined model's
    JSON field (json_model__value__d)."""
    related_obj = RelatedJSONModel.objects.create(
        value={"d": ["f", "e"]},
        json_model=self.objs[4],
    )
    # Second related row whose d[1] ("f") does not equal the parent's d[0]
    # ("e"), so only related_obj matches below.
    RelatedJSONModel.objects.create(
        value={"d": ["e", "f"]},
        json_model=self.objs[4],
    )
    self.assertSequenceEqual(
        RelatedJSONModel.objects.annotate(
            key=F("value__d"),
            related_key=F("json_model__value__d"),
            chain=F("key__1"),
            expr=Cast("key", models.JSONField()),
        ).filter(chain=F("related_key__0")),
        [related_obj],
    )
def test_key_text_transform_from_lookup(self):
    """KT() builds a KeyTextTransform from a lookup string, so text lookups
    like contains apply to the result."""
    qs = NullableJSONModel.objects.annotate(b=KT("value__bax__foo")).filter(
        b__contains="ar",
    )
    self.assertSequenceEqual(qs, [self.objs[7]])
    # The text form is unquoted: '"quoted"' contains "uot".
    qs = NullableJSONModel.objects.annotate(c=KT("value__o")).filter(
        c__contains="uot",
    )
    self.assertSequenceEqual(qs, [self.objs[4]])
def test_key_text_transform_from_lookup_invalid(self):
    """KT() rejects lookups without any key or index transform."""
    msg = "Lookup must contain key or index transforms."
    for invalid_lookup in ("value", ""):
        with self.assertRaisesMessage(ValueError, msg):
            KT(invalid_lookup)
def test_literal_annotation_filtering(self):
    """Filtering on a key of a literal JSON Value annotation matches every
    row, since the annotation is row-independent."""
    all_objects = NullableJSONModel.objects.order_by("id")
    qs = all_objects.annotate(data=Value({"foo": "bar"}, JSONField())).filter(
        data__foo="bar"
    )
    self.assertQuerySetEqual(qs, all_objects)
@skipUnlessDBFeature("supports_primitives_in_json_field")
| TestQuerying |
python | spack__spack | lib/spack/spack/test/entry_points.py | {
"start": 764,
"end": 4154
} | class ____:
def __init__(self, tmp_path: pathlib.Path):
    """Fake importlib.metadata entry point for a Spack extension, rooted
    at *tmp_path*."""
    # Directory under which load() materializes the mock extension.
    self.dir = tmp_path
    self.name = "mypackage_extensions"
def load(self):
    """Materialize a mock "myext" Spack extension with one command (spam)
    under self.dir and return a zero-argument callable yielding the
    extension root, mimicking an entry point's load()."""
    cmd_path = self.dir.joinpath("spack/spack-myext/myext/cmd")
    cmd_path.mkdir(exist_ok=True, parents=True)
    # Reuse cmd_path rather than re-spelling the directory literal, so the
    # command file always lands in the directory created above.
    f = cmd_path / "spam.py"
    with open(f, "w", encoding="utf-8") as fh:
        fh.write("description = 'hello world extension command'\n")
        fh.write("section = 'test command'\n")
        fh.write("level = 'long'\n")
        fh.write("def setup_parser(subparser):\n    pass\n")
        fh.write("def spam(parser, args):\n    print('spam for all!')\n")

    def ep():
        return self.dir / "spack/spack-myext"

    return ep
def entry_points_factory(tmp_path: pathlib.Path):
    """Return a stand-in for ``importlib.metadata.entry_points`` that serves
    mock Spack config and extension entry points rooted at *tmp_path*."""
    def entry_points(group=None):
        # Only the two Spack-specific groups are mocked; anything else is
        # reported as having no entry points.
        if group == "spack.config":
            return (MockConfigEntryPoint(tmp_path),)
        elif group == "spack.extensions":
            return (MockExtensionsEntryPoint(tmp_path),)
        return ()

    return entry_points
@pytest.fixture()
def mock_get_entry_points(tmp_path: pathlib.Path, monkeypatch):
    """Patch Spack's entry-point discovery to use the mock factory."""
    entry_points = entry_points_factory(tmp_path)
    monkeypatch.setattr(spack.llnl.util.lang, "get_entry_points", entry_points)
def test_spack_entry_point_config(tmp_path: pathlib.Path, mock_get_entry_points):
    """The mock config entry point is discovered as a config scope and its
    settings are readable through spack.config."""
    config_paths = dict(spack.config.config_paths_from_entry_points())
    config_path = config_paths.get("plugin-mypackage_config")
    my_config_path = tmp_path / "spack/etc"
    if config_path is None:
        raise ValueError("Did not find entry point config in %s" % str(config_paths))
    else:
        # samefile() tolerates symlink/relative-path differences.
        assert os.path.samefile(config_path, my_config_path)
    config = spack.config.create()
    assert config.get("config:install_tree:root", scope="plugin-mypackage_config") == "/spam/opt"
def test_spack_entry_point_extension(tmp_path: pathlib.Path, mock_get_entry_points):
    """Test extensions entry point: the mock extension is discovered, can
    be loaded, and its command module resolves."""
    my_ext = tmp_path / "spack/spack-myext"
    extensions = spack.extensions.get_extension_paths()
    found = bool([ext for ext in extensions if os.path.samefile(ext, my_ext)])
    if not found:
        raise ValueError("Did not find extension in %s" % ", ".join(extensions))
    extensions = spack.extensions.extension_paths_from_entry_points()
    found = bool([ext for ext in extensions if os.path.samefile(ext, my_ext)])
    if not found:
        raise ValueError("Did not find extension in %s" % ", ".join(extensions))
    root = spack.extensions.load_extension("myext")
    assert os.path.samefile(root, my_ext)
    # "spam" is the command written by MockExtensionsEntryPoint.load().
    module = spack.extensions.get_module("spam")
    assert module is not None
@pytest.mark.skipif(sys.version_info[:2] < (3, 8), reason="Python>=3.8 required")
def test_llnl_util_lang_get_entry_points(tmp_path: pathlib.Path, monkeypatch):
    """get_entry_points() forwards group queries to
    importlib.metadata.entry_points and yields the registered objects."""
    import importlib.metadata  # type: ignore # novermin

    monkeypatch.setattr(importlib.metadata, "entry_points", entry_points_factory(tmp_path))
    entry_points = list(spack.llnl.util.lang.get_entry_points(group="spack.config"))
    assert isinstance(entry_points[0], MockConfigEntryPoint)
    entry_points = list(spack.llnl.util.lang.get_entry_points(group="spack.extensions"))
    assert isinstance(entry_points[0], MockExtensionsEntryPoint)
| MockExtensionsEntryPoint |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.