language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | conda__conda | conda/exceptions.py | {
"start": 14888,
"end": 15440
} | class ____(CondaError):
def __init__(self, message: str | None = None):
if message is None:
message = dals(
"""
Conda cannot proceed due to an error in your proxy configuration.
Check for typos and other configuration errors in any '.netrc' file in your home directory,
any environment variables ending in '_PROXY', and any other system-wide proxy
configuration settings.
"""
)
super().__init__(message)
| ProxyError |
python | run-llama__llama_index | llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/types.py | {
"start": 2176,
"end": 2578
} | class ____(BaseVoiceAgentEvent):
audio: Union[bytes, str]
@model_validator(mode="after")
def validate_audio_input(self) -> Self:
try:
base64.b64decode(self.audio, validate=True)
except binascii.Error:
if isinstance(self.audio, bytes):
self.audio = base64.b64encode(self.audio).decode("utf-8")
return self
| ConversationInputEvent |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 18637,
"end": 18877
} | class ____:
def test_endpoints(self):
# Regression test for gh-13697. The following calculation
# should not generate a warning.
p = stats.arcsine.pdf([0, 1])
assert_equal(p, [np.inf, np.inf])
| TestArcsine |
python | readthedocs__readthedocs.org | readthedocs/projects/tests/test_views.py | {
"start": 6332,
"end": 11433
} | class ____(TestCase):
def setUp(self):
self.user = get(User)
get(EmailAddress, email=self.user.email, user=self.user, verified=True)
self.project = get(Project, users=[self.user])
self.another_user = get(User)
get(
EmailAddress,
email=self.another_user.email,
user=self.another_user,
verified=True,
)
def test_invite_by_username(self):
url = reverse("projects_users_create", args=[self.project.slug])
self.client.force_login(self.user)
resp = self.client.post(
url,
data={
"username_or_email": self.another_user.username,
},
)
self.assertEqual(resp.status_code, 302)
self.assertNotIn(self.another_user, self.project.users.all())
invitation = Invitation.objects.for_object(self.project).get()
self.assertFalse(invitation.expired)
self.assertEqual(invitation.object, self.project)
self.assertEqual(invitation.from_user, self.user)
self.assertEqual(invitation.to_user, self.another_user)
self.assertEqual(invitation.to_email, None)
def test_invite_by_email(self):
url = reverse("projects_users_create", args=[self.project.slug])
self.client.force_login(self.user)
resp = self.client.post(
url,
data={
"username_or_email": self.another_user.email,
},
)
self.assertEqual(resp.status_code, 302)
self.assertNotIn(self.another_user, self.project.users.all())
invitation = Invitation.objects.for_object(self.project).get()
self.assertFalse(invitation.expired)
self.assertEqual(invitation.object, self.project)
self.assertEqual(invitation.from_user, self.user)
self.assertEqual(invitation.to_user, self.another_user)
self.assertEqual(invitation.to_email, None)
def test_invite_existing_maintainer_by_username(self):
self.project.users.add(self.another_user)
url = reverse("projects_users_create", args=[self.project.slug])
self.client.force_login(self.user)
resp = self.client.post(
url,
data={
"username_or_email": self.another_user.username,
},
)
self.assertEqual(resp.status_code, 200)
form = resp.context_data["form"]
self.assertFalse(form.is_valid())
self.assertIn("is already a maintainer", form.errors["username_or_email"][0])
self.assertFalse(Invitation.objects.for_object(self.project).exists())
def test_invite_existing_maintainer_by_email(self):
self.project.users.add(self.another_user)
url = reverse("projects_users_create", args=[self.project.slug])
self.client.force_login(self.user)
resp = self.client.post(
url,
data={
"username_or_email": self.another_user.email,
},
)
self.assertEqual(resp.status_code, 200)
form = resp.context_data["form"]
self.assertFalse(form.is_valid())
self.assertIn("is already a maintainer", form.errors["username_or_email"][0])
self.assertFalse(Invitation.objects.for_object(self.project).exists())
def test_invite_unknown_user(self):
url = reverse("projects_users_create", args=[self.project.slug])
self.client.force_login(self.user)
resp = self.client.post(
url,
data={
"username_or_email": "foobar",
},
)
self.assertEqual(resp.status_code, 200)
form = resp.context_data["form"]
self.assertFalse(form.is_valid())
self.assertIn("does not exist", form.errors["username_or_email"][0])
self.assertNotIn(self.another_user, self.project.users.all())
self.assertFalse(Invitation.objects.for_object(self.project).exists())
def test_delete_maintainer(self):
self.project.users.add(self.another_user)
url = reverse("projects_users_delete", args=[self.project.slug])
self.client.force_login(self.user)
resp = self.client.post(
url,
data={
"username": self.user.username,
},
)
self.assertEqual(resp.status_code, 302)
self.assertNotIn(self.user, self.project.users.all())
# Ensure a message is shown
messages = list(get_messages(resp.wsgi_request))
self.assertEqual(len(messages), 1)
self.assertEqual(str(messages[0]), "User deleted")
def test_delete_last_maintainer(self):
url = reverse("projects_users_delete", args=[self.project.slug])
self.client.force_login(self.user)
resp = self.client.post(
url,
data={
"username": self.user.username,
},
)
self.assertEqual(resp.status_code, 400)
self.assertIn(self.user, self.project.users.all())
@override_settings(RTD_ALLOW_ORGANIZATIONS=False)
| TestProjectUsersViews |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/xcom.py | {
"start": 1267,
"end": 1407
} | class ____(RootModel):
"""XCom schema with minimal structure for slice-based access."""
root: list[JsonValue]
| XComSequenceSliceResponse |
python | sympy__sympy | sympy/polys/domains/mpelements.py | {
"start": 1183,
"end": 5042
} | class ____(PythonMPContext):
def __init__(ctx, prec=53, dps=None, tol=None, real=False):
ctx._prec_rounding = [prec, round_nearest]
if dps is None:
ctx._set_prec(prec)
else:
ctx._set_dps(dps)
ctx.mpf = RealElement
ctx.mpc = ComplexElement
ctx.mpf._ctxdata = [ctx.mpf, new, ctx._prec_rounding]
ctx.mpc._ctxdata = [ctx.mpc, new, ctx._prec_rounding]
if real:
ctx.mpf.context = ctx
else:
ctx.mpc.context = ctx
ctx.constant = _constant
ctx.constant._ctxdata = [ctx.mpf, new, ctx._prec_rounding]
ctx.constant.context = ctx
ctx.types = [ctx.mpf, ctx.mpc, ctx.constant]
ctx.trap_complex = True
ctx.pretty = True
if tol is None:
ctx.tol = ctx._make_tol()
elif tol is False:
ctx.tol = fzero
else:
ctx.tol = ctx._convert_tol(tol)
ctx.tolerance = ctx.make_mpf(ctx.tol)
if not ctx.tolerance:
ctx.max_denom = 1000000
else:
ctx.max_denom = int(1/ctx.tolerance)
ctx.zero = ctx.make_mpf(fzero)
ctx.one = ctx.make_mpf(fone)
ctx.j = ctx.make_mpc((fzero, fone))
ctx.inf = ctx.make_mpf(finf)
ctx.ninf = ctx.make_mpf(fninf)
ctx.nan = ctx.make_mpf(fnan)
def _make_tol(ctx):
hundred = (0, 25, 2, 5)
eps = (0, MPZ_ONE, 1-ctx.prec, 1)
return mpf_mul(hundred, eps)
def make_tol(ctx):
return ctx.make_mpf(ctx._make_tol())
def _convert_tol(ctx, tol):
if isinstance(tol, int_types):
return from_int(tol)
if isinstance(tol, float):
return from_float(tol)
if hasattr(tol, "_mpf_"):
return tol._mpf_
prec, rounding = ctx._prec_rounding
if isinstance(tol, str):
return from_str(tol, prec, rounding)
raise ValueError("expected a real number, got %s" % tol)
def _convert_fallback(ctx, x, strings):
raise TypeError("cannot create mpf from " + repr(x))
@property
def _repr_digits(ctx):
return repr_dps(ctx._prec)
@property
def _str_digits(ctx):
return ctx._dps
def to_rational(ctx, s, limit=True):
p, q = to_rational(s._mpf_)
# Needed for GROUND_TYPES=flint if gmpy2 is installed because mpmath's
# to_rational() function returns a gmpy2.mpz instance and if MPQ is
# flint.fmpq then MPQ(p, q) will fail.
p = int(p)
if not limit or q <= ctx.max_denom:
return p, q
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = p, q
while True:
a = n//d
q2 = q0 + a*q1
if q2 > ctx.max_denom:
break
p0, q0, p1, q1 = p1, q1, p0 + a*p1, q2
n, d = d, n - a*d
k = (ctx.max_denom - q0)//q1
number = MPQ(p, q)
bound1 = MPQ(p0 + k*p1, q0 + k*q1)
bound2 = MPQ(p1, q1)
if not bound2 or not bound1:
return p, q
elif abs(bound2 - number) <= abs(bound1 - number):
return bound2.numerator, bound2.denominator
else:
return bound1.numerator, bound1.denominator
def almosteq(ctx, s, t, rel_eps=None, abs_eps=None):
t = ctx.convert(t)
if abs_eps is None and rel_eps is None:
rel_eps = abs_eps = ctx.tolerance or ctx.make_tol()
if abs_eps is None:
abs_eps = ctx.convert(rel_eps)
elif rel_eps is None:
rel_eps = ctx.convert(abs_eps)
diff = abs(s-t)
if diff <= abs_eps:
return True
abss = abs(s)
abst = abs(t)
if abss < abst:
err = diff/abst
else:
err = diff/abss
return err <= rel_eps
| MPContext |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 94655,
"end": 95612
} | class ____(MeanMetricWrapper):
"""Computes the mean squared logarithmic error between `y_true` and `y_pred`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.MeanSquaredLogarithmicError()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
>>> m.result().numpy()
0.12011322
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
... sample_weight=[1, 0])
>>> m.result().numpy()
0.24022643
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
```
"""
def __init__(self, name='mean_squared_logarithmic_error', dtype=None):
super(MeanSquaredLogarithmicError, self).__init__(
mean_squared_logarithmic_error, name, dtype=dtype)
| MeanSquaredLogarithmicError |
python | sympy__sympy | sympy/assumptions/assume.py | {
"start": 6485,
"end": 10738
} | class ____(Boolean, metaclass=PredicateMeta):
"""
Base class for mathematical predicates. It also serves as a
constructor for undefined predicate objects.
Explanation
===========
Predicate is a function that returns a boolean value [1].
Predicate function is object, and it is instance of predicate class.
When a predicate is applied to arguments, ``AppliedPredicate``
instance is returned. This merely wraps the argument and remain
unevaluated. To obtain the truth value of applied predicate, use the
function ``ask``.
Evaluation of predicate is done by multiple dispatching. You can
register new handler to the predicate to support new types.
Every predicate in SymPy can be accessed via the property of ``Q``.
For example, ``Q.even`` returns the predicate which checks if the
argument is even number.
To define a predicate which can be evaluated, you must subclass this
class, make an instance of it, and register it to ``Q``. After then,
dispatch the handler by argument types.
If you directly construct predicate using this class, you will get
``UndefinedPredicate`` which cannot be dispatched. This is useful
when you are building boolean expressions which do not need to be
evaluated.
Examples
========
Applying and evaluating to boolean value:
>>> from sympy import Q, ask
>>> ask(Q.prime(7))
True
You can define a new predicate by subclassing and dispatching. Here,
we define a predicate for sexy primes [2] as an example.
>>> from sympy import Predicate, Integer
>>> class SexyPrimePredicate(Predicate):
... name = "sexyprime"
>>> Q.sexyprime = SexyPrimePredicate()
>>> @Q.sexyprime.register(Integer, Integer)
... def _(int1, int2, assumptions):
... args = sorted([int1, int2])
... if not all(ask(Q.prime(a), assumptions) for a in args):
... return False
... return args[1] - args[0] == 6
>>> ask(Q.sexyprime(5, 11))
True
Direct constructing returns ``UndefinedPredicate``, which can be
applied but cannot be dispatched.
>>> from sympy import Predicate, Integer
>>> Q.P = Predicate("P")
>>> type(Q.P)
<class 'sympy.assumptions.assume.UndefinedPredicate'>
>>> Q.P(1)
Q.P(1)
>>> Q.P.register(Integer)(lambda expr, assump: True)
Traceback (most recent call last):
...
TypeError: <class 'sympy.assumptions.assume.UndefinedPredicate'> cannot be dispatched.
References
==========
.. [1] https://en.wikipedia.org/wiki/Predicate_%28mathematical_logic%29
.. [2] https://en.wikipedia.org/wiki/Sexy_prime
"""
is_Atom = True
def __new__(cls, *args, **kwargs):
if cls is Predicate:
return UndefinedPredicate(*args, **kwargs)
obj = super().__new__(cls, *args)
return obj
@property
def name(self):
# May be overridden
return type(self).__name__
@classmethod
def register(cls, *types, **kwargs):
"""
Register the signature to the handler.
"""
if cls.handler is None:
raise TypeError(f"{type(cls)} cannot be dispatched.")
return cls.handler.register(*types, **kwargs)
@classmethod
def register_many(cls, *types, **kwargs):
"""
Register multiple signatures to same handler.
"""
def _(func):
for t in types:
if not is_sequence(t):
t = (t,) # for convenience, allow passing `type` to mean `(type,)`
cls.register(*t, **kwargs)(func)
return _
def __call__(self, *args):
return AppliedPredicate(self, *args)
def eval(self, args, assumptions=True):
"""
Evaluate ``self(*args)`` under the given assumptions.
This uses only direct resolution methods, not logical inference.
"""
result = None
try:
result = self.handler(*args, assumptions=assumptions)
except NotImplementedError:
pass
return result
def _eval_refine(self, assumptions):
# When Predicate is no longer Boolean, delete this method
return self
| Predicate |
python | TheAlgorithms__Python | data_structures/heap/min_heap.py | {
"start": 338,
"end": 4507
} | class ____:
"""
>>> r = Node("R", -1)
>>> b = Node("B", 6)
>>> a = Node("A", 3)
>>> x = Node("X", 1)
>>> e = Node("E", 4)
>>> print(b)
Node(B, 6)
>>> myMinHeap = MinHeap([r, b, a, x, e])
>>> myMinHeap.decrease_key(b, -17)
>>> print(b)
Node(B, -17)
>>> myMinHeap["B"]
-17
"""
def __init__(self, array):
self.idx_of_element = {}
self.heap_dict = {}
self.heap = self.build_heap(array)
def __getitem__(self, key):
return self.get_value(key)
def get_parent_idx(self, idx):
return (idx - 1) // 2
def get_left_child_idx(self, idx):
return idx * 2 + 1
def get_right_child_idx(self, idx):
return idx * 2 + 2
def get_value(self, key):
return self.heap_dict[key]
def build_heap(self, array):
last_idx = len(array) - 1
start_from = self.get_parent_idx(last_idx)
for idx, i in enumerate(array):
self.idx_of_element[i] = idx
self.heap_dict[i.name] = i.val
for i in range(start_from, -1, -1):
self.sift_down(i, array)
return array
# this is min-heapify method
def sift_down(self, idx, array):
while True:
left = self.get_left_child_idx(idx)
right = self.get_right_child_idx(idx)
smallest = idx
if left < len(array) and array[left] < array[idx]:
smallest = left
if right < len(array) and array[right] < array[smallest]:
smallest = right
if smallest != idx:
array[idx], array[smallest] = array[smallest], array[idx]
(
self.idx_of_element[array[idx]],
self.idx_of_element[array[smallest]],
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
idx = smallest
else:
break
def sift_up(self, idx):
p = self.get_parent_idx(idx)
while p >= 0 and self.heap[p] > self.heap[idx]:
self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
idx = p
p = self.get_parent_idx(idx)
def peek(self):
return self.heap[0]
def remove(self):
self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
x = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0, self.heap)
return x
def insert(self, node):
self.heap.append(node)
self.idx_of_element[node] = len(self.heap) - 1
self.heap_dict[node.name] = node.val
self.sift_up(len(self.heap) - 1)
def is_empty(self):
return len(self.heap) == 0
def decrease_key(self, node, new_value):
assert self.heap[self.idx_of_element[node]].val > new_value, (
"newValue must be less that current value"
)
node.val = new_value
self.heap_dict[node.name] = new_value
self.sift_up(self.idx_of_element[node])
# USAGE
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| MinHeap |
python | realpython__materials | python-tic-tac-toe-game-tkinter/source_code_final/tic_tac_toe.py | {
"start": 3092,
"end": 6773
} | class ____(tk.Tk):
def __init__(self, game):
super().__init__()
self.title("Tic-Tac-Toe Game")
self._cells = {}
self._game = game
self._create_menu()
self._create_board_display()
self._create_board_grid()
def _create_menu(self):
menu_bar = tk.Menu(master=self)
self.config(menu=menu_bar)
file_menu = tk.Menu(master=menu_bar)
file_menu.add_command(label="Play Again", command=self.reset_board)
file_menu.add_separator()
file_menu.add_command(label="Exit", command=quit)
menu_bar.add_cascade(label="File", menu=file_menu)
def _create_board_display(self):
display_frame = tk.Frame(master=self)
display_frame.pack(fill=tk.X)
self.display = tk.Label(
master=display_frame,
text="Ready?",
font=font.Font(size=28, weight="bold"),
)
self.display.pack()
def _create_board_grid(self):
grid_frame = tk.Frame(master=self)
grid_frame.pack()
for row in range(self._game.board_size):
self.rowconfigure(row, weight=1, minsize=50)
self.columnconfigure(row, weight=1, minsize=75)
for col in range(self._game.board_size):
button = tk.Button(
master=grid_frame,
text="",
font=font.Font(size=36, weight="bold"),
fg="black",
width=3,
height=2,
highlightbackground="lightblue",
)
self._cells[button] = (row, col)
button.bind("<ButtonPress-1>", self.play)
button.grid(row=row, column=col, padx=5, pady=5, sticky="nsew")
def play(self, event):
"""Handle a player's move."""
clicked_btn = event.widget
row, col = self._cells[clicked_btn]
move = Move(row, col, self._game.current_player.label)
if self._game.is_valid_move(move):
self._update_button(clicked_btn)
self._game.process_move(move)
if self._game.is_tied():
self._update_display(msg="Tied game!", color="red")
elif self._game.has_winner():
self._highlight_cells()
msg = f'Player "{self._game.current_player.label}" won!'
color = self._game.current_player.color
self._update_display(msg, color)
else:
self._game.toggle_player()
msg = f"{self._game.current_player.label}'s turn"
self._update_display(msg)
def _update_button(self, clicked_btn):
clicked_btn.config(text=self._game.current_player.label)
clicked_btn.config(fg=self._game.current_player.color)
def _update_display(self, msg, color="black"):
self.display["text"] = msg
self.display["fg"] = color
def _highlight_cells(self):
for button, coordinates in self._cells.items():
if coordinates in self._game.winner_combo:
button.config(highlightbackground="red")
def reset_board(self):
"""Reset the game's board to play again."""
self._game.reset_game()
self._update_display(msg="Ready?")
for button in self._cells.keys():
button.config(highlightbackground="lightblue")
button.config(text="")
button.config(fg="black")
def main():
"""Create the game's board and run its main loop."""
game = TicTacToeGame()
board = TicTacToeBoard(game)
board.mainloop()
if __name__ == "__main__":
main()
| TicTacToeBoard |
python | django__django | django/db/models/lookups.py | {
"start": 15760,
"end": 15850
} | class ____(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "lte"
| LessThanOrEqual |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/data_structures.py | {
"start": 157,
"end": 212
} | class ____(NamedTuple):
rows: int
columns: int
| Size |
python | pytorch__pytorch | test/jit/test_dataclasses.py | {
"start": 1192,
"end": 1501
} | class ____:
def __init__(self, alpha: float = 0.125, scheme: MixupScheme2 = MixupScheme2.A):
self.alpha = alpha
self.scheme = scheme
# Make sure the Meta internal tooling doesn't raise an overflow error
NonHugeFloats = st.floats(min_value=-1e4, max_value=1e4, allow_nan=False)
| MixupParams3 |
python | encode__django-rest-framework | tests/test_bound_fields.py | {
"start": 8368,
"end": 8807
} | class ____:
def test_as_form_fields(self):
class TestSerializer(serializers.Serializer):
json_field = serializers.JSONField()
data = QueryDict(mutable=True)
data.update({'json_field': '{"some": ["json"}'})
serializer = TestSerializer(data=data)
assert serializer.is_valid() is False
assert serializer['json_field'].as_form_field().value == '{"some": ["json"}'
| TestJSONBoundField |
python | scikit-learn__scikit-learn | sklearn/linear_model/_passive_aggressive.py | {
"start": 579,
"end": 11969
} | class ____(BaseSGDClassifier):
"""Passive Aggressive Classifier.
.. deprecated:: 1.8
The whole class `PassiveAggressiveClassifier` was deprecated in version 1.8
and will be removed in 1.10. Instead use:
.. code-block:: python
clf = SGDClassifier(
loss="hinge",
penalty=None,
learning_rate="pa1", # or "pa2"
eta0=1.0, # for parameter C
)
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float, default=1.0
Aggressiveness parameter for the passive-agressive algorithm, see [1].
For PA-I it is the maximum step size. For PA-II it regularizes the
step size (the smaller `C` the more it regularizes).
As a general rule-of-thumb, `C` should be small when the data is noisy.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`~sklearn.linear_model.PassiveAggressiveClassifier.partial_fit` method.
.. versionadded:: 0.19
tol : float or None, default=1e-3
The stopping criterion. If it is not None, the iterations will stop
when (loss > previous_loss - tol).
.. versionadded:: 0.19
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a stratified fraction of training data as validation and terminate
training when validation score is not improving by at least `tol` for
`n_iter_no_change` consecutive epochs.
.. versionadded:: 0.20
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
.. versionadded:: 0.20
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : int, default=0
The verbosity level.
loss : str, default="hinge"
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
n_jobs : int or None, default=None
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Used to shuffle the training data, when ``shuffle`` is set to
``True``. Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
class_weight : dict, {class_label: weight} or "balanced" or None, \
default=None
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
.. versionadded:: 0.17
parameter *class_weight* to automatically weight samples.
average : bool or int, default=False
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
.. versionadded:: 0.19
parameter *average* to use weights averaging in SGD.
Attributes
----------
coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
(n_classes, n_features)
Weights assigned to the features.
intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
For multiclass fits, it is the maximum over every binary fit.
classes_ : ndarray of shape (n_classes,)
The unique classes labels.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples + 1)``.
See Also
--------
SGDClassifier : Incrementally trained logistic regression.
Perceptron : Linear perceptron classifier.
References
----------
.. [1] Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
K. Crammer, O. Dekel, J. Keshat, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
Examples
--------
>>> from sklearn.linear_model import PassiveAggressiveClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_features=4, random_state=0)
>>> clf = PassiveAggressiveClassifier(max_iter=1000, random_state=0,
... tol=1e-3)
>>> clf.fit(X, y)
PassiveAggressiveClassifier(random_state=0)
>>> print(clf.coef_)
[[0.26642044 0.45070924 0.67251877 0.64185414]]
>>> print(clf.intercept_)
[1.84127814]
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
"""
_parameter_constraints: dict = {
**BaseSGDClassifier._parameter_constraints,
"loss": [StrOptions({"hinge", "squared_hinge"})],
"C": [Interval(Real, 0, None, closed="right")],
}
_parameter_constraints.pop("eta0")
def __init__(
self,
*,
C=1.0,
fit_intercept=True,
max_iter=1000,
tol=1e-3,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5,
shuffle=True,
verbose=0,
loss="hinge",
n_jobs=None,
random_state=None,
warm_start=False,
class_weight=None,
average=False,
):
super().__init__(
penalty=None,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=C,
warm_start=warm_start,
class_weight=class_weight,
average=average,
n_jobs=n_jobs,
)
self.C = C
self.loss = loss
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Subset of the training data.
y : array-like of shape (n_samples,)
Subset of the target values.
classes : ndarray of shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : object
Fitted estimator.
"""
if not hasattr(self, "classes_"):
self._more_validate_params(for_partial_fit=True)
if self.class_weight == "balanced":
raise ValueError(
"class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter."
)
# For an explanation, see
# https://github.com/scikit-learn/scikit-learn/pull/1259#issuecomment-9818044
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(
X,
y,
alpha=1.0,
loss="hinge",
learning_rate=lr,
max_iter=1,
classes=classes,
sample_weight=None,
coef_init=None,
intercept_init=None,
)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
coef_init : ndarray of shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (n_classes,)
The initial intercept to warm-start the optimization.
Returns
-------
self : object
Fitted estimator.
"""
self._more_validate_params()
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(
X,
y,
alpha=1.0,
loss="hinge",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init,
)
# TODO(1.10): Remove
@deprecated(
"this is deprecated in version 1.8 and will be removed in 1.10. "
"Use `SGDRegressor(loss='epsilon_insensitive', penalty=None, learning_rate='pa1', "
"eta0 = 1.0)` instead."
)
| PassiveAggressiveClassifier |
python | astropy__astropy | astropy/coordinates/tests/test_geodetic_representations.py | {
"start": 669,
"end": 10163
} | class ____:
@classmethod
def setup_class(cls):
# Preserve the original REPRESENTATION_CLASSES dict so that importing
# the test file doesn't add a persistent test subclass (CustomGeodetic, etc.)
cls.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
cls.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
class CustomGeodetic(BaseGeodeticRepresentation):
_flattening = 0.01832
_equatorial_radius = 4000000.0 * u.m
class CustomSphericGeodetic(BaseGeodeticRepresentation):
_flattening = 0.0
_equatorial_radius = 4000000.0 * u.m
class CustomSphericBodycentric(BaseBodycentricRepresentation):
_flattening = 0.0
_equatorial_radius = 4000000.0 * u.m
class IAUMARS2000GeodeticRepresentation(BaseGeodeticRepresentation):
_equatorial_radius = 3396190.0 * u.m
_flattening = 0.5886007555512007 * u.percent
class IAUMARS2000BodycentricRepresentation(BaseBodycentricRepresentation):
_equatorial_radius = 3396190.0 * u.m
_flattening = 0.5886007555512007 * u.percent
cls.CustomGeodetic = CustomGeodetic
cls.CustomSphericGeodetic = CustomSphericGeodetic
cls.CustomSphericBodycentric = CustomSphericBodycentric
cls.IAUMARS2000GeodeticRepresentation = IAUMARS2000GeodeticRepresentation
cls.IAUMARS2000BodycentricRepresentation = IAUMARS2000BodycentricRepresentation
@classmethod
def teardown_class(cls):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(cls.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(cls.DUPLICATE_REPRESENTATIONS_ORIG)
def get_representation(self, representation):
if isinstance(representation, str):
return getattr(self, representation)
else:
return representation
def test_geodetic_bodycentric_equivalence_spherical_bodies(self):
initial_cartesian = CartesianRepresentation(
x=[1, 3000.0] * u.km, y=[7000.0, 4.0] * u.km, z=[5.0, 6000.0] * u.km
)
gd_transformed = self.CustomSphericGeodetic.from_representation(
initial_cartesian
)
bc_transformed = self.CustomSphericBodycentric.from_representation(
initial_cartesian
)
assert_quantity_allclose(gd_transformed.lon, bc_transformed.lon)
assert_quantity_allclose(gd_transformed.lat, bc_transformed.lat)
assert_quantity_allclose(gd_transformed.height, bc_transformed.height)
@pytest.mark.parametrize(
"geodeticrepresentation",
[
"CustomGeodetic",
WGS84GeodeticRepresentation,
"IAUMARS2000GeodeticRepresentation",
"IAUMARS2000BodycentricRepresentation",
],
)
def test_cartesian_geodetic_roundtrip(self, geodeticrepresentation):
geodeticrepresentation = self.get_representation(geodeticrepresentation)
# Test array-valued input in the process.
initial_cartesian = CartesianRepresentation(
x=[1, 3000.0] * u.km, y=[7000.0, 4.0] * u.km, z=[5.0, 6000.0] * u.km
)
transformed = geodeticrepresentation.from_representation(initial_cartesian)
roundtripped = CartesianRepresentation.from_representation(transformed)
assert_quantity_allclose(initial_cartesian.x, roundtripped.x)
assert_quantity_allclose(initial_cartesian.y, roundtripped.y)
assert_quantity_allclose(initial_cartesian.z, roundtripped.z)
@pytest.mark.parametrize(
"geodeticrepresentation",
[
"CustomGeodetic",
WGS84GeodeticRepresentation,
"IAUMARS2000GeodeticRepresentation",
"IAUMARS2000BodycentricRepresentation",
],
)
def test_geodetic_cartesian_roundtrip(self, geodeticrepresentation):
geodeticrepresentation = self.get_representation(geodeticrepresentation)
initial_geodetic = geodeticrepresentation(
lon=[0.8, 1.3] * u.radian,
lat=[0.3, 0.98] * u.radian,
height=[100.0, 367.0] * u.m,
)
transformed = CartesianRepresentation.from_representation(initial_geodetic)
roundtripped = geodeticrepresentation.from_representation(transformed)
assert_quantity_allclose(initial_geodetic.lon, roundtripped.lon)
assert_quantity_allclose(initial_geodetic.lat, roundtripped.lat)
assert_quantity_allclose(initial_geodetic.height, roundtripped.height)
def test_geocentric_to_geodetic(self):
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
# Here, test the chain. Direct conversion from Cartesian to
# various Geodetic representations is done indirectly in test_earth.
x, y, z = (2e6, 3e6, 5.244e6)
status = 0 # help for copy & paste of vvd
gc = CartesianRepresentation(x, y, z, u.m)
gd = WGS84GeodeticRepresentation.from_cartesian(gc)
e, p, h = gd.lon.to(u.radian), gd.lat.to(u.radian), gd.height.to(u.m)
vvd(e, 0.9827937232473290680, 1e-14, "eraGc2gd", "e1", status)
vvd(p, 0.97160184819075459, 1e-14, "eraGc2gd", "p1", status)
vvd(h, 331.4172461426059892, 1e-8, "eraGc2gd", "h1", status)
gd = gd.represent_as(GRS80GeodeticRepresentation)
e, p, h = gd.lon.to(u.radian), gd.lat.to(u.radian), gd.height.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
gd = gd.represent_as(WGS72GeodeticRepresentation)
e, p, h = gd.lon.to(u.radian), gd.lat.to(u.radian), gd.height.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
def test_geodetic_to_geocentric(self):
"""Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc"""
# These tests are also done implicitly in test_earth.py.
e = 3.1 * u.rad
p = -0.5 * u.rad
h = 2500.0 * u.m
status = 0 # help for copy & paste of vvd
gd = WGS84GeodeticRepresentation(e, p, h)
xyz = gd.to_cartesian().get_xyz()
vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
gd = GRS80GeodeticRepresentation(e, p, h)
xyz = gd.to_cartesian().get_xyz()
vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
gd = WGS72GeodeticRepresentation(e, p, h)
xyz = gd.to_cartesian().get_xyz()
vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
@pytest.mark.parametrize(
"representation",
[WGS84GeodeticRepresentation, "IAUMARS2000BodycentricRepresentation"],
)
def test_default_height_is_zero(self, representation):
representation = self.get_representation(representation)
gd = representation(10 * u.deg, 20 * u.deg)
assert gd.lon == 10 * u.deg
assert gd.lat == 20 * u.deg
assert gd.height == 0 * u.m
@pytest.mark.parametrize(
"representation",
[WGS84GeodeticRepresentation, "IAUMARS2000BodycentricRepresentation"],
)
def test_non_angle_error(self, representation):
representation = self.get_representation(representation)
with pytest.raises(u.UnitTypeError, match="require units equivalent to 'rad'"):
representation(20 * u.m, 20 * u.deg, 20 * u.m)
@pytest.mark.parametrize(
"representation",
[WGS84GeodeticRepresentation, "IAUMARS2000BodycentricRepresentation"],
)
def test_non_length_error(self, representation):
representation = self.get_representation(representation)
with pytest.raises(u.UnitTypeError, match="units of length"):
representation(10 * u.deg, 20 * u.deg, 30)
def test_subclass_bad_ellipsoid(self):
# Test incomplete initialization.
msg = "module 'erfa' has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
class InvalidCustomEllipsoid(BaseGeodeticRepresentation):
_ellipsoid = "foo"
assert "foo" not in ELLIPSOIDS
assert "invalidcustomellipsoid" not in REPRESENTATION_CLASSES
@pytest.mark.parametrize(
"baserepresentation",
[BaseGeodeticRepresentation, BaseBodycentricRepresentation],
)
def test_geodetic_subclass_missing_equatorial_radius(self, baserepresentation):
msg = "'_equatorial_radius' and '_flattening'."
with pytest.raises(AttributeError, match=msg):
class MissingCustomAttribute(baserepresentation):
_flattening = 0.075 * u.dimensionless_unscaled
assert "missingcustomattribute" not in REPRESENTATION_CLASSES
| TestCustomGeodeticRepresentations |
python | facebookresearch__faiss | tests/test_residual_quantizer.py | {
"start": 40015,
"end": 41841
} | class ____(unittest.TestCase):
def test_accuracy1(self):
"""check that the error is in the same ballpark as RQ."""
recall1 = self.eval_index_accuracy("PRQ4x3x5_Nqint8")
recall2 = self.eval_index_accuracy("RQ12x5_Nqint8")
self.assertGreaterEqual(recall1 * 1.1, recall2) # 657 vs 665
def test_accuracy2(self):
"""when nsplits = 1, PRQ should be the same as RQ"""
recall1 = self.eval_index_accuracy("PRQ1x3x5_Nqint8")
recall2 = self.eval_index_accuracy("RQ3x5_Nqint8")
self.assertEqual(recall1, recall2)
def eval_index_accuracy(self, index_key):
ds = datasets.SyntheticDataset(32, 1000, 1000, 100)
index = faiss.index_factory(ds.d, index_key)
index.train(ds.get_train())
index.add(ds.get_database())
D, I = index.search(ds.get_queries(), 10)
inter = faiss.eval_intersection(I, ds.get_groundtruth(10))
# do a little I/O test
index2 = faiss.deserialize_index(faiss.serialize_index(index))
D2, I2 = index2.search(ds.get_queries(), 10)
np.testing.assert_array_equal(I2, I)
np.testing.assert_array_equal(D2, D)
return inter
def test_factory(self):
AQ = faiss.AdditiveQuantizer
ns, Msub, nbits = 2, 4, 8
index = faiss.index_factory(64, f"PRQ{ns}x{Msub}x{nbits}_Nqint8")
assert isinstance(index, faiss.IndexProductResidualQuantizer)
self.assertEqual(index.prq.nsplits, ns)
self.assertEqual(index.prq.subquantizer(0).M, Msub)
self.assertEqual(index.prq.subquantizer(0).nbits.at(0), nbits)
self.assertEqual(index.prq.search_type, AQ.ST_norm_qint8)
code_size = (ns * Msub * nbits + 7) // 8 + 1
self.assertEqual(index.prq.code_size, code_size)
| TestIndexProductResidualQuantizer |
python | huggingface__transformers | tests/models/instructblipvideo/test_modeling_instructblipvideo.py | {
"start": 25930,
"end": 26917
} | class ____(unittest.TestCase):
def test_inference_vicuna_7b(self):
processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
model = InstructBlipVideoForConditionalGeneration.from_pretrained(
"Salesforce/instructblip-vicuna-7b",
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
clip = prepare_video()
prompt = "Explain what is happening in this short video."
inputs = processor(images=clip, text=prompt, return_tensors="pt").to(torch_device, torch.float16)
# verify generation
outputs = model.generate(**inputs, max_new_tokens=30)
generated_text = processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
self.assertEqual(
generated_text,
"Explain what is happening in this short video. a baby girl wearing glasses is reading a book on the bed 1080p",
)
| InstructBlipVideoModelIntegrationTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/logging_ops_test.py | {
"start": 2464,
"end": 13491
} | class ____(test.TestCase):
def testPrintOneTensor(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintOneStringTensor(self):
tensor = ops.convert_to_tensor([char for char in string.ascii_lowercase])
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "[\"a\" \"b\" \"c\" ... \"x\" \"y\" \"z\"]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintOneTensorVarySummarize(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=1)
self.evaluate(print_op)
expected = "[0 ... 9]"
self.assertIn((expected + "\n"), printed.contents())
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=2)
self.evaluate(print_op)
expected = "[0 1 ... 8 9]"
self.assertIn((expected + "\n"), printed.contents())
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=3)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertIn((expected + "\n"), printed.contents())
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, summarize=-1)
self.evaluate(print_op)
expected = "[0 1 2 3 4 5 6 7 8 9]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintOneVariable(self):
var = variables.Variable(math_ops.range(10))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(var)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintTwoVariablesInStructWithAssignAdd(self):
var_one = variables.Variable(2.14)
plus_one = var_one.assign_add(1.0)
var_two = variables.Variable(math_ops.range(10))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
with self.captureWritesToStream(sys.stderr) as printed:
self.evaluate(plus_one)
print_op = logging_ops.print_v2(var_one, {"second": var_two})
self.evaluate(print_op)
expected = "3.14 {'second': [0 1 2 ... 7 8 9]}"
self.assertIn((expected + "\n"), printed.contents())
def testPrintTwoTensors(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, tensor * 10)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9] [0 10 20 ... 70 80 90]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintTwoTensorsDifferentSep(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, tensor * 10, sep="<separator>")
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]<separator>[0 10 20 ... 70 80 90]"
self.assertIn(expected + "\n", printed.contents())
def testPrintPlaceholderGeneration(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2("{}6", {"{}": tensor * 10})
self.evaluate(print_op)
expected = "{}6 {'{}': [0 10 20 ... 70 80 90]}"
self.assertIn((expected + "\n"), printed.contents())
def testPrintNoTensors(self):
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(23, [23, 5], {"6": 12})
self.evaluate(print_op)
expected = "23 [23, 5] {'6': 12}"
self.assertIn((expected + "\n"), printed.contents())
def testPrintFloatScalar(self):
for dtype in [dtypes.bfloat16, dtypes.half, dtypes.float32, dtypes.float64]:
tensor = ops.convert_to_tensor(43.5, dtype=dtype)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "43.5"
self.assertIn((expected + "\n"), printed.contents())
def testPrintStringScalar(self):
tensor = ops.convert_to_tensor("scalar")
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor)
self.evaluate(print_op)
expected = "scalar"
self.assertIn((expected + "\n"), printed.contents())
def testPrintStringScalarDifferentEnd(self):
tensor = ops.convert_to_tensor("scalar")
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(tensor, end="<customend>")
self.evaluate(print_op)
expected = "scalar<customend>"
self.assertIn(expected, printed.contents())
def testPrintComplexTensorStruct(self):
tensor = math_ops.range(10)
small_tensor = constant_op.constant([0.3, 12.4, -16.1])
big_tensor = math_ops.mul(tensor, 10)
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(
"first:", tensor, "middle:",
{"small": small_tensor, "Big": big_tensor}, 10,
[tensor * 2, tensor])
self.evaluate(print_op)
# Note that the keys in the dict will always be sorted,
# so 'Big' comes before 'small'
expected = ("first: [0 1 2 ... 7 8 9] "
"middle: {'Big': [0 10 20 ... 70 80 90], "
"'small': [0.3 12.4 -16.1]} "
"10 [[0 2 4 ... 14 16 18], [0 1 2 ... 7 8 9]]")
self.assertIn((expected + "\n"), printed.contents())
def testPrintSparseTensor(self):
ind = [[0, 0], [1, 0], [1, 3], [4, 1], [1, 4], [3, 2], [3, 3]]
val = [0, 10, 13, 4, 14, 32, 33]
shape = [5, 6]
sparse = sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(sparse)
self.evaluate(print_op)
expected = ("'SparseTensor(indices=[[0 0]\n"
" [1 0]\n"
" [1 3]\n"
" ...\n"
" [1 4]\n"
" [3 2]\n"
" [3 3]], values=[0 10 13 ... 14 32 33], shape=[5 6])'")
self.assertIn((expected + "\n"), printed.contents())
def testPrintSparseTensorInDataStruct(self):
ind = [[0, 0], [1, 0], [1, 3], [4, 1], [1, 4], [3, 2], [3, 3]]
val = [0, 10, 13, 4, 14, 32, 33]
shape = [5, 6]
sparse = sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int64),
constant_op.constant(shape, dtypes.int64))
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2([sparse])
self.evaluate(print_op)
expected = ("['SparseTensor(indices=[[0 0]\n"
" [1 0]\n"
" [1 3]\n"
" ...\n"
" [1 4]\n"
" [3 2]\n"
" [3 3]], values=[0 10 13 ... 14 32 33], shape=[5 6])']")
self.assertIn((expected + "\n"), printed.contents())
def testPrintOneTensorStdout(self):
tensor = math_ops.range(10)
with self.captureWritesToStream(sys.stdout) as printed:
print_op = logging_ops.print_v2(
tensor, output_stream=sys.stdout)
self.evaluate(print_op)
expected = "[0 1 2 ... 7 8 9]"
self.assertIn((expected + "\n"), printed.contents())
def testPrintTensorsToFile(self):
fd, tmpfile_name = tempfile.mkstemp(".printv2_test")
tensor_0 = math_ops.range(0, 10)
print_op_0 = logging_ops.print_v2(tensor_0,
output_stream="file://"+tmpfile_name)
self.evaluate(print_op_0)
tensor_1 = math_ops.range(11, 20)
print_op_1 = logging_ops.print_v2(tensor_1,
output_stream="file://"+tmpfile_name)
self.evaluate(print_op_1)
try:
f = os.fdopen(fd, "r")
line_0 = f.readline()
expected_0 = "[0 1 2 ... 7 8 9]"
self.assertTrue(expected_0 in line_0)
line_1 = f.readline()
expected_1 = "[11 12 13 ... 17 18 19]"
self.assertTrue(expected_1 in line_1)
os.close(fd)
os.remove(tmpfile_name)
except IOError as e:
self.fail(e)
def testInvalidOutputStreamRaisesError(self):
tensor = math_ops.range(10)
with self.assertRaises(ValueError):
print_op = logging_ops.print_v2(
tensor, output_stream="unknown")
self.evaluate(print_op)
@test_util.run_deprecated_v1
def testPrintOpName(self):
tensor = math_ops.range(10)
print_op = logging_ops.print_v2(tensor, name="print_name")
self.assertEqual(print_op.name, "print_name")
@test_util.run_deprecated_v1
def testNoDuplicateFormatOpGraphModeAfterExplicitFormat(self):
tensor = math_ops.range(10)
formatted_string = string_ops.string_format("{}", tensor)
print_op = logging_ops.print_v2(formatted_string)
self.evaluate(print_op)
graph_ops = ops.get_default_graph().get_operations()
format_ops = [op for op in graph_ops if op.type == "StringFormat"]
# Should be only 1 format_op for graph mode.
self.assertEqual(len(format_ops), 1)
def testPrintOneTensorEagerOnOpCreate(self):
with context.eager_mode():
tensor = math_ops.range(10)
expected = "[0 1 2 ... 7 8 9]"
with self.captureWritesToStream(sys.stderr) as printed:
logging_ops.print_v2(tensor)
self.assertIn((expected + "\n"), printed.contents())
def testPrintsOrderedInDefun(self):
with context.eager_mode():
@def_function.function
def prints():
logging_ops.print_v2("A")
logging_ops.print_v2("B")
logging_ops.print_v2("C")
with self.captureWritesToStream(sys.stderr) as printed:
prints()
self.assertTrue(("A\nB\nC\n"), printed.contents())
def testPrintInDefunWithoutExplicitEvalOfPrint(self):
tensor = math_ops.range(10)
@def_function.function
def f(tensor):
logging_ops.print_v2(tensor)
return tensor
expected = "[0 1 2 ... 7 8 9]"
with self.captureWritesToStream(sys.stderr) as printed_one:
x = f(tensor)
self.evaluate(x)
self.assertIn((expected + "\n"), printed_one.contents())
# We execute the function again to make sure it doesn't only print on the
# first call.
with self.captureWritesToStream(sys.stderr) as printed_two:
y = f(tensor)
self.evaluate(y)
self.assertIn((expected + "\n"), printed_two.contents())
| PrintV2Test |
python | facebook__pyre-check | scripts/shape_type_coverage.py | {
"start": 639,
"end": 727
} | class ____:
name: str
parameters: List[str]
@dataclass(frozen=True)
| ParametricType |
python | getsentry__sentry | tests/sentry/web/frontend/test_oauth_authorize.py | {
"start": 46498,
"end": 54835
} | class ____(TestCase):
"""Tests for OAuth flows using custom URI schemes with strict matching (version 1)."""
@cached_property
def path(self) -> str:
return "/oauth/authorize/"
def setUp(self) -> None:
super().setUp()
self.custom_uri = "sentry-mobile-agent://sentry.io/auth"
self.application = ApiApplication.objects.create(
owner=self.user, redirect_uris=self.custom_uri, version=1 # Strict mode
)
def test_exact_match_succeeds_code_flow(self) -> None:
"""Test that exact URI match works in strict mode with authorization code flow."""
self.login_as(self.user)
resp = self.client.get(
f"{self.path}?response_type=code&redirect_uri={self.custom_uri}&client_id={self.application.client_id}"
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
resp = self.client.post(self.path, {"op": "approve"})
grant = ApiGrant.objects.get(user=self.user)
assert grant.redirect_uri == self.custom_uri
assert grant.application == self.application
assert resp.status_code == 302
assert resp["Location"].startswith("sentry-mobile-agent://")
assert f"code={grant.code}" in resp["Location"]
def test_prefix_match_fails_strict_mode(self) -> None:
"""Test that prefix matching is rejected in strict mode (version 1)."""
self.login_as(self.user)
# Try to use a URI that would match as a prefix in legacy mode
prefixed_uri = f"{self.custom_uri}/callback"
resp = self.client.get(
f"{self.path}?response_type=code&redirect_uri={prefixed_uri}&client_id={self.application.client_id}"
)
# Should fail validation because strict mode requires exact match
assert resp.status_code == 400
self.assertTemplateUsed("sentry/oauth-error.html")
assert resp.context["error"] == "Missing or invalid <em>redirect_uri</em> parameter."
def test_exact_match_succeeds_token_flow(self) -> None:
"""Test that exact URI match works in strict mode with implicit grant flow."""
self.login_as(self.user)
resp = self.client.get(
f"{self.path}?response_type=token&redirect_uri={self.custom_uri}&client_id={self.application.client_id}"
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
resp = self.client.post(self.path, {"op": "approve"})
token = ApiToken.objects.get(user=self.user)
assert token.application == self.application
assert resp.status_code == 302
assert resp["Location"].startswith("sentry-mobile-agent://")
assert "#" in resp["Location"]
assert "access_token=" in resp["Location"]
def test_code_flow_with_state_strict_mode(self) -> None:
"""Test authorization code flow with state parameter in strict mode."""
self.login_as(self.user)
state = "test-state-456"
resp = self.client.get(
f"{self.path}?response_type=code&redirect_uri={self.custom_uri}&client_id={self.application.client_id}&state={state}"
)
assert resp.status_code == 200
resp = self.client.post(self.path, {"op": "approve"})
grant = ApiGrant.objects.get(user=self.user)
assert resp.status_code == 302
assert resp["Location"].startswith("sentry-mobile-agent://")
assert f"code={grant.code}" in resp["Location"]
assert f"state={state}" in resp["Location"]
def test_code_flow_with_scopes_strict_mode(self) -> None:
"""Test authorization code flow with scopes in strict mode."""
self.login_as(self.user)
resp = self.client.get(
f"{self.path}?response_type=code&redirect_uri={self.custom_uri}&client_id={self.application.client_id}&scope=org%3Aread&state=bar"
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
resp = self.client.post(self.path, {"op": "approve"})
grant = ApiGrant.objects.get(user=self.user)
assert grant.redirect_uri == self.custom_uri
assert grant.application == self.application
assert grant.get_scopes() == ["org:read"]
assert resp.status_code == 302
assert resp["Location"].startswith("sentry-mobile-agent://")
assert f"code={grant.code}" in resp["Location"]
assert "state=bar" in resp["Location"]
def test_denial_with_exact_match_strict_mode(self) -> None:
"""Test user denial works with exact match in strict mode."""
self.login_as(self.user)
resp = self.client.get(
f"{self.path}?response_type=code&redirect_uri={self.custom_uri}&client_id={self.application.client_id}"
)
assert resp.status_code == 200
resp = self.client.post(self.path, {"op": "deny"})
assert resp.status_code == 302
assert resp["Location"].startswith("sentry-mobile-agent://")
assert "error=access_denied" in resp["Location"]
assert "code=" not in resp["Location"]
assert not ApiGrant.objects.filter(user=self.user).exists()
def test_token_flow_denial_strict_mode(self) -> None:
"""Test implicit grant denial with exact match in strict mode."""
self.login_as(self.user)
resp = self.client.get(
f"{self.path}?response_type=token&redirect_uri={self.custom_uri}&client_id={self.application.client_id}"
)
assert resp.status_code == 200
resp = self.client.post(self.path, {"op": "deny"})
assert resp.status_code == 302
assert resp["Location"].startswith("sentry-mobile-agent://")
assert "#" in resp["Location"]
assert "error=access_denied" in resp["Location"]
assert "access_token=" not in resp["Location"]
assert not ApiToken.objects.filter(user=self.user).exists()
def test_invalid_scope_with_exact_match_strict_mode(self) -> None:
"""Test invalid scope error with exact match in strict mode."""
self.login_as(self.user)
resp = self.client.get(
f"{self.path}?response_type=code&redirect_uri={self.custom_uri}&client_id={self.application.client_id}&scope=invalid_scope"
)
assert resp.status_code == 302
assert resp["Location"].startswith("sentry-mobile-agent://")
assert "error=invalid_scope" in resp["Location"]
assert "code=" not in resp["Location"]
assert not ApiGrant.objects.filter(user=self.user).exists()
def test_trailing_slash_normalization_strict_mode(self) -> None:
"""Test that trailing slash differences are NOT normalized in strict mode."""
self.login_as(self.user)
# Strict mode requires exact match - trailing slash causes rejection
resp = self.client.get(
f"{self.path}?response_type=code&redirect_uri={self.custom_uri}/&client_id={self.application.client_id}"
)
# Should fail validation because strict mode requires exact match (no trailing slash normalization)
assert resp.status_code == 400
self.assertTemplateUsed("sentry/oauth-error.html")
assert resp.context["error"] == "Missing or invalid <em>redirect_uri</em> parameter."
def test_bypass_prompt_with_existing_auth_strict_mode(self) -> None:
"""Test that authorization bypass works with exact match in strict mode."""
self.login_as(self.user)
ApiAuthorization.objects.create(user=self.user, application=self.application)
resp = self.client.get(
f"{self.path}?response_type=code&redirect_uri={self.custom_uri}&client_id={self.application.client_id}"
)
grant = ApiGrant.objects.get(user=self.user)
assert grant.redirect_uri == self.custom_uri
assert grant.application == self.application
assert resp.status_code == 302
assert resp["Location"].startswith("sentry-mobile-agent://")
assert f"code={grant.code}" in resp["Location"]
| OAuthAuthorizeCustomSchemeStrictTest |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/dtbuild2/package.py | {
"start": 217,
"end": 483
} | class ____(Package):
"""Simple package which acts as a build dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/dtbuild2-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
provides("vdtbuild2")
| Dtbuild2 |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 799,
"end": 870
} | class ____(Model2A):
field2 = models.CharField(max_length=30)
| Model2B |
python | ray-project__ray | python/ray/serve/tests/test_config_files/multi_fastapi.py | {
"start": 173,
"end": 278
} | class ____:
def add(self, a: int):
return a + 1
@serve.deployment
@serve.ingress(app1)
| SubModel |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 247766,
"end": 249663
} | class ____(Annotated):
def _copy_internals(
self,
_annotations_traversal: bool = False,
ind_cols_on_fromclause: bool = False,
**kw: Any,
) -> None:
super()._copy_internals(**kw)
# passed from annotations._shallow_annotate(), _deep_annotate(), etc.
# the traversals used by annotations for these cases are not currently
# designed around expecting that inner elements inside of
# AnnotatedFromClause's element are also deep copied, so skip for these
# cases. in other cases such as plain visitors.cloned_traverse(), we
# expect this to happen. see issue #12915
if not _annotations_traversal:
ee = self._Annotated__element # type: ignore
ee._copy_internals(**kw)
if ind_cols_on_fromclause:
# passed from annotations._deep_annotate(). See that function
# for notes
ee = self._Annotated__element # type: ignore
self.c = ee.__class__.c.fget(self) # type: ignore
@util.ro_memoized_property
def c(self) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]:
"""proxy the .c collection of the underlying FromClause.
Originally implemented in 2008 as a simple load of the .c collection
when the annotated construct was created (see d3621ae961a), in modern
SQLAlchemy versions this can be expensive for statements constructed
with ORM aliases. So for #8796 SQLAlchemy 2.0 we instead proxy
it, which works just as well.
Two different use cases seem to require the collection either copied
from the underlying one, or unique to this AnnotatedFromClause.
See test_selectable->test_annotated_corresponding_column
"""
ee = self._Annotated__element # type: ignore
return ee.c # type: ignore
| AnnotatedFromClause |
python | apache__airflow | providers/dingding/tests/unit/dingding/hooks/test_dingding.py | {
"start": 958,
"end": 8788
} | class ____:
conn_id = "dingding_conn_id_test"
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id=self.conn_id,
conn_type="dingding",
host="https://oapi.dingtalk.com",
password="you_token_here",
)
)
def test_get_endpoint_conn_id(self):
hook = DingdingHook(dingding_conn_id=self.conn_id)
endpoint = hook._get_endpoint()
assert endpoint == "robot/send?access_token=you_token_here"
def test_build_text_message_not_remind(self):
config = {
"dingding_conn_id": self.conn_id,
"message_type": "text",
"message": "Airflow dingding text message remind no one",
"at_mobiles": False,
"at_all": False,
}
expect = {
"msgtype": "text",
"text": {"content": "Airflow dingding text message remind no one"},
"at": {"atMobiles": False, "isAtAll": False},
}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_text_message_remind_specific(self):
config = {
"dingding_conn_id": self.conn_id,
"message_type": "text",
"message": "Airflow dingding text message remind specific users",
"at_mobiles": ["1234", "5768"],
"at_all": False,
}
expect = {
"msgtype": "text",
"text": {"content": "Airflow dingding text message remind specific users"},
"at": {"atMobiles": ["1234", "5768"], "isAtAll": False},
}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_text_message_remind_all(self):
config = {
"dingding_conn_id": self.conn_id,
"message_type": "text",
"message": "Airflow dingding text message remind all user in group",
"at_all": True,
}
expect = {
"msgtype": "text",
"text": {"content": "Airflow dingding text message remind all user in group"},
"at": {"atMobiles": None, "isAtAll": True},
}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_markdown_message_remind_specific(self):
msg = {
"title": "Airflow dingding markdown message",
"text": "# Markdown message title\ncontent content .. \n### sub-title\n"
"",
}
config = {
"dingding_conn_id": self.conn_id,
"message_type": "markdown",
"message": msg,
"at_mobiles": ["1234", "5678"],
"at_all": False,
}
expect = {
"msgtype": "markdown",
"markdown": msg,
"at": {"atMobiles": ["1234", "5678"], "isAtAll": False},
}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_markdown_message_remind_all(self):
msg = {
"title": "Airflow dingding markdown message",
"text": "# Markdown message title\ncontent content .. \n### sub-title\n"
"",
}
config = {
"dingding_conn_id": self.conn_id,
"message_type": "markdown",
"message": msg,
"at_all": True,
}
expect = {"msgtype": "markdown", "markdown": msg, "at": {"atMobiles": None, "isAtAll": True}}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_link_message(self):
msg = {
"title": "Airflow dingding link message",
"text": "Airflow official documentation link",
"messageUrl": "https://airflow.apache.org",
"picURL": "https://airflow.apache.org/_images/pin_large.png",
}
config = {"dingding_conn_id": self.conn_id, "message_type": "link", "message": msg}
expect = {"msgtype": "link", "link": msg}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_single_action_card_message(self):
msg = {
"title": "Airflow dingding single actionCard message",
"text": "Airflow dingding single actionCard message\n"
"\n"
"This is a official logo in Airflow website.",
"hideAvatar": "0",
"btnOrientation": "0",
"singleTitle": "read more",
"singleURL": "https://airflow.apache.org",
}
config = {"dingding_conn_id": self.conn_id, "message_type": "actionCard", "message": msg}
expect = {"msgtype": "actionCard", "actionCard": msg}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_multi_action_card_message(self):
msg = {
"title": "Airflow dingding multi actionCard message",
"text": "Airflow dingding multi actionCard message\n"
"\n"
"Airflow documentation and GitHub",
"hideAvatar": "0",
"btnOrientation": "0",
"btns": [
{"title": "Airflow Documentation", "actionURL": "https://airflow.apache.org"},
{"title": "Airflow GitHub", "actionURL": "https://github.com/apache/airflow"},
],
}
config = {"dingding_conn_id": self.conn_id, "message_type": "actionCard", "message": msg}
expect = {"msgtype": "actionCard", "actionCard": msg}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_build_feed_card_message(self):
msg = {
"links": [
{
"title": "Airflow DAG feed card",
"messageURL": "https://airflow.apache.org/docs/apache-airflow/stable/ui.html",
"picURL": "https://airflow.apache.org/_images/dags.png",
},
{
"title": "Airflow grid feed card",
"messageURL": "https://airflow.apache.org/docs/apache-airflow/stable/ui.html",
"picURL": "https://airflow.apache.org/_images/grid.png",
},
{
"title": "Airflow graph feed card",
"messageURL": "https://airflow.apache.org/docs/apache-airflow/stable/ui.html",
"picURL": "https://airflow.apache.org/_images/graph.png",
},
]
}
config = {"dingding_conn_id": self.conn_id, "message_type": "feedCard", "message": msg}
expect = {"msgtype": "feedCard", "feedCard": msg}
hook = DingdingHook(**config)
message = hook._build_message()
assert json.dumps(expect) == message
def test_send_not_support_type(self):
config = {
"dingding_conn_id": self.conn_id,
"message_type": "not_support_type",
"message": "Airflow dingding text message remind no one",
}
hook = DingdingHook(**config)
with pytest.raises(ValueError, match="receive not_support_type"):
hook.send()
| TestDingdingHook |
python | doocs__leetcode | solution/3100-3199/3120.Count the Number of Special Characters I/Solution.py | {
"start": 0,
"end": 180
} | class ____:
def numberOfSpecialChars(self, word: str) -> int:
s = set(word)
return sum(a in s and b in s for a, b in zip(ascii_lowercase, ascii_uppercase))
| Solution |
python | pypa__warehouse | warehouse/search/services.py | {
"start": 710,
"end": 998
} | class ____:
def __init__(self, **kwargs):
pass
@classmethod
def create_service(cls, context, request):
return cls()
def reindex(self, config, projects_to_update):
pass
def unindex(self, config, projects_to_delete):
pass
| NullSearchService |
python | ray-project__ray | python/ray/train/v2/_internal/exceptions.py | {
"start": 441,
"end": 976
} | class ____(RayTrainError):
"""Exception raised when a worker health check hangs for long enough."""
def __init__(self, message):
timeout = os.getenv(
WORKER_HEALTH_CHECK_TIMEOUT_S_ENV_VAR, DEFAULT_WORKER_HEALTH_CHECK_TIMEOUT_S
)
message += (
f"\nSet the {WORKER_HEALTH_CHECK_TIMEOUT_S_ENV_VAR} "
"environment variable to increase the timeout "
f"(current value: {timeout} seconds)."
)
super().__init__(message)
| WorkerHealthCheckTimeoutError |
python | scikit-learn__scikit-learn | sklearn/tests/test_metadata_routing.py | {
"start": 1446,
"end": 40566
} | class ____(BaseEstimator):
"""A very simple pipeline, assuming the last step is always a predictor.
Parameters
----------
steps : iterable of objects
An iterable of transformers with the last step being a predictor.
"""
def __init__(self, steps):
self.steps = steps
def fit(self, X, y, **fit_params):
self.steps_ = []
params = process_routing(self, "fit", **fit_params)
X_transformed = X
for i, step in enumerate(self.steps[:-1]):
transformer = clone(step).fit(
X_transformed, y, **params.get(f"step_{i}").fit
)
self.steps_.append(transformer)
X_transformed = transformer.transform(
X_transformed, **params.get(f"step_{i}").transform
)
self.steps_.append(
clone(self.steps[-1]).fit(X_transformed, y, **params.predictor.fit)
)
return self
def predict(self, X, **predict_params):
check_is_fitted(self)
X_transformed = X
params = process_routing(self, "predict", **predict_params)
for i, step in enumerate(self.steps_[:-1]):
X_transformed = step.transform(X, **params.get(f"step_{i}").transform)
return self.steps_[-1].predict(X_transformed, **params.predictor.predict)
def get_metadata_routing(self):
router = MetadataRouter(owner=self)
for i, step in enumerate(self.steps[:-1]):
router.add(
**{f"step_{i}": step},
method_mapping=MethodMapping()
.add(caller="fit", callee="fit")
.add(caller="fit", callee="transform")
.add(caller="predict", callee="transform"),
)
router.add(
predictor=self.steps[-1],
method_mapping=MethodMapping()
.add(caller="fit", callee="fit")
.add(caller="predict", callee="predict"),
)
return router
@config_context(enable_metadata_routing=True)
def test_assert_request_is_empty():
requests = MetadataRequest(owner="test")
assert_request_is_empty(requests)
requests.fit.add_request(param="foo", alias=None)
# this should still work, since None is the default value
assert_request_is_empty(requests)
requests.fit.add_request(param="bar", alias="value")
with pytest.raises(AssertionError):
# now requests is no more empty
assert_request_is_empty(requests)
# but one can exclude a method
assert_request_is_empty(requests, exclude="fit")
requests.score.add_request(param="carrot", alias=True)
with pytest.raises(AssertionError):
# excluding `fit` is not enough
assert_request_is_empty(requests, exclude="fit")
# and excluding both fit and score would avoid an exception
assert_request_is_empty(requests, exclude=["fit", "score"])
# test if a router is empty
assert_request_is_empty(
MetadataRouter(owner="test")
.add_self_request(WeightedMetaRegressor(estimator=None))
.add(
estimator=ConsumingRegressor(),
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
)
@pytest.mark.parametrize(
"estimator",
[
ConsumingClassifier(registry=_Registry()),
ConsumingRegressor(registry=_Registry()),
ConsumingTransformer(registry=_Registry()),
WeightedMetaClassifier(estimator=ConsumingClassifier(), registry=_Registry()),
WeightedMetaRegressor(estimator=ConsumingRegressor(), registry=_Registry()),
],
)
@config_context(enable_metadata_routing=True)
def test_estimator_puts_self_in_registry(estimator):
"""Check that an estimator puts itself in the registry upon fit."""
estimator.fit(X, y)
assert estimator in estimator.registry
@pytest.mark.parametrize(
"val, res",
[
(False, False),
(True, False),
(None, False),
("$UNUSED$", False),
("$WARN$", False),
("invalid-input", False),
("valid_arg", True),
],
)
@config_context(enable_metadata_routing=True)
def test_request_type_is_alias(val, res):
# Test request_is_alias
assert request_is_alias(val) == res
@pytest.mark.parametrize(
"val, res",
[
(False, True),
(True, True),
(None, True),
("$UNUSED$", True),
("$WARN$", True),
("invalid-input", False),
("alias_arg", False),
],
)
@config_context(enable_metadata_routing=True)
def test_request_type_is_valid(val, res):
# Test request_is_valid
assert request_is_valid(val) == res
@config_context(enable_metadata_routing=True)
def test_default_requests():
class OddEstimator(BaseEstimator):
__metadata_request__fit = {
# set a different default request
"sample_weight": True
} # type: ignore[var-annotated]
def fit(self, X, y=None):
return self # pragma: no cover
odd_request = get_routing_for_object(OddEstimator())
assert odd_request.fit.requests == {"sample_weight": True}
# check other test estimators
assert not len(get_routing_for_object(NonConsumingClassifier()).fit.requests)
assert_request_is_empty(NonConsumingClassifier().get_metadata_routing())
trs_request = get_routing_for_object(ConsumingTransformer())
assert trs_request.fit.requests == {
"sample_weight": None,
"metadata": None,
}
assert trs_request.transform.requests == {"metadata": None, "sample_weight": None}
assert_request_is_empty(trs_request)
est_request = get_routing_for_object(ConsumingClassifier())
assert est_request.fit.requests == {
"sample_weight": None,
"metadata": None,
}
assert_request_is_empty(est_request)
@config_context(enable_metadata_routing=True)
def test_default_request_override():
"""Test that default requests are correctly overridden regardless of the ASCII order
of the class names, hence testing small and capital letter class name starts.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28430
"""
class Base(BaseEstimator):
__metadata_request__split = {"groups": True}
def split(self, X, y=None):
pass # pragma: no cover
class class_1(Base):
__metadata_request__split = {"groups": "sample_domain"}
def split(self, X, y=None):
pass # pragma: no cover
class Class_1(Base):
__metadata_request__split = {"groups": "sample_domain"}
def split(self, X, y=None):
pass # pragma: no cover
assert_request_equal(
class_1()._get_metadata_request(), {"split": {"groups": "sample_domain"}}
)
assert_request_equal(
Class_1()._get_metadata_request(), {"split": {"groups": "sample_domain"}}
)
@config_context(enable_metadata_routing=True)
def test_process_routing_invalid_method():
with pytest.raises(TypeError, match="Can only route and process input"):
process_routing(ConsumingClassifier(), "invalid_method", groups=my_groups)
@config_context(enable_metadata_routing=True)
def test_process_routing_invalid_object():
class InvalidObject:
pass
with pytest.raises(AttributeError, match="either implement the routing method"):
process_routing(InvalidObject(), "fit", groups=my_groups)
@pytest.mark.parametrize("method", METHODS)
@pytest.mark.parametrize("default", [None, "default", []])
@config_context(enable_metadata_routing=True)
def test_process_routing_empty_params_get_with_default(method, default):
empty_params = {}
routed_params = process_routing(ConsumingClassifier(), "fit", **empty_params)
# Behaviour should be an empty dictionary returned for each method when retrieved.
params_for_method = routed_params[method]
assert isinstance(params_for_method, dict)
assert set(params_for_method.keys()) == set(METHODS)
# No default to `get` should be equivalent to the default
default_params_for_method = routed_params.get(method, default=default)
assert default_params_for_method == params_for_method
@config_context(enable_metadata_routing=True)
def test_simple_metadata_routing():
# Tests that metadata is properly routed
# The underlying estimator doesn't accept or request metadata
clf = WeightedMetaClassifier(estimator=NonConsumingClassifier())
clf.fit(X, y)
# Meta-estimator consumes sample_weight, but doesn't forward it to the underlying
# estimator
clf = WeightedMetaClassifier(estimator=NonConsumingClassifier())
clf.fit(X, y, sample_weight=my_weights)
# If the estimator accepts the metadata but doesn't explicitly say it doesn't
# need it, there's an error
clf = WeightedMetaClassifier(estimator=ConsumingClassifier())
err_message = (
"[sample_weight] are passed but are not explicitly set as requested or"
" not requested for ConsumingClassifier.fit"
)
with pytest.raises(ValueError, match=re.escape(err_message)):
clf.fit(X, y, sample_weight=my_weights)
# Explicitly saying the estimator doesn't need it, makes the error go away,
# because in this case `WeightedMetaClassifier` consumes `sample_weight`. If
# there was no consumer of sample_weight, passing it would result in an
# error.
clf = WeightedMetaClassifier(
estimator=ConsumingClassifier().set_fit_request(sample_weight=False)
)
# this doesn't raise since WeightedMetaClassifier itself is a consumer,
# and passing metadata to the consumer directly is fine regardless of its
# metadata_request values.
clf.fit(X, y, sample_weight=my_weights)
check_recorded_metadata(clf.estimator_, method="fit", parent="fit")
# Requesting a metadata will make the meta-estimator forward it correctly
clf = WeightedMetaClassifier(
estimator=ConsumingClassifier().set_fit_request(sample_weight=True)
)
clf.fit(X, y, sample_weight=my_weights)
check_recorded_metadata(
clf.estimator_, method="fit", parent="fit", sample_weight=my_weights
)
# And requesting it with an alias
clf = WeightedMetaClassifier(
estimator=ConsumingClassifier().set_fit_request(
sample_weight="alternative_weight"
)
)
clf.fit(X, y, alternative_weight=my_weights)
check_recorded_metadata(
clf.estimator_, method="fit", parent="fit", sample_weight=my_weights
)
@config_context(enable_metadata_routing=True)
def test_nested_routing():
# check if metadata is routed in a nested routing situation.
pipeline = SimplePipeline(
[
MetaTransformer(
transformer=ConsumingTransformer()
.set_fit_request(metadata=True, sample_weight=False)
.set_transform_request(sample_weight=True, metadata=False)
),
WeightedMetaRegressor(
estimator=ConsumingRegressor()
.set_fit_request(sample_weight="inner_weights", metadata=False)
.set_predict_request(sample_weight=False)
).set_fit_request(sample_weight="outer_weights"),
]
)
w1, w2, w3 = [1], [2], [3]
pipeline.fit(
X, y, metadata=my_groups, sample_weight=w1, outer_weights=w2, inner_weights=w3
)
check_recorded_metadata(
pipeline.steps_[0].transformer_,
method="fit",
parent="fit",
metadata=my_groups,
)
check_recorded_metadata(
pipeline.steps_[0].transformer_,
method="transform",
parent="fit",
sample_weight=w1,
)
check_recorded_metadata(
pipeline.steps_[1], method="fit", parent="fit", sample_weight=w2
)
check_recorded_metadata(
pipeline.steps_[1].estimator_, method="fit", parent="fit", sample_weight=w3
)
pipeline.predict(X, sample_weight=w3)
check_recorded_metadata(
pipeline.steps_[0].transformer_,
method="transform",
parent="fit",
sample_weight=w3,
)
@config_context(enable_metadata_routing=True)
def test_nested_routing_conflict():
# check if an error is raised if there's a conflict between keys
pipeline = SimplePipeline(
[
MetaTransformer(
transformer=ConsumingTransformer()
.set_fit_request(metadata=True, sample_weight=False)
.set_transform_request(sample_weight=True)
),
WeightedMetaRegressor(
estimator=ConsumingRegressor().set_fit_request(sample_weight=True)
).set_fit_request(sample_weight="outer_weights"),
]
)
w1, w2 = [1], [2]
with pytest.raises(
ValueError,
match=(
re.escape(
"In WeightedMetaRegressor, there is a conflict on sample_weight between"
" what is requested for this estimator and what is requested by its"
" children. You can resolve this conflict by using an alias for the"
" child estimators' requested metadata."
)
),
):
pipeline.fit(X, y, metadata=my_groups, sample_weight=w1, outer_weights=w2)
@config_context(enable_metadata_routing=True)
def test_invalid_metadata():
# check that passing wrong metadata raises an error
trs = MetaTransformer(
transformer=ConsumingTransformer().set_transform_request(sample_weight=True)
)
with pytest.raises(
TypeError,
match=(re.escape("transform got unexpected argument(s) {'other_param'}")),
):
trs.fit(X, y).transform(X, other_param=my_weights)
# passing a metadata which is not requested by any estimator should also raise
trs = MetaTransformer(
transformer=ConsumingTransformer().set_transform_request(sample_weight=False)
)
with pytest.raises(
TypeError,
match=(re.escape("transform got unexpected argument(s) {'sample_weight'}")),
):
trs.fit(X, y).transform(X, sample_weight=my_weights)
@config_context(enable_metadata_routing=True)
def test_get_metadata_routing():
class TestDefaults(_MetadataRequester):
__metadata_request__fit = {
"sample_weight": None,
"my_other_param": None,
}
__metadata_request__score = {
"sample_weight": None,
"my_param": True,
"my_other_param": None,
}
__metadata_request__predict = {"my_param": True}
def fit(self, X, y=None):
return self # pragma: no cover
def score(self, X, y=None):
pass # pragma: no cover
def predict(self, X):
pass # pragma: no cover
expected = {
"score": {
"my_param": True,
"my_other_param": None,
"sample_weight": None,
},
"fit": {
"my_other_param": None,
"sample_weight": None,
},
"predict": {"my_param": True},
}
assert_request_equal(TestDefaults().get_metadata_routing(), expected)
est = TestDefaults().set_score_request(my_param="other_param")
expected = {
"score": {
"my_param": "other_param",
"my_other_param": None,
"sample_weight": None,
},
"fit": {
"my_other_param": None,
"sample_weight": None,
},
"predict": {"my_param": True},
}
assert_request_equal(est.get_metadata_routing(), expected)
est = TestDefaults().set_fit_request(sample_weight=True)
expected = {
"score": {
"my_param": True,
"my_other_param": None,
"sample_weight": None,
},
"fit": {
"my_other_param": None,
"sample_weight": True,
},
"predict": {"my_param": True},
}
assert_request_equal(est.get_metadata_routing(), expected)
@config_context(enable_metadata_routing=True)
def test_setting_default_requests():
# Test _get_default_requests method
test_cases = dict()
class ExplicitRequest(BaseEstimator):
# `fit` doesn't accept `props` explicitly, but we want to request it
__metadata_request__fit = {"prop": None}
def fit(self, X, y, **kwargs):
return self
test_cases[ExplicitRequest] = {"prop": None}
class ExplicitRequestOverwrite(BaseEstimator):
# `fit` explicitly accepts `props`, but we want to change the default
# request value from None to True
__metadata_request__fit = {"prop": True}
def fit(self, X, y, prop=None, **kwargs):
return self
test_cases[ExplicitRequestOverwrite] = {"prop": True}
class ImplicitRequest(BaseEstimator):
# `fit` requests `prop` and the default None should be used
def fit(self, X, y, prop=None, **kwargs):
return self
test_cases[ImplicitRequest] = {"prop": None}
class ImplicitRequestRemoval(BaseEstimator):
# `fit` (in this class or a parent) requests `prop`, but we don't want
# it requested at all.
__metadata_request__fit = {"prop": metadata_routing.UNUSED}
def fit(self, X, y, prop=None, **kwargs):
return self
test_cases[ImplicitRequestRemoval] = {}
for Klass, requests in test_cases.items():
assert get_routing_for_object(Klass()).fit.requests == requests
assert_request_is_empty(Klass().get_metadata_routing(), exclude="fit")
Klass().fit(None, None) # for coverage
@config_context(enable_metadata_routing=True)
def test_removing_non_existing_param_raises():
"""Test that removing a metadata using UNUSED which doesn't exist raises."""
class InvalidRequestRemoval(BaseEstimator):
# `fit` (in this class or a parent) requests `prop`, but we don't want
# it requested at all.
__metadata_request__fit = {"prop": metadata_routing.UNUSED}
def fit(self, X, y, **kwargs):
return self
with pytest.raises(ValueError, match="Trying to remove parameter"):
InvalidRequestRemoval().get_metadata_routing()
@config_context(enable_metadata_routing=True)
def test_method_metadata_request():
mmr = MethodMetadataRequest(owner="test", method="fit")
with pytest.raises(ValueError, match="The alias you're setting for"):
mmr.add_request(param="foo", alias=1.4)
mmr.add_request(param="foo", alias=None)
assert mmr.requests == {"foo": None}
mmr.add_request(param="foo", alias=False)
assert mmr.requests == {"foo": False}
mmr.add_request(param="foo", alias=True)
assert mmr.requests == {"foo": True}
mmr.add_request(param="foo", alias="foo")
assert mmr.requests == {"foo": True}
mmr.add_request(param="foo", alias="bar")
assert mmr.requests == {"foo": "bar"}
assert mmr._get_param_names(return_alias=False) == {"foo"}
assert mmr._get_param_names(return_alias=True) == {"bar"}
@config_context(enable_metadata_routing=True)
def test_get_routing_for_object():
class Consumer(BaseEstimator):
__metadata_request__fit = {"prop": None}
def fit(self, X, y=None):
return self # pragma: no cover
assert_request_is_empty(get_routing_for_object(None))
assert_request_is_empty(get_routing_for_object(object()))
mr = MetadataRequest(owner="test")
mr.fit.add_request(param="foo", alias="bar")
mr_factory = get_routing_for_object(mr)
assert_request_is_empty(mr_factory, exclude="fit")
assert mr_factory.fit.requests == {"foo": "bar"}
mr = get_routing_for_object(Consumer())
assert_request_is_empty(mr, exclude="fit")
assert mr.fit.requests == {"prop": None}
@config_context(enable_metadata_routing=True)
def test_metadata_request_consumes_method():
"""Test that MetadataRequest().consumes() method works as expected."""
request = MetadataRequest(owner="test")
assert request.consumes(method="fit", params={"foo"}) == set()
request = MetadataRequest(owner="test")
request.fit.add_request(param="foo", alias=True)
assert request.consumes(method="fit", params={"foo"}) == {"foo"}
request = MetadataRequest(owner="test")
request.fit.add_request(param="foo", alias="bar")
assert request.consumes(method="fit", params={"bar", "foo"}) == {"bar"}
@config_context(enable_metadata_routing=True)
def test_metadata_router_consumes_method():
"""Test that MetadataRouter().consumes method works as expected."""
# having it here instead of parametrizing the test since `set_fit_request`
# is not available while collecting the tests.
cases = [
(
WeightedMetaRegressor(
estimator=ConsumingRegressor().set_fit_request(sample_weight=True)
),
{"sample_weight"},
{"sample_weight"},
),
(
WeightedMetaRegressor(
estimator=ConsumingRegressor().set_fit_request(
sample_weight="my_weights"
)
),
{"my_weights", "sample_weight"},
{"my_weights"},
),
]
for obj, input, output in cases:
assert obj.get_metadata_routing().consumes(method="fit", params=input) == output
@config_context(enable_metadata_routing=True)
def test_metaestimator_warnings():
class WeightedMetaRegressorWarn(WeightedMetaRegressor):
__metadata_request__fit = {"sample_weight": metadata_routing.WARN}
with pytest.warns(
UserWarning, match="Support for .* has recently been added to .* class"
):
WeightedMetaRegressorWarn(
estimator=LinearRegression().set_fit_request(sample_weight=False)
).fit(X, y, sample_weight=my_weights)
@config_context(enable_metadata_routing=True)
def test_estimator_warnings():
class ConsumingRegressorWarn(ConsumingRegressor):
__metadata_request__fit = {"sample_weight": metadata_routing.WARN}
with pytest.warns(
UserWarning, match="Support for .* has recently been added to .* class"
):
MetaRegressor(estimator=ConsumingRegressorWarn()).fit(
X, y, sample_weight=my_weights
)
@config_context(enable_metadata_routing=True)
@pytest.mark.parametrize(
"obj, string",
[
(
MethodMetadataRequest(owner="test", method="fit").add_request(
param="foo", alias="bar"
),
"{'foo': 'bar'}",
),
(
MetadataRequest(owner="test"),
"{}",
),
(
MetadataRouter(owner="test").add(
estimator=ConsumingRegressor(),
method_mapping=MethodMapping().add(caller="predict", callee="predict"),
),
(
"{'estimator': {'mapping': [{'caller': 'predict', 'callee':"
" 'predict'}], 'router': {'fit': {'sample_weight': None, 'metadata':"
" None}, 'partial_fit': {'sample_weight': None, 'metadata': None},"
" 'predict': {'sample_weight': None, 'metadata': None}, 'score':"
" {'sample_weight': None, 'metadata': None}}}}"
),
),
],
)
@config_context(enable_metadata_routing=True)
def test_string_representations(obj, string):
assert str(obj) == string
@pytest.mark.parametrize(
"obj, method, inputs, err_cls, err_msg",
[
(
MethodMapping(),
"add",
{"caller": "fit", "callee": "invalid"},
ValueError,
"Given callee",
),
(
MethodMapping(),
"add",
{"caller": "invalid", "callee": "fit"},
ValueError,
"Given caller",
),
(
MetadataRouter(owner="test"),
"add_self_request",
{"obj": MetadataRouter(owner="test")},
ValueError,
"Given `obj` is neither a `MetadataRequest` nor does it implement",
),
(
ConsumingClassifier(),
"set_fit_request",
{"invalid": True},
TypeError,
"Unexpected args",
),
],
)
@config_context(enable_metadata_routing=True)
def test_validations(obj, method, inputs, err_cls, err_msg):
with pytest.raises(err_cls, match=err_msg):
getattr(obj, method)(**inputs)
@config_context(enable_metadata_routing=True)
def test_methodmapping():
mm = (
MethodMapping()
.add(caller="fit", callee="transform")
.add(caller="fit", callee="fit")
)
mm_list = list(mm)
assert mm_list[0] == ("fit", "transform")
assert mm_list[1] == ("fit", "fit")
mm = MethodMapping()
for method in METHODS:
mm.add(caller=method, callee=method)
assert MethodPair(method, method) in mm._routes
assert len(mm._routes) == len(METHODS)
mm = MethodMapping().add(caller="score", callee="score")
assert repr(mm) == "[{'caller': 'score', 'callee': 'score'}]"
@config_context(enable_metadata_routing=True)
def test_metadatarouter_add_self_request():
# adding a MetadataRequest as `self` adds a copy
request = MetadataRequest(owner="nested")
request.fit.add_request(param="param", alias=True)
router = MetadataRouter(owner="test").add_self_request(request)
assert str(router._self_request) == str(request)
# should be a copy, not the same object
assert router._self_request is not request
# one can add an estimator as self
est = ConsumingRegressor().set_fit_request(sample_weight="my_weights")
router = MetadataRouter(owner="test").add_self_request(obj=est)
assert str(router._self_request) == str(est.get_metadata_routing())
assert router._self_request is not est.get_metadata_routing()
# adding a consumer+router as self should only add the consumer part
est = WeightedMetaRegressor(
estimator=ConsumingRegressor().set_fit_request(sample_weight="nested_weights")
)
router = MetadataRouter(owner="test").add_self_request(obj=est)
# _get_metadata_request() returns the consumer part of the requests
assert str(router._self_request) == str(est._get_metadata_request())
# get_metadata_routing() returns the complete request set, consumer and
# router included.
assert str(router._self_request) != str(est.get_metadata_routing())
# it should be a copy, not the same object
assert router._self_request is not est._get_metadata_request()
@config_context(enable_metadata_routing=True)
def test_metadata_routing_add():
# adding one with a string `method_mapping`
router = MetadataRouter(owner="test").add(
est=ConsumingRegressor().set_fit_request(sample_weight="weights"),
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
assert (
str(router)
== "{'est': {'mapping': [{'caller': 'fit', 'callee': 'fit'}], 'router': {'fit':"
" {'sample_weight': 'weights', 'metadata': None}, 'partial_fit':"
" {'sample_weight': None, 'metadata': None}, 'predict': {'sample_weight':"
" None, 'metadata': None}, 'score': {'sample_weight': None, 'metadata':"
" None}}}}"
)
# adding one with an instance of MethodMapping
router = MetadataRouter(owner="test").add(
method_mapping=MethodMapping().add(caller="fit", callee="score"),
est=ConsumingRegressor().set_score_request(sample_weight=True),
)
assert (
str(router)
== "{'est': {'mapping': [{'caller': 'fit', 'callee': 'score'}], 'router':"
" {'fit': {'sample_weight': None, 'metadata': None}, 'partial_fit':"
" {'sample_weight': None, 'metadata': None}, 'predict': {'sample_weight':"
" None, 'metadata': None}, 'score': {'sample_weight': True, 'metadata':"
" None}}}}"
)
@config_context(enable_metadata_routing=True)
def test_metadata_routing_get_param_names():
router = (
MetadataRouter(owner="test")
.add_self_request(
WeightedMetaRegressor(estimator=ConsumingRegressor()).set_fit_request(
sample_weight="self_weights"
)
)
.add(
trs=ConsumingTransformer().set_fit_request(
sample_weight="transform_weights"
),
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
)
assert (
str(router)
== "{'$self_request': {'fit': {'sample_weight': 'self_weights'}, 'score':"
" {'sample_weight': None}}, 'trs': {'mapping': [{'caller': 'fit', 'callee':"
" 'fit'}], 'router': {'fit': {'sample_weight': 'transform_weights',"
" 'metadata': None}, 'transform': {'sample_weight': None, 'metadata': None},"
" 'inverse_transform': {'sample_weight': None, 'metadata': None}}}}"
)
assert router._get_param_names(
method="fit", return_alias=True, ignore_self_request=False
) == {"transform_weights", "metadata", "self_weights"}
# return_alias=False will return original names for "self"
assert router._get_param_names(
method="fit", return_alias=False, ignore_self_request=False
) == {"sample_weight", "metadata", "transform_weights"}
# ignoring self would remove "sample_weight"
assert router._get_param_names(
method="fit", return_alias=False, ignore_self_request=True
) == {"metadata", "transform_weights"}
# return_alias is ignored when ignore_self_request=True
assert router._get_param_names(
method="fit", return_alias=True, ignore_self_request=True
) == router._get_param_names(
method="fit", return_alias=False, ignore_self_request=True
)
@config_context(enable_metadata_routing=True)
def test_method_generation():
# Test if all required request methods are generated.
# TODO: these test classes can be moved to sklearn.utils._testing once we
# have a better idea of what the commonly used classes are.
class SimpleEstimator(BaseEstimator):
# This class should have no set_{method}_request
def fit(self, X, y):
pass # pragma: no cover
def fit_transform(self, X, y):
pass # pragma: no cover
def fit_predict(self, X, y):
pass # pragma: no cover
def partial_fit(self, X, y):
pass # pragma: no cover
def predict(self, X):
pass # pragma: no cover
def predict_proba(self, X):
pass # pragma: no cover
def predict_log_proba(self, X):
pass # pragma: no cover
def decision_function(self, X):
pass # pragma: no cover
def score(self, X, y):
pass # pragma: no cover
def split(self, X, y=None):
pass # pragma: no cover
def transform(self, X):
pass # pragma: no cover
def inverse_transform(self, X):
pass # pragma: no cover
for method in METHODS:
assert not hasattr(SimpleEstimator(), f"set_{method}_request")
class SimpleEstimator(BaseEstimator):
# This class should have every set_{method}_request
def fit(self, X, y, sample_weight=None):
pass # pragma: no cover
def fit_transform(self, X, y, sample_weight=None):
pass # pragma: no cover
def fit_predict(self, X, y, sample_weight=None):
pass # pragma: no cover
def partial_fit(self, X, y, sample_weight=None):
pass # pragma: no cover
def predict(self, X, sample_weight=None):
pass # pragma: no cover
def predict_proba(self, X, sample_weight=None):
pass # pragma: no cover
def predict_log_proba(self, X, sample_weight=None):
pass # pragma: no cover
def decision_function(self, X, sample_weight=None):
pass # pragma: no cover
def score(self, X, y, sample_weight=None):
pass # pragma: no cover
def split(self, X, y=None, sample_weight=None):
pass # pragma: no cover
def transform(self, X, sample_weight=None):
pass # pragma: no cover
def inverse_transform(self, X, sample_weight=None):
pass # pragma: no cover
# composite methods shouldn't have a corresponding set method.
for method in COMPOSITE_METHODS:
assert not hasattr(SimpleEstimator(), f"set_{method}_request")
# simple methods should have a corresponding set method.
for method in SIMPLE_METHODS:
assert hasattr(SimpleEstimator(), f"set_{method}_request")
@config_context(enable_metadata_routing=True)
def test_composite_methods():
# Test the behavior and the values of methods (composite methods) whose
# request values are a union of requests by other methods (simple methods).
# fit_transform and fit_predict are the only composite methods we have in
# scikit-learn.
class SimpleEstimator(BaseEstimator):
# This class should have every set_{method}_request
def fit(self, X, y, foo=None, bar=None):
pass # pragma: no cover
def predict(self, X, foo=None, bar=None):
pass # pragma: no cover
def transform(self, X, other_param=None):
pass # pragma: no cover
est = SimpleEstimator()
# Since no request is set for fit or predict or transform, the request for
# fit_transform and fit_predict should also be empty.
assert est.get_metadata_routing().fit_transform.requests == {
"bar": None,
"foo": None,
"other_param": None,
}
assert est.get_metadata_routing().fit_predict.requests == {"bar": None, "foo": None}
# setting the request on only one of them should raise an error
est.set_fit_request(foo=True, bar="test")
with pytest.raises(ValueError, match="Conflicting metadata requests for"):
est.get_metadata_routing().fit_predict
# setting the request on the other one should fail if not the same as the
# first method
est.set_predict_request(bar=True)
with pytest.raises(ValueError, match="Conflicting metadata requests for"):
est.get_metadata_routing().fit_predict
# now the requests are consistent and getting the requests for fit_predict
# shouldn't raise.
est.set_predict_request(foo=True, bar="test")
est.get_metadata_routing().fit_predict
# setting the request for a none-overlapping parameter would merge them
# together.
est.set_transform_request(other_param=True)
assert est.get_metadata_routing().fit_transform.requests == {
"bar": "test",
"foo": True,
"other_param": True,
}
@config_context(enable_metadata_routing=True)
def test_no_feature_flag_raises_error():
"""Test that when feature flag disabled, set_{method}_requests raises."""
with config_context(enable_metadata_routing=False):
with pytest.raises(RuntimeError, match="This method is only available"):
ConsumingClassifier().set_fit_request(sample_weight=True)
@config_context(enable_metadata_routing=True)
def test_none_metadata_passed():
"""Test that passing None as metadata when not requested doesn't raise"""
MetaRegressor(estimator=ConsumingRegressor()).fit(X, y, sample_weight=None)
@config_context(enable_metadata_routing=True)
def test_no_metadata_always_works():
"""Test that when no metadata is passed, having a meta-estimator which does
not yet support metadata routing works.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28246
"""
class Estimator(_RoutingNotSupportedMixin, BaseEstimator):
def fit(self, X, y, metadata=None):
return self
# This passes since no metadata is passed.
MetaRegressor(estimator=Estimator()).fit(X, y)
# This fails since metadata is passed but Estimator() does not support it.
with pytest.raises(
NotImplementedError, match="Estimator has not implemented metadata routing yet."
):
MetaRegressor(estimator=Estimator()).fit(X, y, metadata=my_groups)
@config_context(enable_metadata_routing=True)
def test_unsetmetadatapassederror_correct():
"""Test that UnsetMetadataPassedError raises the correct error message when
set_{method}_request is not set in nested cases."""
weighted_meta = WeightedMetaClassifier(estimator=ConsumingClassifier())
pipe = SimplePipeline([weighted_meta])
msg = re.escape(
"[metadata] are passed but are not explicitly set as requested or not requested"
" for ConsumingClassifier.fit, which is used within WeightedMetaClassifier.fit."
" Call `ConsumingClassifier.set_fit_request({metadata}=True/False)` for each"
" metadata you want to request/ignore."
)
with pytest.raises(UnsetMetadataPassedError, match=msg):
pipe.fit(X, y, metadata="blah")
@config_context(enable_metadata_routing=True)
def test_unsetmetadatapassederror_correct_for_composite_methods():
"""Test that UnsetMetadataPassedError raises the correct error message when
composite metadata request methods are not set in nested cases."""
consuming_transformer = ConsumingTransformer()
pipe = Pipeline([("consuming_transformer", consuming_transformer)])
msg = re.escape(
"[metadata] are passed but are not explicitly set as requested or not requested"
" for ConsumingTransformer.fit_transform, which is used within"
" Pipeline.fit_transform. Call"
" `ConsumingTransformer.set_fit_request({metadata}=True/False)"
".set_transform_request({metadata}=True/False)`"
" for each metadata you want to request/ignore."
)
with pytest.raises(UnsetMetadataPassedError, match=msg):
pipe.fit_transform(X, y, metadata="blah")
@config_context(enable_metadata_routing=True)
def test_unbound_set_methods_work():
"""Tests that if the set_{method}_request is unbound, it still works.
Also test that passing positional arguments to the set_{method}_request fails
with the right TypeError message.
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/28632
"""
class A(BaseEstimator):
def fit(self, X, y, sample_weight=None):
return self
error_message = re.escape(
"set_fit_request() takes 0 positional argument but 1 were given"
)
# Test positional arguments error before making the descriptor method unbound.
with pytest.raises(TypeError, match=error_message):
A().set_fit_request(True)
# This somehow makes the descriptor method unbound, which results in the `instance`
# argument being None, and instead `self` being passed as a positional argument
# to the descriptor method.
A.set_fit_request = A.set_fit_request
# This should pass as usual
A().set_fit_request(sample_weight=True)
# Test positional arguments error after making the descriptor method unbound.
with pytest.raises(TypeError, match=error_message):
A().set_fit_request(True)
| SimplePipeline |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor16.py | {
"start": 178,
"end": 292
} | class ____(A):
def __new__(cls) -> A:
return A()
def __init__(self, a: int) -> None:
pass
| B |
python | tensorflow__tensorflow | tensorflow/python/checkpoint/saveable_compat_test.py | {
"start": 2892,
"end": 6532
} | class ____(test.TestCase):
def test_checkpoint(self):
saveable_compat.force_checkpoint_conversion()
table_module = generate_checkpoint.TableModule()
table_module.lookup_table.insert(3, 9)
ckpt = checkpoint.Checkpoint(table_module)
checkpoint_directory = self.get_temp_dir()
checkpoint_path = os.path.join(checkpoint_directory, "ckpt")
ckpt.write(checkpoint_path)
new_table_module = generate_checkpoint.TableModule()
self.assertEqual(-1, self.evaluate(new_table_module.lookup_table.lookup(3)))
new_ckpt = checkpoint.Checkpoint(new_table_module)
new_ckpt.read(checkpoint_path).assert_consumed()
self.assertEqual(9, self.evaluate(new_table_module.lookup_table.lookup(3)))
def test_backwards_compatibility(self):
saveable_compat.force_checkpoint_conversion()
table_module = generate_checkpoint.TableModule()
table_module.lookup_table.insert(3, 9)
self.assertEqual(9, self.evaluate(table_module.lookup_table.lookup(3)))
ckpt = checkpoint.Checkpoint(table_module)
ckpt.read(_LEGACY_TABLE_CHECKPOINT_PATH).assert_consumed()
self.assertEqual(-1, self.evaluate(table_module.lookup_table.lookup(3)))
self.assertEqual(4, self.evaluate(table_module.lookup_table.lookup(2)))
def test_forward_compatibility(self):
class _MultiSpecSaveable(saveable_object.SaveableObject):
def __init__(self, obj, name):
self.obj = obj
specs = [
saveable_object.SaveSpec(obj.a, "", name + "-a"),
saveable_object.SaveSpec(obj.b, "", name + "-b")]
super(_MultiSpecSaveable, self).__init__(None, specs, name)
def restore(self, restored_tensors, restored_shapes):
del restored_shapes # Unused.
self.obj.a.assign(restored_tensors[0])
self.obj.b.assign(restored_tensors[1])
class DeprecatedTrackable(base.Trackable):
def __init__(self):
self.a = variables.Variable(1.0)
self.b = variables.Variable(2.0)
def _gather_saveables_for_checkpoint(self):
return {"foo": lambda name: _MultiSpecSaveable(self, name)}
@saveable_compat.legacy_saveable_name("foo")
class NewTrackable(base.Trackable):
def __init__(self):
self.a = variables.Variable(3.0)
self.b = variables.Variable(4.0)
def _serialize_to_tensors(self):
return {"-a": self.a, "-b": self.b}
def _restore_from_tensors(self, restored_tensors):
return control_flow_ops.group(
self.a.assign(restored_tensors["-a"]),
self.b.assign(restored_tensors["-b"]))
new = NewTrackable()
# Test with the checkpoint conversion flag disabled (normal compatibility).
saveable_compat.force_checkpoint_conversion(False)
checkpoint_path = os.path.join(self.get_temp_dir(), "ckpt")
checkpoint.Checkpoint(new).write(checkpoint_path)
dep = DeprecatedTrackable()
checkpoint.Checkpoint(dep).read(checkpoint_path).assert_consumed()
self.assertEqual(3, self.evaluate(dep.a))
self.assertEqual(4, self.evaluate(dep.b))
# Now test with the checkpoint conversion flag enabled (forward compat).
# The deprecated object will try to load from the new checkpoint.
saveable_compat.force_checkpoint_conversion()
checkpoint_path = os.path.join(self.get_temp_dir(), "ckpt2")
checkpoint.Checkpoint(new).write(checkpoint_path)
dep = DeprecatedTrackable()
checkpoint.Checkpoint(dep).read(checkpoint_path).assert_consumed()
self.assertEqual(3, self.evaluate(dep.a))
self.assertEqual(4, self.evaluate(dep.b))
if __name__ == "__main__":
test.main()
| TestForceCheckpointConversionFlag |
python | pandas-dev__pandas | pandas/core/arrays/sparse/accessor.py | {
"start": 577,
"end": 954
} | class ____:
_validation_msg = "Can only use the '.sparse' accessor with Sparse data."
def __init__(self, data=None) -> None:
self._parent = data
self._validate(data)
def _validate(self, data) -> None:
raise NotImplementedError
@delegate_names(
SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
| BaseAccessor |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 192012,
"end": 193639
} | class ____:
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
| TestStringCompare |
python | encode__django-rest-framework | tests/test_permissions.py | {
"start": 19458,
"end": 19540
} | class ____(PermissionInstanceView):
permission_classes = (BasicPerm,)
| DeniedView |
python | pexpect__pexpect | tests/test_winsize.py | {
"start": 1035,
"end": 2377
} | class ____(PexpectTestCase.PexpectTestCase):
def test_initial_winsize(self):
""" Assert initial window dimension size (24, 80). """
p = pexpect.spawn('{self.PYTHONBIN} sigwinch_report.py'
.format(self=self), timeout=3)
# default size by PtyProcess class is 24 rows by 80 columns.
p.expect_exact('Initial Size: (24, 80)')
p.close()
def test_initial_winsize_by_dimension(self):
""" Assert user-parameter window dimension size is initial. """
p = pexpect.spawn('{self.PYTHONBIN} sigwinch_report.py'
.format(self=self), timeout=3,
dimensions=(40, 100))
p.expect_exact('Initial Size: (40, 100)')
p.close()
def test_setwinsize(self):
""" Ensure method .setwinsize() sends signal caught by child. """
p = pexpect.spawn('{self.PYTHONBIN} sigwinch_report.py'
.format(self=self), timeout=3)
# Note that we must await the installation of the child process'
# signal handler,
p.expect_exact('READY')
p.setwinsize(19, 84)
p.expect_exact('SIGWINCH: (19, 84)')
p.close()
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestCaseWinsize)
| TestCaseWinsize |
python | davidhalter__jedi | test/completion/flow_analysis.py | {
"start": 3590,
"end": 4266
} | class ____():
pass
if X:
a = 1
else:
a = ''
#? int()
a
# -----------------
# Recursion issues
# -----------------
def possible_recursion_error(filename):
if filename == 'a':
return filename
# It seems like without the brackets there wouldn't be a RecursionError.
elif type(filename) == str:
return filename
if NOT_DEFINED:
s = str()
else:
s = str()
#? str()
possible_recursion_error(s)
# -----------------
# In combination with imports
# -----------------
from import_tree import flow_import
if 1 == flow_import.env:
a = 1
elif 2 == flow_import.env:
a = ''
elif 3 == flow_import.env:
a = 1.0
#? int() str()
a
| X |
python | google__pytype | pytype/compare_test.py | {
"start": 14586,
"end": 14805
} | class ____(CompareTestBase):
def test_compatible_with(self):
cls = abstract.InterpreterClass("X", [], {}, None, None, (), self._ctx)
self.assertTruthy(cls)
if __name__ == "__main__":
unittest.main()
| ClassTest |
python | eth-brownie__brownie | brownie/convert/datatypes.py | {
"start": 7060,
"end": 8378
} | class ____(str):
"""String subclass that raises TypeError when compared to a non-address."""
def __new__(cls, value: Any) -> Self:
converted_value: HexStr
if isinstance(value, str):
converted_value = value # type: ignore [assignment]
elif isinstance(value, bytes):
converted_value = bytes_to_hexstring(value)
else:
converted_value = str(value) # type: ignore [assignment]
converted_value = add_0x_prefix(converted_value)
try:
converted_value = to_checksum_address(converted_value)
except ValueError:
raise ValueError(f"{value!r} is not a valid ETH address") from None
return str.__new__(cls, converted_value) # type: ignore
def __hash__(self) -> int:
return super().__hash__()
def __eq__(self, other: Any) -> bool:
return _address_compare(self, other)
def __ne__(self, other: Any) -> bool:
return not _address_compare(self, other)
def _address_compare(a: str, b: Any) -> bool:
bstr = str(b)
if not bstr.startswith("0x") or not is_hex(bstr) or len(bstr) != 42:
raise TypeError(f"Invalid type for comparison: '{bstr}' is not a valid address")
return a.lower() == bstr.lower()
@final
@mypyc_attr(native_class=False)
| EthAddress |
python | dask__distributed | distributed/worker_state_machine.py | {
"start": 16225,
"end": 16511
} | class ____(SendMessageToScheduler):
"""Worker->Scheduler response to ``{op: steal-request}``
See also
--------
StealRequestEvent
"""
op = "steal-response"
__slots__ = ("key", "state")
key: Key
state: TaskStateState | None
@dataclass
| StealResponseMsg |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_partition_backfill.py | {
"start": 13602,
"end": 68161
} | class ____(ExecutingGraphQLContextTestMatrix):
def test_launch_full_pipeline_backfill(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "integers_partition_set",
},
"partitionNames": ["2", "3"],
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context,
PARTITION_PROGRESS_QUERY,
variables={"backfillId": backfill_id},
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
assert result.data["partitionBackfillOrError"]["numCancelable"] == 2
assert result.data["partitionBackfillOrError"]["hasCancelPermission"] is True
assert result.data["partitionBackfillOrError"]["hasResumePermission"] is True
assert len(result.data["partitionBackfillOrError"]["partitionNames"]) == 2
def test_get_partition_backfills(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
# launch a backfill for this partition set
launch_result = execute_dagster_graphql(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "integers_partition_set",
},
"partitionNames": ["2", "3"],
}
},
)
backfill_id = launch_result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context,
GET_PARTITION_BACKFILLS_QUERY,
variables={
"repositorySelector": repository_selector,
"partitionSetName": "integers_partition_set",
},
)
assert not result.errors
assert result.data
assert result.data["partitionSetOrError"]["__typename"] == "PartitionSet"
assert len(result.data["partitionSetOrError"]["backfills"]) == 1
assert result.data["partitionSetOrError"]["backfills"][0]["id"] == backfill_id
assert result.data["partitionSetOrError"]["backfills"][0]["isAssetBackfill"] is False
def test_launch_partial_backfill(self, graphql_context):
# execute a full pipeline, without the failure environment variable
repository_selector = infer_repository_selector(graphql_context)
partition_set_selector = {
"repositorySelector": repository_selector,
"partitionSetName": "chained_failure_job_partition_set",
}
# reexecute a partial pipeline
partial_steps = ["after_failure"]
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": partition_set_selector,
"partitionNames": ["2", "3"],
"reexecutionSteps": partial_steps,
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context,
PARTITION_PROGRESS_QUERY,
variables={"backfillId": backfill_id},
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
assert result.data["partitionBackfillOrError"]["numCancelable"] == 2
assert len(result.data["partitionBackfillOrError"]["partitionNames"]) == 2
assert result.data["partitionBackfillOrError"]["reexecutionSteps"] == ["after_failure"]
def test_cancel_job_backfill(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "hanging_partitioned_job_partition_set",
},
"partitionNames": ["1", "2"],
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context,
PARTITION_PROGRESS_QUERY,
variables={"backfillId": backfill_id},
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
# Update backfill data to update the partition checkpoint, but manually launch the run
# since launching the run via the backfill iteration loop will cause test process will hang forever.
backfill = graphql_context.instance.get_backfill(backfill_id)
partition_to_run = "1"
graphql_context.instance.update_backfill(
backfill.with_partition_checkpoint(partition_to_run)
)
# Launch the run that runs forever
selector = infer_job_selector(graphql_context, "hanging_partitioned_job")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {
"resources": {"hanging_asset_resource": {"config": {"file": path}}}
},
"executionMetadata": {
"tags": [
{"key": "dagster/partition", "value": partition_to_run},
{"key": BACKFILL_ID_TAG, "value": backfill_id},
]
},
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
# ensure the execution has happened
start = time.time()
while not os.path.exists(path):
time.sleep(0.1)
assert time.time() - start < 60, "timed out waiting for file"
runs = graphql_context.instance.get_runs(RunsFilter(tags={BACKFILL_ID_TAG: backfill_id}))
assert len(runs) == 1
assert runs[0].status == DagsterRunStatus.STARTED
result = execute_dagster_graphql(
graphql_context,
CANCEL_BACKFILL_MUTATION,
variables={"backfillId": backfill_id},
)
assert result.data
assert result.data["cancelPartitionBackfill"]["__typename"] == "CancelBackfillSuccess"
result = execute_dagster_graphql(
graphql_context,
PARTITION_PROGRESS_QUERY,
variables={"backfillId": backfill_id},
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "CANCELING"
start = time.time()
while (
graphql_context.instance.get_backfill(backfill_id).status != BulkActionStatus.CANCELED
):
_execute_job_backfill_iteration_with_side_effects(graphql_context, backfill_id)
assert time.time() - start < 60, "timed out waiting for backfill to cancel"
runs = graphql_context.instance.get_runs(RunsFilter(tags={BACKFILL_ID_TAG: backfill_id}))
assert len(runs) == 1
assert runs[0].status == DagsterRunStatus.CANCELED
result = execute_dagster_graphql(
graphql_context,
PARTITION_PROGRESS_QUERY,
variables={"backfillId": backfill_id},
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "CANCELED"
def test_cancel_then_retry_backfill(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "integers_partition_set",
},
"partitionNames": ["2", "3"],
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context,
CANCEL_BACKFILL_MUTATION,
variables={"backfillId": backfill_id},
)
assert result.data
assert result.data["cancelPartitionBackfill"]["__typename"] == "CancelBackfillSuccess"
# run the backfill iteration until the backfill is canceled
while (
graphql_context.instance.get_backfill(backfill_id).status != BulkActionStatus.CANCELED
):
_execute_job_backfill_iteration_with_side_effects(graphql_context, backfill_id)
result = execute_dagster_graphql(
graphql_context,
PARTITION_PROGRESS_QUERY,
variables={"backfillId": backfill_id},
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "CANCELED"
result = execute_dagster_graphql(
graphql_context,
RETRY_BACKFILL_MUTATION,
variables={
"reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"},
},
)
assert not result.errors
assert result.data
assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
retried_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"]
retried_backfill = graphql_context.instance.get_backfill(retried_backfill_id)
assert retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id
assert retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id
result = execute_dagster_graphql(
graphql_context,
PARTITION_PROGRESS_QUERY,
variables={"backfillId": retried_backfill_id},
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
assert result.data["partitionBackfillOrError"]["numCancelable"] == 2
assert len(result.data["partitionBackfillOrError"]["partitionNames"]) == 2
assert result.data["partitionBackfillOrError"]["fromFailure"]
def test_failing_job_backfill_cancels_runs(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "hanging_partitioned_job_partition_set",
},
"partitionNames": ["1", "2"],
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context,
PARTITION_PROGRESS_QUERY,
variables={"backfillId": backfill_id},
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
# Update backfill data to update the partition checkpoint, but manually launch the run
# since launching the run via the backfill iteration loop will cause test process will hang forever.
backfill = graphql_context.instance.get_backfill(backfill_id)
partition_to_run = "1"
graphql_context.instance.update_backfill(
backfill.with_partition_checkpoint(partition_to_run)
)
# Launch the run that runs forever
selector = infer_job_selector(graphql_context, "hanging_partitioned_job")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {
"resources": {"hanging_asset_resource": {"config": {"file": path}}}
},
"executionMetadata": {
"tags": [
{"key": "dagster/partition", "value": partition_to_run},
{"key": BACKFILL_ID_TAG, "value": backfill_id},
]
},
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
# ensure the execution has happened
start = time.time()
while not os.path.exists(path):
time.sleep(0.1)
assert time.time() - start < 60, "timed out waiting for file"
runs = graphql_context.instance.get_runs(RunsFilter(tags={BACKFILL_ID_TAG: backfill_id}))
assert len(runs) == 1
assert runs[0].status == DagsterRunStatus.STARTED
# simulate the backfill failing for some reason and being marked FAILING
updated_backfill = graphql_context.instance.get_backfill(backfill_id).with_status(
BulkActionStatus.FAILING
)
graphql_context.instance.update_backfill(updated_backfill)
result = execute_dagster_graphql(
graphql_context,
PARTITION_PROGRESS_QUERY,
variables={"backfillId": backfill_id},
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "FAILING"
start = time.time()
while graphql_context.instance.get_backfill(backfill_id).status != BulkActionStatus.FAILED:
_execute_job_backfill_iteration_with_side_effects(graphql_context, backfill_id)
assert time.time() - start < 60, "timed out waiting for backfill to fail"
runs = graphql_context.instance.get_runs(RunsFilter(tags={BACKFILL_ID_TAG: backfill_id}))
assert len(runs) == 1
assert runs[0].status == DagsterRunStatus.CANCELED
result = execute_dagster_graphql(
graphql_context,
PARTITION_PROGRESS_QUERY,
variables={"backfillId": backfill_id},
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "FAILED"
def test_cancel_asset_backfill(self, graphql_context):
asset_key = AssetKey("hanging_partition_asset")
partitions = ["a"]
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"partitionNames": partitions,
"assetSelection": [asset_key.to_graphql_input()],
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
# Update asset backfill data to contain requested partition, but does not execute side effects,
# since launching the run will cause test process will hang forever.
_execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id)
# Launch the run that runs forever
selector = infer_job_selector(graphql_context, "hanging_partition_asset_job")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {
"resources": {"hanging_asset_resource": {"config": {"file": path}}}
},
"executionMetadata": {
"tags": [
{"key": "dagster/partition", "value": "a"},
{"key": BACKFILL_ID_TAG, "value": backfill_id},
]
},
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
# ensure the execution has happened
start = time.time()
while not os.path.exists(path):
time.sleep(0.1)
assert time.time() - start < 60, "timed out waiting for file"
result = execute_dagster_graphql(
graphql_context,
CANCEL_BACKFILL_MUTATION,
variables={"backfillId": backfill_id},
)
assert result.data
assert result.data["cancelPartitionBackfill"]["__typename"] == "CancelBackfillSuccess"
start = time.time()
while (
graphql_context.instance.get_backfill(backfill_id).status
!= BulkActionStatus.CANCELED
):
_execute_backfill_iteration_with_side_effects(graphql_context, backfill_id)
assert time.time() - start < 60, "timed out waiting for backfill to cancel"
runs = graphql_context.instance.get_runs(
RunsFilter(tags={BACKFILL_ID_TAG: backfill_id})
)
assert len(runs) == 1
assert runs[0].status == DagsterRunStatus.CANCELED
def test_cancel_then_retry_asset_backfill(self, graphql_context):
asset_key = AssetKey("hanging_partition_asset")
partitions = ["a"]
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"partitionNames": partitions,
"assetSelection": [asset_key.to_graphql_input()],
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
# Update asset backfill data to contain requested partition, but does not execute side effects,
# since launching the run will cause test process will hang forever.
_execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id)
# Launch the run that runs forever
selector = infer_job_selector(graphql_context, "hanging_partition_asset_job")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {
"resources": {"hanging_asset_resource": {"config": {"file": path}}}
},
"executionMetadata": {
"tags": [
{"key": "dagster/partition", "value": "a"},
{"key": BACKFILL_ID_TAG, "value": backfill_id},
]
},
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"
# ensure the execution has happened
start = time.time()
while not os.path.exists(path):
time.sleep(0.1)
assert time.time() - start < 60, "timed out waiting for file"
result = execute_dagster_graphql(
graphql_context,
CANCEL_BACKFILL_MUTATION,
variables={"backfillId": backfill_id},
)
assert result.data
assert result.data["cancelPartitionBackfill"]["__typename"] == "CancelBackfillSuccess"
while (
graphql_context.instance.get_backfill(backfill_id).status
!= BulkActionStatus.CANCELED
):
_execute_backfill_iteration_with_side_effects(graphql_context, backfill_id)
runs = graphql_context.instance.get_runs(
RunsFilter(tags={BACKFILL_ID_TAG: backfill_id})
)
assert len(runs) == 1
assert runs[0].status == DagsterRunStatus.CANCELED
result = execute_dagster_graphql(
graphql_context,
RETRY_BACKFILL_MUTATION,
variables={
"reexecutionParams": {"parentRunId": backfill_id, "strategy": "FROM_FAILURE"},
},
)
assert not result.errors
assert result.data
assert result.data["reexecutePartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
retry_backfill_id = result.data["reexecutePartitionBackfill"]["backfillId"]
first_backfill = graphql_context.instance.get_backfill(backfill_id)
retried_backfill = graphql_context.instance.get_backfill(retry_backfill_id)
# no runs were successful for the first backfill, so the retry target should be the same
# as the first backfill
assert (
first_backfill.asset_backfill_data.target_subset
== retried_backfill.asset_backfill_data.target_subset
)
assert retried_backfill.tags.get(PARENT_BACKFILL_ID_TAG) == backfill_id
assert retried_backfill.tags.get(ROOT_BACKFILL_ID_TAG) == backfill_id
def test_failing_asset_backfill_cancels_runs(self, graphql_context):
    """Marking an asset backfill FAILING makes the daemon cancel its in-flight runs.

    Launches a backfill over one partition of a deliberately hanging asset,
    starts the hanging run by hand, flips the backfill to FAILING, and then
    drives daemon iterations until the backfill settles on FAILED — at which
    point the hanging run must have been CANCELED rather than left running.
    """
    asset_key = AssetKey("hanging_partition_asset")
    partitions = ["a"]
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "partitionNames": partitions,
                "assetSelection": [asset_key.to_graphql_input()],
            }
        },
    )
    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    # Update asset backfill data to contain requested partition, but does not execute side effects,
    # since launching the run will cause test process will hang forever.
    _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id)

    # Launch the run that runs forever
    selector = infer_job_selector(graphql_context, "hanging_partition_asset_job")
    with safe_tempfile_path() as path:
        result = execute_dagster_graphql(
            graphql_context,
            LAUNCH_PIPELINE_EXECUTION_MUTATION,
            variables={
                "executionParams": {
                    "selector": selector,
                    "mode": "default",
                    "runConfigData": {
                        "resources": {"hanging_asset_resource": {"config": {"file": path}}}
                    },
                    "executionMetadata": {
                        # Tag the run so it is attributed to the backfill above.
                        "tags": [
                            {"key": "dagster/partition", "value": "a"},
                            {"key": BACKFILL_ID_TAG, "value": backfill_id},
                        ]
                    },
                }
            },
        )

        assert not result.errors
        assert result.data
        assert result.data["launchPipelineExecution"]["__typename"] == "LaunchRunSuccess"

        # ensure the execution has happened: the hanging asset touches `path` once started
        start = time.time()
        while not os.path.exists(path):
            time.sleep(0.1)
            assert time.time() - start < 60, "timed out waiting for file"

        # simulate the backfill failing for some reason and being marked FAILING
        updated_backfill = graphql_context.instance.get_backfill(backfill_id).with_status(
            BulkActionStatus.FAILING
        )
        graphql_context.instance.update_backfill(updated_backfill)
        start = time.time()
        while (
            graphql_context.instance.get_backfill(backfill_id).status != BulkActionStatus.FAILED
        ):
            _execute_backfill_iteration_with_side_effects(graphql_context, backfill_id)
            assert time.time() - start < 60, "timed out waiting for backfill to fail"

        # The daemon should have canceled the hanging run, not abandoned it.
        runs = graphql_context.instance.get_runs(
            RunsFilter(tags={BACKFILL_ID_TAG: backfill_id})
        )
        assert len(runs) == 1
        assert runs[0].status == DagsterRunStatus.CANCELED

        result = execute_dagster_graphql(
            graphql_context,
            PARTITION_PROGRESS_QUERY,
            variables={"backfillId": backfill_id},
        )
        assert not result.errors
        assert result.data
        assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
        assert result.data["partitionBackfillOrError"]["status"] == "FAILED"
def test_resume_backfill(self, graphql_context):
    """Resuming a FAILED job backfill flips its status back to REQUESTED."""
    repository_selector = infer_repository_selector(graphql_context)
    # Launch a two-partition backfill against the integers partition set.
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "selector": {
                    "repositorySelector": repository_selector,
                    "partitionSetName": "integers_partition_set",
                },
                "partitionNames": ["2", "3"],
            }
        },
    )
    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
    result = execute_dagster_graphql(
        graphql_context,
        PARTITION_PROGRESS_QUERY,
        variables={"backfillId": backfill_id},
    )
    assert not result.errors
    assert result.data
    assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
    assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
    assert result.data["partitionBackfillOrError"]["numCancelable"] == 2
    assert len(result.data["partitionBackfillOrError"]["partitionNames"]) == 2

    # manually mark as failed
    backfill = graphql_context.instance.get_backfill(backfill_id)
    graphql_context.instance.update_backfill(backfill.with_status(BulkActionStatus.FAILED))

    result = execute_dagster_graphql(
        graphql_context,
        RESUME_BACKFILL_MUTATION,
        variables={"backfillId": backfill_id},
    )
    assert result.data
    assert result.data["resumePartitionBackfill"]["__typename"] == "ResumeBackfillSuccess"

    # After resuming, the backfill must report REQUESTED again.
    result = execute_dagster_graphql(
        graphql_context,
        PARTITION_PROGRESS_QUERY,
        variables={"backfillId": backfill_id},
    )
    assert not result.errors
    assert result.data
    assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
    assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
def test_backfill_run_stats(self, graphql_context):
    """Per-partition run stats reflect the latest seeded run for each partition."""
    repository_selector = infer_repository_selector(graphql_context)
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "selector": {
                    "repositorySelector": repository_selector,
                    "partitionSetName": "integers_partition_set",
                },
                "partitionNames": ["2", "3", "4", "5"],
            }
        },
    )
    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    # Seed two generations of runs per partition; the later entry supersedes
    # the earlier one, giving: 2 -> CANCELED, 3 -> FAILURE, 4 -> SUCCESS,
    # 5 -> STARTED (in progress).
    _seed_runs(
        graphql_context,
        [
            (DagsterRunStatus.SUCCESS, "5"),
            (DagsterRunStatus.STARTED, "2"),
            (DagsterRunStatus.STARTED, "3"),
            (DagsterRunStatus.STARTED, "4"),
            (DagsterRunStatus.STARTED, "5"),
            (DagsterRunStatus.CANCELED, "2"),
            (DagsterRunStatus.FAILURE, "3"),
            (DagsterRunStatus.SUCCESS, "4"),
        ],
        backfill_id,
    )

    result = execute_dagster_graphql(
        graphql_context,
        PARTITION_PROGRESS_QUERY,
        variables={"backfillId": backfill_id},
    )
    assert not result.errors
    assert result.data
    assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
    assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
    assert result.data["partitionBackfillOrError"]["numPartitions"] == 4

    run_stats = _get_run_stats(
        result.data["partitionBackfillOrError"]["partitionStatuses"]["results"]
    )
    assert run_stats.get("total") == 4
    assert run_stats.get("queued") == 0
    assert run_stats.get("in_progress") == 1
    assert run_stats.get("success") == 1
    assert run_stats.get("failure") == 1
    assert run_stats.get("canceled") == 1

    backfill = graphql_context.instance.get_backfill(backfill_id)

    # Artificially mark the backfill as complete - verify run status is INCOMPLETE until the runs all succeed
    graphql_context.instance.update_backfill(
        backfill.with_status(BulkActionStatus.COMPLETED_SUCCESS)
    )

    result = execute_dagster_graphql(
        graphql_context,
        PARTITION_PROGRESS_QUERY,
        variables={"backfillId": backfill_id},
    )

    assert result.data["partitionBackfillOrError"]["status"] == "COMPLETED_SUCCESS"
def test_asset_job_backfill_run_stats(self, graphql_context):
    """A single ranged run (partition range 2..5) counts as SUCCESS for all four partitions."""
    repository_selector = infer_repository_selector(graphql_context)
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "selector": {
                    "repositorySelector": repository_selector,
                    "partitionSetName": "integers_asset_job_partition_set",
                },
                "partitionNames": ["2", "3", "4", "5"],
            }
        },
    )
    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    # One successful run spanning the whole partition-key range.
    _seed_runs(
        graphql_context,
        [
            (DagsterRunStatus.SUCCESS, PartitionKeyRange("2", "5")),
        ],
        backfill_id,
    )

    result = execute_dagster_graphql(
        graphql_context,
        PARTITION_PROGRESS_QUERY,
        variables={"backfillId": backfill_id},
    )
    assert not result.errors
    assert result.data
    assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
    assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
    assert result.data["partitionBackfillOrError"]["numPartitions"] == 4

    run_stats = _get_run_stats(
        result.data["partitionBackfillOrError"]["partitionStatuses"]["results"]
    )
    assert run_stats.get("total") == 4
    assert run_stats.get("success") == 4
def test_asset_backfill_stats_in_topological_order(self, graphql_context):
    """assetBackfillStatuses are returned in topological (upstream-first) order."""
    # Expected ordering: the upstream asset first, then the two middle assets,
    # then the downstream asset.
    asset_key_paths_in_topo_order = [
        ["upstream_static_partitioned_asset"],
        ["middle_static_partitioned_asset_1"],
        ["middle_static_partitioned_asset_2"],
        ["downstream_static_partitioned_asset"],
    ]

    partitions = ["a", "b", "c", "d", "e", "f"]
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "partitionNames": partitions,
                "assetSelection": [
                    AssetKey(path).to_graphql_input() for path in asset_key_paths_in_topo_order
                ],
            }
        },
    )
    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    result = execute_dagster_graphql(
        graphql_context,
        BACKFILL_STATUS_BY_ASSET,
        variables={"backfillId": backfill_id},
    )

    asset_status_counts = result.data["partitionBackfillOrError"]["assetBackfillData"][
        "assetBackfillStatuses"
    ]
    assert len(asset_status_counts) == 4
    # Each entry must appear at the position dictated by the topological sort.
    for i, path in enumerate(asset_key_paths_in_topo_order):
        assert asset_status_counts[i]["assetKey"]["path"] == path
def test_asset_backfill_partition_stats(self, graphql_context):
    """Partition counts bucket into materialized / failed / in-progress per asset."""
    asset_key = AssetKey("upstream_static_partitioned_asset")
    partitions = ["a", "b", "c", "d", "e", "f"]
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "partitionNames": partitions,
                "assetSelection": [asset_key.to_graphql_input()],
            }
        },
    )
    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id)

    # Mock run outcomes for five of the six partitions; "c" gets no run and
    # therefore stays in progress.
    for partition, status in [
        ("a", DagsterRunStatus.SUCCESS),
        ("b", DagsterRunStatus.FAILURE),
        ("d", DagsterRunStatus.SUCCESS),
        ("e", DagsterRunStatus.SUCCESS),
        ("f", DagsterRunStatus.FAILURE),
    ]:
        _mock_asset_backfill_runs(graphql_context, asset_key, backfill_id, status, partition)

    _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id)

    result = execute_dagster_graphql(
        graphql_context,
        BACKFILL_STATUS_BY_ASSET,
        variables={"backfillId": backfill_id},
    )

    assert not result.errors
    assert result.data
    backfill_data = result.data["partitionBackfillOrError"]["assetBackfillData"]

    # A static partitions definition is reported as explicit keys, not ranges.
    assert backfill_data["rootTargetedPartitions"]["ranges"] is None
    assert set(backfill_data["rootTargetedPartitions"]["partitionKeys"]) == set(partitions)

    asset_partition_status_counts = backfill_data["assetBackfillStatuses"]
    assert len(asset_partition_status_counts) == 1
    assert asset_partition_status_counts[0]["assetKey"]["path"] == [
        "upstream_static_partitioned_asset"
    ]
    # 3 successes (a, d, e), 2 failures (b, f), 1 still in progress (c).
    assert asset_partition_status_counts[0]["numPartitionsTargeted"] == 6
    assert asset_partition_status_counts[0]["numPartitionsInProgress"] == 1
    assert asset_partition_status_counts[0]["numPartitionsMaterialized"] == 3
    assert asset_partition_status_counts[0]["numPartitionsFailed"] == 2
def test_asset_backfill_status_with_upstream_failure(self, graphql_context):
    """A failed upstream partition also marks its downstream partition failed.

    unpartitioned -> daily -> weekly chain: the unpartitioned root succeeds,
    the daily asset fails for 2023-01-09, and the weekly downstream is
    reported failed even though it never ran.
    """
    asset_keys = [
        AssetKey("unpartitioned_upstream_of_partitioned"),
        AssetKey("upstream_daily_partitioned_asset"),
        AssetKey("downstream_weekly_partitioned_asset"),
    ]
    partitions = ["2023-01-09"]
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "partitionNames": partitions,
                "assetSelection": [asset_key.to_graphql_input() for asset_key in asset_keys],
            }
        },
    )
    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    # Root (unpartitioned) asset materializes successfully.
    _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id)
    _mock_asset_backfill_runs(
        graphql_context,
        AssetKey("unpartitioned_upstream_of_partitioned"),
        backfill_id,
        DagsterRunStatus.SUCCESS,
        None,
    )
    # Daily asset fails for the targeted partition.
    _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id)
    _mock_asset_backfill_runs(
        graphql_context,
        AssetKey("upstream_daily_partitioned_asset"),
        backfill_id,
        DagsterRunStatus.FAILURE,
        "2023-01-09",
    )
    _execute_asset_backfill_iteration_no_side_effects(graphql_context, backfill_id)

    result = execute_dagster_graphql(
        graphql_context,
        BACKFILL_STATUS_BY_ASSET,
        variables={"backfillId": backfill_id},
    )

    assert not result.errors
    assert result.data
    backfill_data = result.data["partitionBackfillOrError"]["assetBackfillData"]

    # Time-window partitions are reported as ranges.
    assert backfill_data["rootTargetedPartitions"]["ranges"] == [
        {"start": "2023-01-09", "end": "2023-01-09"}
    ]

    asset_statuses = backfill_data["assetBackfillStatuses"]
    assert len(asset_statuses) == 3
    assert asset_statuses[0]["assetKey"]["path"] == ["unpartitioned_upstream_of_partitioned"]
    assert asset_statuses[0]["inProgress"] is False
    assert asset_statuses[0]["materialized"] is True
    assert asset_statuses[0]["failed"] is False

    assert asset_statuses[1]["assetKey"]["path"] == ["upstream_daily_partitioned_asset"]
    assert asset_statuses[1]["numPartitionsTargeted"] == 1
    assert asset_statuses[1]["numPartitionsInProgress"] == 0
    assert asset_statuses[1]["numPartitionsMaterialized"] == 0
    assert asset_statuses[1]["numPartitionsFailed"] == 1

    # Downstream is failed transitively via its failed upstream.
    assert asset_statuses[2]["assetKey"]["path"] == ["downstream_weekly_partitioned_asset"]
    assert asset_statuses[2]["numPartitionsTargeted"] == 1
    assert asset_statuses[2]["numPartitionsInProgress"] == 0
    assert asset_statuses[2]["numPartitionsMaterialized"] == 0
    assert asset_statuses[2]["numPartitionsFailed"] == 1
def test_backfill_run_completed(self, graphql_context):
    """A COMPLETED_SUCCESS backfill with all runs succeeded reports 4/4 success."""
    repository_selector = infer_repository_selector(graphql_context)
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "selector": {
                    "repositorySelector": repository_selector,
                    "partitionSetName": "integers_partition_set",
                },
                "partitionNames": ["2", "3", "4", "5"],
            }
        },
    )
    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    # Mark the backfill finished, then seed one successful run per partition.
    backfill = graphql_context.instance.get_backfill(backfill_id)

    graphql_context.instance.update_backfill(
        backfill.with_status(BulkActionStatus.COMPLETED_SUCCESS)
    )

    _seed_runs(
        graphql_context,
        [
            (DagsterRunStatus.SUCCESS, "2"),
            (DagsterRunStatus.SUCCESS, "3"),
            (DagsterRunStatus.SUCCESS, "4"),
            (DagsterRunStatus.SUCCESS, "5"),
        ],
        backfill_id,
    )

    result = execute_dagster_graphql(
        graphql_context,
        PARTITION_PROGRESS_QUERY,
        variables={"backfillId": backfill_id},
    )
    assert not result.errors
    assert result.data
    assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
    assert result.data["partitionBackfillOrError"]["status"] == "COMPLETED_SUCCESS"
    assert result.data["partitionBackfillOrError"]["numPartitions"] == 4

    run_stats = _get_run_stats(
        result.data["partitionBackfillOrError"]["partitionStatuses"]["results"]
    )
    assert run_stats.get("total") == 4
    assert run_stats.get("queued") == 0
    assert run_stats.get("in_progress") == 0
    assert run_stats.get("success") == 4
    assert run_stats.get("failure") == 0
def test_backfill_run_incomplete(self, graphql_context):
    """A COMPLETED_FAILED backfill with mixed run outcomes reports per-status counts.

    Partitions end as: 2 -> SUCCESS, 3 -> SUCCESS, 4 -> STARTED (still
    cancelable), 5 -> CANCELED.
    """
    repository_selector = infer_repository_selector(graphql_context)
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "selector": {
                    "repositorySelector": repository_selector,
                    "partitionSetName": "integers_partition_set",
                },
                "partitionNames": ["2", "3", "4", "5"],
            }
        },
    )
    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    backfill = graphql_context.instance.get_backfill(backfill_id)

    graphql_context.instance.update_backfill(
        backfill.with_status(BulkActionStatus.COMPLETED_FAILED)
    )

    _seed_runs(
        graphql_context,
        [
            (DagsterRunStatus.SUCCESS, "2"),
            (DagsterRunStatus.SUCCESS, "3"),
            (DagsterRunStatus.STARTED, "4"),
            (DagsterRunStatus.CANCELED, "5"),
        ],
        backfill_id,
    )

    result = execute_dagster_graphql(
        graphql_context,
        PARTITION_PROGRESS_QUERY,
        variables={"backfillId": backfill_id},
    )
    assert not result.errors
    assert result.data
    assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
    assert result.data["partitionBackfillOrError"]["status"] == "COMPLETED_FAILED"
    assert result.data["partitionBackfillOrError"]["numPartitions"] == 4
    # Only the STARTED run for partition "4" can still be canceled.
    assert len(result.data["partitionBackfillOrError"]["cancelableRuns"]) == 1

    run_stats = _get_run_stats(
        result.data["partitionBackfillOrError"]["partitionStatuses"]["results"]
    )
    assert run_stats.get("total") == 4
    assert run_stats.get("queued") == 0
    assert run_stats.get("in_progress") == 1
    assert run_stats.get("success") == 2
    assert run_stats.get("failure") == 0
    assert run_stats.get("canceled") == 1
def test_fetch_user_tag_from_backfill(self, graphql_context):
    """The "user" tag supplied at launch is surfaced as the backfill's `user` field."""
    user_email = "user123@abc.com"
    repository_selector = infer_repository_selector(graphql_context)
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "selector": {
                    "repositorySelector": repository_selector,
                    "partitionSetName": "integers_partition_set",
                },
                "partitionNames": ["2", "3"],
                "tags": [{"key": "user", "value": user_email}],
            }
        },
    )

    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    result = execute_dagster_graphql(
        graphql_context,
        PARTITION_PROGRESS_QUERY,
        variables={"backfillId": backfill_id},
    )
    assert not result.errors
    assert result.data
    assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
    assert result.data["partitionBackfillOrError"]["id"] == backfill_id
    assert result.data["partitionBackfillOrError"]["user"] == user_email
def test_set_title_and_description_for_backfill(self, graphql_context):
    """A valid title/description supplied at launch is round-tripped on the backfill."""
    title = "Test backfill"
    description = "This is a test backfill"
    repository_selector = infer_repository_selector(graphql_context)
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "selector": {
                    "repositorySelector": repository_selector,
                    "partitionSetName": "integers_partition_set",
                },
                "partitionNames": ["2", "3"],
                "title": title,
                "description": description,
            }
        },
    )

    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    result = execute_dagster_graphql(
        graphql_context,
        PARTITION_PROGRESS_QUERY,
        variables={"backfillId": backfill_id},
    )
    assert not result.errors
    assert result.data
    assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
    assert result.data["partitionBackfillOrError"]["id"] == backfill_id
    assert result.data["partitionBackfillOrError"]["title"] == title
    assert result.data["partitionBackfillOrError"]["description"] == description
def test_job_name_correctly_set(self, graphql_context):
    """A job backfill stores the partition set's job name ("integers") on the backfill record."""
    repository_selector = infer_repository_selector(graphql_context)
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "selector": {
                    "repositorySelector": repository_selector,
                    "partitionSetName": "integers_partition_set",
                },
                "partitionNames": ["2", "3"],
            }
        },
    )

    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    # Check the persisted record directly rather than via GraphQL.
    stored_backfill = graphql_context.instance.get_backfill(backfill_id)
    assert stored_backfill.job_name == "integers"
def test_job_name_correctly_set_for_asset_backfills(self, graphql_context):
    """Pure asset backfills have no backing job, so `job_name` stays None."""
    asset_keys = [
        AssetKey("unpartitioned_upstream_of_partitioned"),
        AssetKey("upstream_daily_partitioned_asset"),
        AssetKey("downstream_weekly_partitioned_asset"),
    ]
    partitions = ["2023-01-09"]
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "partitionNames": partitions,
                "assetSelection": [asset_key.to_graphql_input() for asset_key in asset_keys],
            }
        },
    )
    assert not result.errors
    assert result.data
    assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
    backfill_id = result.data["launchPartitionBackfill"]["backfillId"]

    stored_backfill = graphql_context.instance.get_backfill(backfill_id)
    assert stored_backfill.job_name is None
def test_set_title_and_description_for_backfill_invalid_title(self, graphql_context):
    """Launching with an invalid title ("*" and "%" characters) surfaces a PythonError."""
    title = "Title with invalid characters * %"
    description = "This is a test backfill"
    repository_selector = infer_repository_selector(graphql_context)
    result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "selector": {
                    "repositorySelector": repository_selector,
                    "partitionSetName": "integers_partition_set",
                },
                "partitionNames": ["2", "3"],
                "title": title,
                "description": description,
            }
        },
    )

    assert result.data
    # Validation failure is reported as a PythonError payload, not a GraphQL error.
    assert result.data["launchPartitionBackfill"]["__typename"] == "PythonError"
    assert (
        '"Title with invalid characters * %" is not a valid title in Dagster'
        in result.data["launchPartitionBackfill"]["message"]
    )
def test_asset_job_backfill_with_nonexistent_partition_key(self, graphql_context):
    """Requesting unknown partition keys yields PartitionKeysNotFoundError listing them."""
    repository_selector = infer_repository_selector(graphql_context)
    # launch a backfill for this partition set
    launch_result = execute_dagster_graphql(
        graphql_context,
        LAUNCH_PARTITION_BACKFILL_MUTATION,
        variables={
            "backfillParams": {
                "selector": {
                    "repositorySelector": repository_selector,
                    "partitionSetName": "integers_partition_set",
                },
                "partitionNames": ["1", "nonexistent1", "nonexistent2"],
            }
        },
    )
    assert (
        launch_result.data["launchPartitionBackfill"]["__typename"]
        == "PartitionKeysNotFoundError"
    )
    # The message enumerates exactly the missing keys (the valid "1" is not listed).
    assert (
        "Partition keys `['nonexistent1', 'nonexistent2']` could not be found"
        in launch_result.data["launchPartitionBackfill"]["message"]
    )
| TestDaemonPartitionBackfill |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 9367,
"end": 9627
} | class ____(ShopifyStream):
"""
The location API does not support any form of filtering.
https://shopify.dev/api/admin-rest/2021-07/resources/location
Therefore, only FULL_REFRESH mode is supported.
"""
data_field = "locations"
| Locations |
python | h5py__h5py | h5py/tests/test_attrs.py | {
"start": 8770,
"end": 9652
} | class ____(BaseAttrs):
def test_datatype(self):
name = make_name()
self.f[name] = np.dtype('f')
dt = self.f[name]
self.assertEqual(list(dt.attrs.keys()), [])
dt.attrs.create('a', 4.0)
self.assertEqual(list(dt.attrs.keys()), ['a'])
self.assertEqual(list(dt.attrs.values()), [4.0])
def test_python_int_uint64(writable_file):
f = writable_file
name = make_name()
data = [np.iinfo(np.int64).max, np.iinfo(np.int64).max + 1]
# Check creating a new attribute
f.attrs.create(name, data, dtype=np.uint64)
assert f.attrs[name].dtype == np.dtype(np.uint64)
np.testing.assert_array_equal(f.attrs[name], np.array(data, dtype=np.uint64))
# Check modifying an existing attribute
f.attrs.modify(name, data)
np.testing.assert_array_equal(f.attrs[name], np.array(data, dtype=np.uint64))
| TestDatatype |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_highlight.py | {
"start": 10211,
"end": 11484
} | class ____(util.MdCase):
"""Test no Pygments with custom line number class."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {
'pymdownx.highlight': {
'use_pygments': False,
'linenums_class': 'line-numbers',
'linenums': True
}
}
def test_no_pygments_linenums_custom_class(self):
"""Test with no Pygments and line numbers."""
self.check_markdown(
r'''
Text
import test
test.test()
Text
''',
r'''
<p>Text</p>
<pre class="highlight"><code>import test
test.test()</code></pre>
<p>Text</p>
''',
True
)
def test_no_pygments_linenums_custom_class_fences(self):
"""Test with no Pygments and line numbers in fences."""
self.check_markdown(
r'''
```python
import test
test.test()
```
''',
r'''
<pre class="highlight"><code class="language-python">import test
test.test()</code></pre>
''',
True
)
| TestNoPygmentsCustomLineClass |
python | PyCQA__pylint | tests/functional/g/generic_alias/generic_alias_collections.py | {
"start": 1903,
"end": 2128
} | class ____(collections.abc.Collection[int]): # [abstract-method,abstract-method,abstract-method] # __contains__, __iter__, __len__
pass
# No implementation required for 'builtins' and 'collections' types
| DerivedCollection |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-reach-city-with-discounts.py | {
"start": 223,
"end": 1364
class Solution(object):
    def minimumCost(self, n, highways, discounts):
        """
        :type n: int
        :type highways: List[List[int]]
        :type discounts: int
        :rtype: int

        Cheapest cost to travel from city 0 to city n - 1, where each highway
        [u, v, w] may optionally be taken at half price (w // 2) while a
        discount remains.  Dijkstra over states (city, remaining discounts);
        returns -1 when the destination is unreachable.
        """
        # Build an undirected adjacency list.
        # NOTE: `range` replaces the original Python-2-only `xrange`.
        adj = [[] for _ in range(n)]
        for u, v, w in highways:
            adj[u].append((v, w))
            adj[v].append((u, w))
        src, dst = 0, n - 1
        # best[city][remaining_discounts] -> cheapest known cost to that state.
        best = collections.defaultdict(lambda: collections.defaultdict(lambda: float("inf")))
        best[src][discounts] = 0
        min_heap = [(0, src, discounts)]
        while min_heap:
            result, u, k = heapq.heappop(min_heap)
            if best[u][k] < result:
                # Stale heap entry superseded by a cheaper path; skip it.
                continue
            if u == dst:
                return result
            for v, w in adj[u]:
                # Take the edge at full price.
                if result + w < best[v][k]:
                    best[v][k] = result + w
                    heapq.heappush(min_heap, (result + w, v, k))
                # Or spend one discount to take it at half price.
                if k > 0 and result + w // 2 < best[v][k - 1]:
                    best[v][k - 1] = result + w // 2
                    heapq.heappush(min_heap, (result + w // 2, v, k - 1))
        return -1
python | joerick__pyinstrument | pyinstrument/__main__.py | {
"start": 20693,
"end": 20898
class ValueWithRemainingArgs:
    """An option value consumed from argv, plus the argv left over after it."""

    def __init__(self, value: str, remaining_args: list[str]):
        # The string value consumed for the option.
        self.value = value
        # Arguments not consumed; to be handled by the caller next.
        self.remaining_args = remaining_args
if __name__ == "__main__":
main()
| ValueWithRemainingArgs |
python | scikit-learn__scikit-learn | sklearn/exceptions.py | {
"start": 466,
"end": 1167
class UnsetMetadataPassedError(ValueError):
    """Exception class to raise if a metadata is passed which is not explicitly \
    requested (metadata=True) or not requested (metadata=False).

    .. versionadded:: 1.3

    Parameters
    ----------
    message : str
        The message

    unrequested_params : dict
        A dictionary of parameters and their values which are provided but not
        requested.

    routed_params : dict
        A dictionary of routed parameters.
    """

    def __init__(self, *, message, unrequested_params, routed_params):
        super().__init__(message)
        # Keep both mappings on the exception so callers can inspect what was
        # passed versus what was actually requested/routed.
        self.unrequested_params = unrequested_params
        self.routed_params = routed_params
python | astropy__astropy | astropy/modeling/tests/test_quantities_parameters.py | {
"start": 638,
"end": 12123
} | class ____(Fittable1DModel):
@staticmethod
def evaluate(x, a):
return x
def test_parameter_quantity():
"""
Basic tests for initializing general models (that do not require units)
with parameters that have units attached.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
assert g.amplitude.value == 1.0
assert g.amplitude.unit is u.J
assert g.mean.value == 1.0
assert g.mean.unit is u.m
assert g.stddev.value == 0.1
assert g.stddev.unit is u.m
def test_parameter_set_quantity():
"""
Make sure that parameters that start off as quantities can be set to any
other quantity, regardless of whether the units of the new quantity are
compatible with the original ones.
We basically leave it up to the evaluate method to raise errors if there
are issues with incompatible units, and we don't check for consistency
at the parameter level.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Try equivalent units
g.amplitude = 4 * u.kJ
assert_quantity_allclose(g.amplitude, 4 * u.kJ)
g.mean = 3 * u.km
assert_quantity_allclose(g.mean, 3 * u.km)
g.stddev = 2 * u.mm
assert_quantity_allclose(g.stddev, 2 * u.mm)
# Try different units
g.amplitude = 2 * u.s
assert_quantity_allclose(g.amplitude, 2 * u.s)
g.mean = 2 * u.Jy
assert_quantity_allclose(g.mean, 2 * u.Jy)
def test_parameter_lose_units():
    """
    Check that parameters that have been set to a quantity that are then set to
    a value with no units raise an exception. We do this because setting a
    parameter to a value with no units is ambiguous if units were set before:
    if a parameter is 1 * u.Jy and the parameter is then set to 2, does this mean
    2 without units, or 2 * u.Jy?
    """

    g = Gaussian1D(1 * u.Jy, 3, 0.1)

    MESSAGE = (
        r"The .* parameter should be given as a .* because it was originally"
        r" initialized as a .*"
    )
    # Assigning a bare number to a parameter created with units must fail.
    with pytest.raises(UnitsError, match=MESSAGE):
        g.amplitude = 2
def test_parameter_add_units():
"""
On the other hand, if starting from a parameter with no units, we should be
able to add units since this is unambiguous.
"""
g = Gaussian1D(1, 3, 0.1)
g.amplitude = 2 * u.Jy
assert_quantity_allclose(g.amplitude, 2 * u.Jy)
def test_parameter_change_unit():
"""
Test that changing the unit on a parameter does not work. This is an
ambiguous operation because it's not clear if it means that the value should
be converted or if the unit should be changed without conversion.
"""
g = Gaussian1D(1, 1 * u.m, 0.1 * u.m)
# Setting a unit on a unitless parameter should not work
MESSAGE = (
r"Cannot attach units to parameters that were not initially specified with"
r" units"
)
with pytest.raises(ValueError, match=MESSAGE):
g.amplitude.unit = u.Jy
# But changing to another unit should not, even if it is an equivalent unit
MESSAGE = (
r"Cannot change the unit attribute directly, instead change the parameter to a"
r" new quantity"
)
with pytest.raises(ValueError, match=MESSAGE):
g.mean.unit = u.cm
def test_parameter_set_value():
"""
Test that changing the value on a parameter works as expected.
"""
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
# To set a parameter to a quantity, we simply do
g.amplitude = 2 * u.Jy
# If we try setting the value, we need to pass a non-quantity value
# TODO: determine whether this is the desired behavior?
g.amplitude.value = 4
assert_quantity_allclose(g.amplitude, 4 * u.Jy)
assert g.amplitude.value == 4
assert g.amplitude.unit is u.Jy
# If we try setting it to a Quantity, we raise an error
MESSAGE = (
r"The .value property on parameters should be set to unitless values, not"
r" Quantity objects.*"
)
with pytest.raises(TypeError, match=MESSAGE):
g.amplitude.value = 3 * u.Jy
def test_parameter_quantity_property():
"""
Test that the quantity property of Parameters behaves as expected
"""
# Since parameters have a .value and .unit parameter that return just the
# value and unit respectively, we also have a .quantity parameter that
# returns a Quantity instance.
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
assert_quantity_allclose(g.amplitude.quantity, 1 * u.Jy)
# Setting a parameter to a quantity changes the value and the default unit
g.amplitude.quantity = 5 * u.mJy
assert g.amplitude.value == 5
assert g.amplitude.unit is u.mJy
# And we can also set the parameter to a value with different units
g.amplitude.quantity = 4 * u.s
assert g.amplitude.value == 4
assert g.amplitude.unit is u.s
# But not to a value without units
MESSAGE = r"The .quantity attribute should be set to a Quantity object"
with pytest.raises(TypeError, match=MESSAGE):
g.amplitude.quantity = 3
def test_parameter_default_units_match():
# If the unit and default quantity units are different, raise an error
MESSAGE = (
r"parameter default 1.0 m does not have units equivalent to the required"
r" unit Jy"
)
with pytest.raises(ParameterDefinitionError, match=MESSAGE):
class TestC(Fittable1DModel):
a = Parameter(default=1.0 * u.m, unit=u.Jy)
@pytest.mark.parametrize(("unit", "default"), ((u.m, 1.0), (None, 1 * u.m)))
def test_parameter_defaults(unit, default):
    """
    Test that default quantities are correctly taken into account
    """

    # Both parametrizations describe the same default: either a bare float
    # plus an explicit unit, or a single Quantity carrying the unit itself.
    class TestModel(BaseTestModel):
        a = Parameter(default=default, unit=unit)

    # TODO: decide whether the default property should return a value or
    # a quantity?
    # The default unit and value should be set on the class
    assert TestModel.a.unit == u.m
    assert TestModel.a.default == 1.0
    # Check that the default unit and value are also set on a class instance
    m = TestModel()
    assert m.a.unit == u.m
    assert m.a.default == m.a.value == 1.0
    # If the parameter is set to a different value, the default is still the
    # internal default
    m = TestModel(2.0 * u.m)
    assert m.a.unit == u.m
    assert m.a.value == 2.0
    assert m.a.default == 1.0
    # Instantiate with a different, but compatible unit
    m = TestModel(2.0 * u.pc)
    assert m.a.unit == u.pc
    assert m.a.value == 2.0
    # The default is still in the original units
    # TODO: but how do we know what those units are if we don't return a
    # quantity?
    assert m.a.default == 1.0
    # Initialize with a completely different unit
    m = TestModel(2.0 * u.Jy)
    assert m.a.unit == u.Jy
    assert m.a.value == 2.0
    # TODO: this illustrates why the default doesn't make sense anymore
    assert m.a.default == 1.0
    # Instantiating with different units works, and just replaces the original unit
    # NOTE(review): despite the comment above, a plain float must fail,
    # because the parameter declares a unit.
    MESSAGE = r".* requires a Quantity for parameter .*"
    with pytest.raises(InputParameterError, match=MESSAGE):
        TestModel(1.0)
def test_parameter_quantity_arithmetic():
    """
    Arithmetic on unit-bearing parameters must yield proper Quantities.
    """
    model = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
    # Compatible units: addition commutes and produces a Quantity.
    assert model.mean + (1 * u.m) == 2 * u.m
    assert (1 * u.m) + model.mean == 2 * u.m
    # Scaling by a plain number keeps the unit attached.
    assert model.mean * 2 == (2 * u.m)
    assert 2 * model.mean == (2 * u.m)
    # Multiplying two quantities multiplies their units as well.
    assert model.mean * (2 * u.m) == (2 * (u.m**2))
    assert (2 * u.m) * model.mean == (2 * (u.m**2))
    # Unary minus and abs behave like they do on Quantity.
    assert -model.mean == (-1 * u.m)
    assert abs(-model.mean) == model.mean
    # Mixing a dimensioned parameter with a bare scalar must fail.
    MESSAGE = (
        r"Can only apply 'add' function to dimensionless quantities when other"
        r" argument .*"
    )
    with pytest.raises(UnitsError, match=MESSAGE):
        model.mean + 1
    with pytest.raises(UnitsError, match=MESSAGE):
        1 + model.mean
def test_parameter_quantity_comparison():
    """
    Basic test of comparison operations on properties with units.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
    # Essentially here we are checking that parameters behave like Quantity
    assert g.mean == 1 * u.m
    assert 1 * u.m == g.mean
    assert g.mean != 1
    assert 1 != g.mean
    assert g.mean < 2 * u.m
    assert 2 * u.m > g.mean
    # Ordering against a bare (unitless) number must fail for a
    # dimensioned parameter.
    MESSAGE = (
        r"Can only apply 'less' function to dimensionless quantities when other"
        r" argument .*"
    )
    with pytest.raises(UnitsError, match=MESSAGE):
        g.mean < 2  # noqa: B015
    with pytest.raises(UnitsError, match=MESSAGE):
        2 > g.mean  # noqa: B015
    # Repeat the same checks with array-valued parameters.
    g = Gaussian1D([1, 2] * u.J, [1, 2] * u.m, [0.1, 0.2] * u.m)
    assert np.all(g.mean == [1, 2] * u.m)
    assert np.all([1, 2] * u.m == g.mean)
    assert np.all(g.mean != [1, 2])
    assert np.all([1, 2] != g.mean)
    with pytest.raises(UnitsError, match=MESSAGE):
        g.mean < [3, 4]  # noqa: B015
    with pytest.raises(UnitsError, match=MESSAGE):
        [3, 4] > g.mean  # noqa: B015
def test_parameters_compound_models():
    """Smoke test: constructing models with Quantity parameters and composing
    them must not raise. There are deliberately no assertions — the
    intermediate results are discarded.
    """
    Pix2Sky_TAN()
    sky_coords = coord.SkyCoord(ra=5.6, dec=-72, unit=u.deg)
    lon_pole = 180 * u.deg
    n2c = RotateNative2Celestial(sky_coords.ra, sky_coords.dec, lon_pole)
    rot = Rotation2D(23)
    # Composition itself is the thing under test; the result is unused.
    rot | n2c
def test_magunit_parameter():
    """Regression test for bug reproducer in issue #13133"""
    # A constant model built from a magnitude-unit quantity must return that
    # same quantity for any (magnitude-unit) input.
    amplitude = -20.0 * u.ABmag
    const_model = Const1D(amplitude)
    assert const_model(-23.0 * u.ABmag) == amplitude
def test_log_getter():
    """Regression test for issue #14511"""
    x = 6000 * u.AA
    mdl_base = BlackBody(temperature=5000 * u.K, scale=u.Quantity(1))

    # Parameter with a getter/setter pair: values are stored via np.exp and
    # exposed via np.log, so the externally visible scale is in log space.
    class CustomBlackBody(BlackBody):
        scale = Parameter(
            "scale",
            default=1,
            bounds=(0, None),
            getter=np.log,
            setter=np.exp,
            unit=u.dimensionless_unscaled,
        )

    mdl = CustomBlackBody(temperature=5000 * u.K, scale=u.Quantity(np.log(1)))
    assert mdl.scale == np.log(1)
    # Evaluation must agree with the plain (no getter/setter) model.
    assert_quantity_allclose(mdl(x), mdl_base(x))
def test_sqrt_getter():
    """Regression test for issue #14511"""
    x = 1 * u.m
    mdl_base = Gaussian1D(mean=32 * u.m, stddev=3 * u.m)

    # Parameters stored via np.square and exposed via np.sqrt: the internal
    # value is the square of the externally visible one.
    class CustomGaussian1D(Gaussian1D):
        mean = Parameter(
            "mean",
            default=1 * u.m,
            bounds=(0, None),
            getter=np.sqrt,
            setter=np.square,
            unit=u.m,
        )
        stddev = Parameter(
            "stddev",
            default=1 * u.m,
            bounds=(0, None),
            getter=np.sqrt,
            setter=np.square,
            unit=u.m,
        )

    mdl = CustomGaussian1D(mean=np.sqrt(32 * u.m), stddev=np.sqrt(3 * u.m))
    assert mdl.mean == np.sqrt(32 * u.m)
    # Exact float equality works here because sqrt/square round-trips to the
    # same representable value for these inputs.
    assert (
        mdl.mean._internal_value == np.sqrt(32) ** 2
    )  # numerical inaccuracy results in 32.00000000000001
    assert mdl.mean._internal_unit == u.m
    assert mdl.stddev == np.sqrt(3 * u.m)
    assert (
        mdl.stddev._internal_value == np.sqrt(3) ** 2
    )  # numerical inaccuracy results in 3.0000000000000004
    assert mdl.stddev._internal_unit == u.m
    assert_quantity_allclose(mdl(x), mdl_base(x))
| BaseTestModel |
python | encode__django-rest-framework | tests/test_throttling.py | {
"start": 1345,
"end": 1479
class ____(APIView):
    # Fixture view for throttle tests: the response is deliberately trivial
    # so only the attached throttle class affects test outcomes.
    throttle_classes = (User3SecRateThrottle,)

    def get(self, request):
        # Constant payload; the throttle decides whether this is reached.
        return Response('foo')
| MockView |
python | kamyu104__LeetCode-Solutions | Python/palindrome-permutation.py | {
"start": 50,
"end": 251
class ____(object):
    def canPermutePalindrome(self, s):
        """
        :type s: str
        :rtype: bool

        A string can be rearranged into a palindrome iff at most one
        character occurs an odd number of times.
        """
        odd_counts = sum(1 for count in collections.Counter(s).values() if count % 2)
        return odd_counts <= 1
| Solution |
python | sympy__sympy | sympy/core/core.py | {
"start": 62,
"end": 547
class ____:
    """
    Base class for registry objects.

    A registry maps names to objects via attribute access. Instances act as
    singletons: attribute writes and deletes are redirected to the class
    object itself, so every instance observes the same mappings.

    All subclasses should set `__slots__ = ()` so instances cannot grow a
    per-instance `__dict__` that would shadow the shared state.
    """

    __slots__ = ()

    def __setattr__(self, name, obj):
        # Store on the class, not the instance: all instances share state.
        setattr(type(self), name, obj)

    def __delattr__(self, name):
        # Remove from the class so the deletion is visible everywhere.
        delattr(type(self), name)
| Registry |
python | FactoryBoy__factory_boy | tests/cyclic/bar.py | {
"start": 106,
"end": 193
class ____:
    """Minimal value holder pairing a `foo` object with a value `y`."""

    def __init__(self, foo, y):
        # Plain attribute assignment; no validation is performed.
        self.y = y
        self.foo = foo
| Bar |
python | pytransitions__transitions | transitions/experimental/utils.py | {
"start": 4954,
"end": 6028
class ____:
    # Class-level registry: owner class -> attribute name -> list of
    # transition configs, filled in by __set_name__ when a placeholder is
    # assigned in a class body.
    definitions = defaultdict(lambda: defaultdict(list))

    def __init__(self, configs):
        # deque so decorators can cheaply prepend configs (appendleft).
        self.configs = deque(configs)

    def __set_name__(self, owner, name):
        # Runs once at class-creation time for each class attribute that
        # holds a placeholder; records the configs under (owner, name).
        # NOTE(review): refers to the class by the hard-coded name
        # `TriggerPlaceholder` rather than type(self) — confirm the class
        # name matches, otherwise this raises NameError at class creation.
        for config in self.configs:
            TriggerPlaceholder.definitions[owner][name].append(config)

    def __call__(self, *args, **kwargs):
        # Guard: a placeholder must be replaced by a real trigger before use.
        raise RuntimeError("Trigger was not initialized correctly!")
def event(*configs):
    """Wrap one or more transition configs in a TriggerPlaceholder."""
    return TriggerPlaceholder(configs)
def add_transitions(*configs):
    """Decorator attaching transition configs to a trigger placeholder.

    When stacked, each decorator prepends its configs, so the outermost
    decorator's configs come first.
    """

    def _outer(trigger_func):
        if not isinstance(trigger_func, TriggerPlaceholder):
            # First decorator applied to a plain function: replace it with a
            # fresh placeholder carrying these configs.
            return TriggerPlaceholder(configs)
        # Already a placeholder: prepend while preserving declaration order.
        for config in reversed(configs):
            trigger_func.configs.appendleft(config)
        return trigger_func

    return _outer
def transition(source, dest=None, conditions=None, unless=None, before=None, after=None, prepare=None):
    """Build a plain-dict transition description from the given settings."""
    return dict(
        source=source,
        dest=dest,
        conditions=conditions,
        unless=unless,
        before=before,
        after=after,
        prepare=prepare,
    )
| TriggerPlaceholder |
python | kamyu104__LeetCode-Solutions | Python/prime-palindrome.py | {
"start": 58,
"end": 552
class ____(object):
    def primePalindrome(self, N):
        """
        :type N: int
        :rtype: int

        Return the smallest prime palindrome >= N.

        Every even-length palindrome is divisible by 11, so apart from 11
        itself the answer must have odd length; we therefore only enumerate
        odd-length palindromes built by mirroring a prefix.
        """
        def is_prime(n):
            # Trial division by odd candidates up to sqrt(n).
            if n < 2 or n % 2 == 0:
                return n == 2
            return all(n % d for d in range(3, int(n**.5) + 1, 2))

        # 11 is the only even-length prime palindrome; it covers 8..11.
        if 8 <= N <= 11:
            return 11
        # Fix: use range (Python 3) instead of the Python-2-only xrange.
        for i in range(10**(len(str(N))//2), 10**5):
            # Mirror i to form an odd-length palindrome, e.g. 123 -> 12321.
            j = int(str(i) + str(i)[-2::-1])
            if j >= N and is_prime(j):
                return j
| Solution |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/convolutional.py | {
"start": 70107,
"end": 79456
class ____(Conv):
  """Abstract base layer for separable nD convolution.

  This layer performs a depthwise convolution that acts separately on
  channels, followed by a pointwise convolution that mixes channels.
  If `use_bias` is True and a bias initializer is provided,
  it adds a bias vector to the output.
  It then optionally applies an activation function to produce the final output.

  Args:
    rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
    filters: Integer, the dimensionality of the output space (i.e. the number
      of filters in the convolution).
    kernel_size: A tuple or list of integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
      value for all spatial dimensions.
    strides: A tuple or list of integers specifying the strides
      of the convolution. Can be a single integer to specify the same value for
      all spatial dimensions.
      Specifying any `stride` value != 1 is incompatible with specifying
      any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive).
      `"valid"` means no padding. `"same"` results in padding with zeros evenly
      to the left/right or up/down of the input such that output has the same
      height/width dimension as the input.
    data_format: A string, one of `channels_last` (default) or `channels_first`.
      The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch_size, ..., channels)` while `channels_first` corresponds to
      inputs with shape `(batch_size, channels, ...)`.
    dilation_rate: An integer or tuple/list of 2 integers, specifying
      the dilation rate to use for dilated convolution.
      Can be a single integer to specify the same value for
      all spatial dimensions.
      Currently, specifying any `dilation_rate` value != 1 is
      incompatible with specifying any stride value != 1.
    depth_multiplier: The number of depthwise convolution output channels for
      each input channel. The total number of depthwise convolution output
      channels will be equal to `num_filters_in * depth_multiplier`.
    activation: Activation function to use.
      If you don't specify anything, no activation is applied (
      see `keras.activations`).
    use_bias: Boolean, whether the layer uses a bias.
    depthwise_initializer: An initializer for the depthwise convolution kernel (
      see `keras.initializers`). If None, then the default initializer (
      'glorot_uniform') will be used.
    pointwise_initializer: An initializer for the pointwise convolution kernel (
      see `keras.initializers`). If None, then the default initializer
      ('glorot_uniform') will be used.
    bias_initializer: An initializer for the bias vector. If None, the default
      initializer ('zeros') will be used (see `keras.initializers`).
    depthwise_regularizer: Optional regularizer for the depthwise
      convolution kernel.
    pointwise_regularizer: Optional regularizer for the pointwise
      convolution kernel.
    bias_regularizer: Optional regularizer for the bias vector.
    activity_regularizer: Optional regularizer function for the output.
    depthwise_constraint: Optional projection function to be applied to the
      depthwise kernel after being updated by an `Optimizer` (e.g. used for
      norm constraints or value constraints for layer weights). The function
      must take as input the unprojected variable and must return the
      projected variable (which must have the same shape). Constraints are
      not safe to use when doing asynchronous distributed training.
    pointwise_constraint: Optional projection function to be applied to the
      pointwise kernel after being updated by an `Optimizer`.
    bias_constraint: Optional projection function to be applied to the
      bias after being updated by an `Optimizer`.
    trainable: Boolean, if `True` the weights of this layer will be marked as
      trainable (and listed in `layer.trainable_weights`).
  """

  def __init__(self,
               rank,
               filters,
               kernel_size,
               strides=1,
               padding='valid',
               data_format=None,
               dilation_rate=1,
               depth_multiplier=1,
               activation=None,
               use_bias=True,
               depthwise_initializer='glorot_uniform',
               pointwise_initializer='glorot_uniform',
               bias_initializer='zeros',
               depthwise_regularizer=None,
               pointwise_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               depthwise_constraint=None,
               pointwise_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    # The shared convolution options (strides, padding, dilation, ...) are
    # handled by the Conv base class; only the separable-specific state is
    # stored on this layer below.
    # NOTE(review): super() refers to the class by the hard-coded name
    # `SeparableConv`; it must match this class's actual name.
    super(SeparableConv, self).__init__(
        rank=rank,
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
        activation=activations.get(activation),
        use_bias=use_bias,
        bias_initializer=initializers.get(bias_initializer),
        bias_regularizer=regularizers.get(bias_regularizer),
        activity_regularizer=regularizers.get(activity_regularizer),
        bias_constraint=bias_constraint,
        trainable=trainable,
        name=name,
        **kwargs)
    self.depth_multiplier = depth_multiplier
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.pointwise_initializer = initializers.get(pointwise_initializer)
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.pointwise_constraint = constraints.get(pointwise_constraint)

  def build(self, input_shape):
    """Create the depthwise/pointwise kernels (and optional bias).

    Raises:
      ValueError: if the channel dimension of the input is undefined.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    channel_axis = self._get_channel_axis()
    if input_shape.dims[channel_axis].value is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = int(input_shape[channel_axis])
    # Lock subsequent inputs to this rank and channel count.
    self.input_spec = InputSpec(ndim=self.rank + 2,
                                axes={channel_axis: input_dim})
    # Depthwise kernel: one (kernel_size, depth_multiplier) filter stack per
    # input channel.
    depthwise_kernel_shape = self.kernel_size + (input_dim,
                                                 self.depth_multiplier)
    # Pointwise kernel: a 1x...x1 convolution mixing the
    # depth_multiplier * input_dim depthwise outputs down to `filters`.
    pointwise_kernel_shape = (
        1,) * self.rank + (self.depth_multiplier * input_dim, self.filters)

    self.depthwise_kernel = self.add_weight(
        name='depthwise_kernel',
        shape=depthwise_kernel_shape,
        initializer=self.depthwise_initializer,
        regularizer=self.depthwise_regularizer,
        constraint=self.depthwise_constraint,
        trainable=True,
        dtype=self.dtype)
    self.pointwise_kernel = self.add_weight(
        name='pointwise_kernel',
        shape=pointwise_kernel_shape,
        initializer=self.pointwise_initializer,
        regularizer=self.pointwise_regularizer,
        constraint=self.pointwise_constraint,
        trainable=True,
        dtype=self.dtype)
    if self.use_bias:
      self.bias = self.add_weight(
          name='bias',
          shape=(self.filters,),
          initializer=self.bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint,
          trainable=True,
          dtype=self.dtype)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs):
    # Abstract: concrete subclasses supply the actual separable convolution.
    raise NotImplementedError

  def get_config(self):
    # Serialize every constructor argument so the layer can be recreated
    # via from_config; merged with the base Conv config below.
    config = {
        'filters':
            self.filters,
        'kernel_size':
            self.kernel_size,
        'strides':
            self.strides,
        'padding':
            self.padding,
        'data_format':
            self.data_format,
        'depth_multiplier':
            self.depth_multiplier,
        'dilation_rate':
            self.dilation_rate,
        'activation':
            activations.serialize(self.activation),
        'use_bias':
            self.use_bias,
        'depthwise_initializer':
            initializers.serialize(self.depthwise_initializer),
        'pointwise_initializer':
            initializers.serialize(self.pointwise_initializer),
        'bias_initializer':
            initializers.serialize(self.bias_initializer),
        'depthwise_regularizer':
            regularizers.serialize(self.depthwise_regularizer),
        'pointwise_regularizer':
            regularizers.serialize(self.pointwise_regularizer),
        'bias_regularizer':
            regularizers.serialize(self.bias_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'depthwise_constraint':
            constraints.serialize(self.depthwise_constraint),
        'pointwise_constraint':
            constraints.serialize(self.pointwise_constraint),
        'bias_constraint':
            constraints.serialize(self.bias_constraint)
    }
    base_config = super(SeparableConv, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| SeparableConv |
python | getsentry__sentry | src/sentry/search/events/datasets/metrics.py | {
"start": 733,
"end": 88049
} | class ____(DatasetConfig):
missing_function_error = IncompatibleMetricsQuery
def __init__(self, builder: metrics.MetricsQueryBuilder):
self.builder = builder
self.total_transaction_duration: float | None = None
self.total_score_weights: dict[str, int] = {}
@property
def search_filter_converter(
self,
) -> Mapping[str, Callable[[SearchFilter], WhereType | None]]:
return {
"message": self._message_filter_converter,
constants.PROJECT_ALIAS: self._project_slug_filter_converter,
constants.PROJECT_NAME_ALIAS: self._project_slug_filter_converter,
constants.EVENT_TYPE_ALIAS: self._event_type_converter,
constants.TEAM_KEY_TRANSACTION_ALIAS: self._key_transaction_filter_converter,
"environment": self.builder._environment_filter_converter,
"transaction": self._transaction_filter_converter,
"transaction.status": self._transaction_status_converter,
"tags[transaction]": self._transaction_filter_converter,
constants.TITLE_ALIAS: self._transaction_filter_converter,
constants.RELEASE_ALIAS: self._release_filter_converter,
constants.DEVICE_CLASS_ALIAS: lambda search_filter: filter_aliases.device_class_converter(
self.builder, search_filter
),
}
@property
def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]:
transaction_alias = (
self._resolve_transaction_alias_on_demand
if self.builder.use_on_demand
else self._resolve_transaction_alias
)
return {
constants.PROJECT_ALIAS: self._resolve_project_slug_alias,
constants.PROJECT_NAME_ALIAS: self._resolve_project_slug_alias,
constants.TEAM_KEY_TRANSACTION_ALIAS: self._resolve_team_key_transaction_alias,
constants.TITLE_ALIAS: self._resolve_title_alias,
constants.PROJECT_THRESHOLD_CONFIG_ALIAS: lambda _: self._resolve_project_threshold_config,
"transaction": transaction_alias,
"tags[transaction]": transaction_alias,
constants.DEVICE_CLASS_ALIAS: lambda alias: field_aliases.resolve_device_class(
self.builder, alias
),
constants.SPAN_MODULE_ALIAS: self._resolve_span_module,
}
def resolve_metric(self, value: str) -> int:
# SPAN_METRICS_MAP and METRICS_MAP have some overlapping keys
mri_map = constants.SPAN_METRICS_MAP | constants.METRICS_MAP
metric_id = self.builder.resolve_metric_index(mri_map.get(value, value))
if metric_id is None:
# Maybe this is a custom measurment?
for measurement in self.builder.custom_measurement_map:
if measurement["name"] == value and measurement["metric_id"] is not None:
metric_id = measurement["metric_id"]
# If its still None its not a custom measurement
if metric_id is None:
raise IncompatibleMetricsQuery(f"Metric: {value} could not be resolved")
self.builder.metric_ids.add(metric_id)
return metric_id
@property
def should_skip_interval_calculation(self) -> bool:
return self.builder.builder_config.skip_time_conditions and (
not self.builder.params.start or not self.builder.params.end
)
@property
def function_converter(self) -> Mapping[str, fields.MetricsFunction]:
"""While the final functions in clickhouse must have their -Merge combinators in order to function, we don't
need to add them here since snuba has a FunctionMapper that will add it for us. Basically it turns expressions
like quantiles(0.9)(value) into quantilesMerge(0.9)(percentiles)
Make sure to update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here, can't be a dynamic list since the
Metric Layer will actually handle which dataset each function goes to
"""
resolve_metric_id = {
"name": "metric_id",
"fn": lambda args: self.resolve_metric(args["column"]),
}
function_converter = {
function.name: function
for function in [
# Note while the discover version of apdex, count_miserable, user_misery
# accepts arguments, because this is precomputed with tags no parameters
# are available
fields.MetricsFunction(
"apdex",
optional_args=[fields.NullableNumberRange("satisfaction", 0, None)],
snql_distribution=self._resolve_apdex_function,
default_result_type="number",
),
fields.MetricsFunction(
"avg",
required_args=[
fields.MetricArg(
"column",
allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS
| constants.METRIC_DURATION_COLUMNS,
)
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_avg,
snql_gauge=self._resolve_avg,
result_type_fn=self.reflective_result_type(),
default_result_type="integer",
),
fields.MetricsFunction(
"avg_if",
required_args=[
fields.MetricArg(
"column",
allowed_columns=constants.METRIC_DURATION_COLUMNS,
),
fields.MetricArg(
"if_col",
allowed_columns=["release"],
),
fields.SnQLStringArg(
"if_val", unquote=True, unescape_quotes=True, optional_unquote=True
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: Function(
"avgIf",
[
Column("value"),
Function(
"and",
[
Function(
"equals",
[
Column("metric_id"),
args["metric_id"],
],
),
Function(
"equals",
[self.builder.column(args["if_col"]), args["if_val"]],
),
],
),
],
alias,
),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"count_if",
required_args=[
fields.MetricArg(
"column",
allowed_columns=constants.METRIC_DURATION_COLUMNS,
),
fields.MetricArg(
"if_col",
allowed_columns=["release"],
),
fields.SnQLStringArg(
"if_val", unquote=True, unescape_quotes=True, optional_unquote=True
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: Function(
"countIf",
[
Column("value"),
Function(
"and",
[
Function(
"equals",
[
Column("metric_id"),
args["metric_id"],
],
),
Function(
"equals",
[self.builder.column(args["if_col"]), args["if_val"]],
),
],
),
],
alias,
),
default_result_type="integer",
),
fields.MetricsFunction(
"count_miserable",
required_args=[
fields.MetricArg(
"column", allowed_columns=["user"], allow_custom_measurements=False
)
],
optional_args=[fields.NullableNumberRange("satisfaction", 0, None)],
calculated_args=[resolve_metric_id],
snql_set=self._resolve_count_miserable_function,
default_result_type="integer",
),
fields.MetricsFunction(
"count_unparameterized_transactions",
snql_distribution=lambda args, alias: Function(
"countIf",
[
Column("value"),
Function(
"and",
[
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("transaction.duration"),
],
),
Function(
"equals",
[
self.builder.column("transaction"),
self.builder.resolve_tag_value("<< unparameterized >>"),
],
),
],
),
],
alias,
),
# Not yet exposed, need to add far more validation around tag&value
private=True,
default_result_type="integer",
),
fields.MetricsFunction(
"count_null_transactions",
snql_distribution=lambda args, alias: Function(
"countIf",
[
Column("value"),
Function(
"and",
[
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("transaction.duration"),
],
),
Function(
"equals",
[
self.builder.column("transaction"),
"",
],
),
],
),
],
alias,
),
private=True,
),
fields.MetricsFunction(
"count_has_transaction_name",
snql_distribution=lambda args, alias: Function(
"countIf",
[
Column("value"),
Function(
"and",
[
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("transaction.duration"),
],
),
Function(
"and",
[
Function(
"notEquals",
[
self.builder.column("transaction"),
"",
],
),
Function(
"notEquals",
[
self.builder.column("transaction"),
self.builder.resolve_tag_value(
"<< unparameterized >>"
),
],
),
],
),
],
),
],
alias,
),
private=True,
default_result_type="integer",
),
fields.MetricsFunction(
"user_misery",
optional_args=[
fields.NullableNumberRange("satisfaction", 0, None),
fields.with_default(
constants.MISERY_ALPHA, fields.NumberRange("alpha", 0, None)
),
fields.with_default(
constants.MISERY_BETA, fields.NumberRange("beta", 0, None)
),
],
calculated_args=[],
snql_set=self._resolve_user_misery_function,
default_result_type="number",
),
fields.MetricsFunction(
"p50",
optional_args=[
fields.with_default(
"transaction.duration",
fields.MetricArg(
"column", allowed_columns=constants.METRIC_DURATION_COLUMNS
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: self._resolve_percentile(
args, alias, 0.5
),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"p75",
optional_args=[
fields.with_default(
"transaction.duration",
fields.MetricArg(
"column", allowed_columns=constants.METRIC_DURATION_COLUMNS
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: self._resolve_percentile(
args, alias, 0.75
),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"p90",
optional_args=[
fields.with_default(
"transaction.duration",
fields.MetricArg(
"column", allowed_columns=constants.METRIC_DURATION_COLUMNS
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: self._resolve_percentile(
args, alias, 0.90
),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"p95",
optional_args=[
fields.with_default(
"transaction.duration",
fields.MetricArg(
"column", allowed_columns=constants.METRIC_DURATION_COLUMNS
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: self._resolve_percentile(
args, alias, 0.95
),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"p99",
optional_args=[
fields.with_default(
"transaction.duration",
fields.MetricArg(
"column", allowed_columns=constants.METRIC_DURATION_COLUMNS
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: self._resolve_percentile(
args, alias, 0.99
),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"p100",
optional_args=[
fields.with_default(
"transaction.duration",
fields.MetricArg(
"column", allowed_columns=constants.METRIC_DURATION_COLUMNS
),
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: self._resolve_percentile(args, alias, 1),
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"max",
required_args=[
fields.MetricArg("column"),
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_max,
snql_gauge=self._resolve_max,
result_type_fn=self.reflective_result_type(),
),
fields.MetricsFunction(
"min",
required_args=[
fields.MetricArg("column"),
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_min,
snql_gauge=self._resolve_min,
result_type_fn=self.reflective_result_type(),
),
fields.MetricsFunction(
"sum",
required_args=[
fields.MetricArg("column"),
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_sum,
snql_gauge=self._resolve_sum,
result_type_fn=self.reflective_result_type(),
),
fields.MetricsFunction(
"sumIf",
required_args=[
fields.ColumnTagArg("if_col"),
fields.FunctionArg("if_val"),
],
calculated_args=[
{
"name": "resolved_val",
"fn": lambda args: self.builder.resolve_tag_value(args["if_val"]),
}
],
snql_counter=lambda args, alias: Function(
"sumIf",
[
Column("value"),
Function("equals", [args["if_col"], args["resolved_val"]]),
],
alias,
),
default_result_type="integer",
),
fields.MetricsFunction(
"percentile",
required_args=[
fields.with_default(
"transaction.duration",
fields.MetricArg(
"column", allowed_columns=constants.METRIC_DURATION_COLUMNS
),
),
fields.NumberRange("percentile", 0, 1),
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_percentile,
result_type_fn=self.reflective_result_type(),
default_result_type="duration",
),
fields.MetricsFunction(
"count_unique",
required_args=[
fields.MetricArg(
"column", allowed_columns=["user"], allow_custom_measurements=False
)
],
calculated_args=[resolve_metric_id],
snql_set=lambda args, alias: Function(
"uniqIf",
[
Column("value"),
Function("equals", [Column("metric_id"), args["metric_id"]]),
],
alias,
),
default_result_type="integer",
),
fields.MetricsFunction(
"uniq",
snql_set=lambda args, alias: Function(
"uniq",
[Column("value")],
alias,
),
),
fields.MetricsFunction(
"uniqIf",
required_args=[
fields.ColumnTagArg("if_col"),
fields.FunctionArg("if_val"),
],
calculated_args=[
{
"name": "resolved_val",
"fn": lambda args: self.builder.resolve_tag_value(args["if_val"]),
}
],
snql_set=lambda args, alias: Function(
"uniqIf",
[
Column("value"),
Function("equals", [args["if_col"], args["resolved_val"]]),
],
alias,
),
default_result_type="integer",
),
fields.MetricsFunction(
"count",
snql_distribution=lambda args, alias: Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("transaction.duration"),
],
),
],
alias,
),
default_result_type="integer",
),
fields.MetricsFunction(
"count_starts",
required_args=[
fields.MetricArg(
"column",
allowed_columns=[
"measurements.app_start_warm",
"measurements.app_start_cold",
],
allow_custom_measurements=False,
),
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_count_starts_function,
default_result_type="integer",
),
fields.MetricsFunction(
"count_total_starts",
snql_distribution=self._resolve_count_total_starts_function,
default_result_type="integer",
),
fields.MetricsFunction(
"count_web_vitals",
required_args=[
fields.MetricArg(
"column",
allowed_columns=[
"measurements.fp",
"measurements.fcp",
"measurements.lcp",
"measurements.fid",
"measurements.cls",
"measurements.ttfb",
"measurements.inp",
],
allow_custom_measurements=False,
),
fields.SnQLStringArg(
"quality", allowed_strings=["good", "meh", "poor", "any"]
),
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_web_vital_function,
default_result_type="integer",
),
fields.MetricsFunction(
"performance_score",
required_args=[
fields.MetricArg(
"column",
allowed_columns=[
"measurements.score.fcp",
"measurements.score.lcp",
"measurements.score.fid",
"measurements.score.inp",
"measurements.score.cls",
"measurements.score.ttfb",
"measurements.score.total",
],
allow_custom_measurements=False,
)
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_web_vital_score_function,
default_result_type="number",
),
fields.MetricsFunction(
"opportunity_score",
required_args=[
fields.MetricArg(
"column",
allowed_columns=[
"measurements.score.fcp",
"measurements.score.lcp",
"measurements.score.fid",
"measurements.score.inp",
"measurements.score.cls",
"measurements.score.ttfb",
"measurements.score.total",
],
allow_custom_measurements=False,
)
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_web_vital_opportunity_score_function,
default_result_type="number",
),
fields.MetricsFunction(
"total_opportunity_score",
snql_distribution=self._resolve_total_web_vital_opportunity_score_with_fixed_weights_function,
default_result_type="number",
),
fields.MetricsFunction(
"count_scores",
required_args=[
fields.MetricArg(
"column",
allowed_columns=[
"measurements.score.total",
"measurements.score.fcp",
"measurements.score.lcp",
"measurements.score.fid",
"measurements.score.inp",
"measurements.score.cls",
"measurements.score.ttfb",
],
allow_custom_measurements=False,
),
],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_count_scores_function,
default_result_type="integer",
),
fields.MetricsFunction(
"epm",
snql_distribution=self._resolve_epm,
optional_args=[fields.IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
fields.MetricsFunction(
"floored_epm",
snql_distribution=lambda args, alias: Function(
"pow",
[
10,
Function(
"floor",
[
Function(
"log10",
[
Function(
"divide",
[
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric(
"transaction.duration"
),
],
),
],
),
Function("divide", [args["interval"], 60]),
],
),
],
)
],
),
],
alias,
),
optional_args=[fields.IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
fields.MetricsFunction(
"spm",
snql_distribution=self._resolve_spm,
optional_args=[
(
fields.NullColumn("interval")
if self.should_skip_interval_calculation
else fields.IntervalDefault("interval", 1, None)
)
],
default_result_type="rate",
),
fields.MetricsFunction(
"eps",
snql_distribution=self._resolve_eps,
optional_args=[fields.IntervalDefault("interval", 1, None)],
default_result_type="rate",
),
fields.MetricsFunction(
"failure_count",
snql_distribution=self._resolve_failure_count,
default_result_type="integer",
),
fields.MetricsFunction(
"failure_rate",
snql_distribution=lambda args, alias: Function(
"divide",
[
self._resolve_failure_count(args),
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("transaction.duration"),
],
),
],
),
],
alias,
),
default_result_type="percentage",
),
fields.MetricsFunction(
"histogram",
required_args=[fields.MetricArg("column")],
calculated_args=[resolve_metric_id],
snql_distribution=self._resolve_histogram_function,
default_result_type="number",
private=True,
),
fields.MetricsFunction(
"time_spent_percentage",
optional_args=[
fields.with_default(
"app", fields.SnQLStringArg("scope", allowed_strings=["app", "local"])
)
],
snql_distribution=self._resolve_time_spent_percentage,
default_result_type="percentage",
),
fields.MetricsFunction(
"http_error_rate",
snql_distribution=lambda args, alias: Function(
"divide",
[
self._resolve_http_error_count(args),
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("transaction.duration"),
],
),
],
),
],
alias,
),
default_result_type="percentage",
),
fields.MetricsFunction(
"http_error_count",
snql_distribution=self._resolve_http_error_count,
default_result_type="integer",
),
fields.MetricsFunction(
"percentile_range",
required_args=[
fields.MetricArg(
"column",
allowed_columns=["transaction.duration"],
allow_custom_measurements=False,
),
fields.NumberRange("percentile", 0, 1),
fields.ConditionArg("condition"),
fields.SnQLDateArg("middle"),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
args=args,
alias=alias,
fixed_percentile=args["percentile"],
extra_conditions=[
Function(
args["condition"],
[
Function("toDateTime", [args["middle"]]),
self.builder.column("timestamp"),
],
),
],
),
default_result_type="duration",
),
fields.MetricsFunction(
"avg_compare",
required_args=[
fields.MetricArg(
"column",
allowed_columns=constants.METRIC_DURATION_COLUMNS,
allow_custom_measurements=False,
),
fields.MetricArg(
"comparison_column",
allowed_columns=["release"],
),
fields.SnQLStringArg(
"first_value", unquote=True, unescape_quotes=True, optional_unquote=True
),
fields.SnQLStringArg(
"second_value",
unquote=True,
unescape_quotes=True,
optional_unquote=True,
),
],
calculated_args=[resolve_metric_id],
snql_distribution=lambda args, alias: function_aliases.resolve_avg_compare(
self.builder.column, args, alias
),
default_result_type="percent_change",
),
fields.MetricsFunction(
"cache_hit_rate",
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_cache_hit_count(args),
self._resolve_cache_hit_and_miss_count(args),
alias,
),
default_result_type="percentage",
),
fields.MetricsFunction(
"cache_miss_rate",
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_cache_miss_count(args),
self._resolve_cache_hit_and_miss_count(args),
alias,
),
default_result_type="percentage",
),
fields.MetricsFunction(
"http_response_rate",
required_args=[
fields.SnQLStringArg("code"),
],
snql_distribution=lambda args, alias: function_aliases.resolve_division(
self._resolve_http_response_count(args),
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
],
),
alias,
),
default_result_type="percentage",
),
]
}
for alias, name in [
*constants.FUNCTION_ALIASES.items(),
*constants.METRICS_FUNCTION_ALIASES.items(),
]:
if name in function_converter:
function_converter[alias] = function_converter[name].alias_as(alias)
return function_converter
    @property
    def orderby_converter(self) -> Mapping[str, OrderBy]:
        """No dataset-specific orderby handling; fall back to default resolution."""
        return {}
# Field Aliases
    def _resolve_title_alias(self, alias: str) -> SelectType:
        """title == transaction in discover"""
        # `title` is a pure synonym: delegate to the transaction alias resolver.
        return self.field_alias_converter["transaction"](alias)
def _resolve_team_key_transaction_alias(self, _: str) -> SelectType:
return field_aliases.resolve_team_key_transaction_alias(
self.builder, resolve_metric_index=True
)
def _resolve_project_slug_alias(self, alias: str) -> SelectType:
return field_aliases.resolve_project_slug_alias(self.builder, alias)
def _resolve_transaction_alias(self, alias: str) -> SelectType:
return Function(
"transform",
[
Column(self.builder.resolve_column_name("transaction")),
[""],
[self.builder.resolve_tag_value("<< unparameterized >>")],
],
alias,
)
    def _resolve_transaction_alias_on_demand(self, _: str) -> SelectType:
        """On-demand doesn't need a transform for transaction in its where clause
        since conditions are saved on a per-metric basis.
        """
        # Unlike _resolve_transaction_alias, no "" -> "<< unparameterized >>" mapping.
        return Column(self.builder.resolve_column_name("transaction"))
@cached_property
def _resolve_project_threshold_config(self) -> SelectType:
org_id = self.builder.params.organization_id
if org_id is None:
raise InvalidSearchQuery("Missing organization")
return function_aliases.resolve_project_threshold_config(
tag_value_resolver=lambda _org_id, value: self.builder.resolve_tag_value(value),
column_name_resolver=lambda _org_id, value: self.builder.resolve_column_name(value),
org_id=org_id,
project_ids=self.builder.params.project_ids,
)
def _project_threshold_multi_if_function(self) -> SelectType:
"""Accessed by `_resolve_apdex_function` and `_resolve_count_miserable_function`,
this returns the right duration value (for example, lcp or duration) based
on project or transaction thresholds that have been configured by the user.
"""
return Function(
"multiIf",
[
Function(
"equals",
[
self.builder.resolve_field_alias("project_threshold_config"),
"lcp",
],
),
self.resolve_metric("measurements.lcp"),
self.resolve_metric("transaction.duration"),
],
)
# Query Filters
def _event_type_converter(self, search_filter: SearchFilter) -> WhereType | None:
"""Not really a converter, check its transaction, error otherwise"""
value = search_filter.value.value
operator = search_filter.operator
if value == "transaction" and operator == "=":
return None
raise IncompatibleMetricsQuery("Can only filter event.type:transaction")
def _message_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
return filter_aliases.message_filter_converter(self.builder, search_filter)
def _project_slug_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
return filter_aliases.project_slug_converter(self.builder, search_filter)
def _release_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
return filter_aliases.release_filter_converter(self.builder, search_filter)
def _transaction_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
operator = search_filter.operator
value = search_filter.value.value
if operator in ("=", "!=") and value == "":
# !has:transaction
if operator == "=":
raise InvalidSearchQuery(
"All events have a transaction so this query wouldn't return anything"
)
else:
# All events have a "transaction" since we map null -> unparam so no need to filter
return None
if isinstance(value, list):
resolved_values = []
for item in value:
resolved_item = self.builder.resolve_tag_value(item)
if resolved_item is None:
raise IncompatibleMetricsQuery(f"Transaction value {item} in filter not found")
resolved_values.append(resolved_item)
value = resolved_values
else:
resolved_value = self.builder.resolve_tag_value(value)
if resolved_value is None:
raise IncompatibleMetricsQuery(f"Transaction value {value} in filter not found")
value = resolved_value
if search_filter.value.is_wildcard():
return Condition(
Function("match", [self.builder.resolve_column("transaction"), f"(?i){value}"]),
Op(search_filter.operator),
1,
)
return Condition(self.builder.resolve_column("transaction"), Op(operator), value)
def _transaction_status_converter(self, search_filter: SearchFilter) -> WhereType | None:
operator = search_filter.operator
value = search_filter.value.value
# For backward compatibility, `unknown_error` is converted to `unknown`, since Relay always emits `unknown`
# `transaction.status`.
if value == "unknown_error":
value = "unknown"
lhs = self.builder.resolve_column("transaction.status")
if search_filter.value.is_wildcard():
return Condition(
Function("match", [lhs, f"(?i){value}"]),
Op(operator),
1,
)
return Condition(lhs, Op(operator), value)
def _resolve_span_module(self, alias: str) -> SelectType:
return field_aliases.resolve_span_module(self.builder, alias)
# Query Functions
def _resolve_count_if(
self,
metric_condition: Function,
condition: Function,
alias: str | None = None,
) -> SelectType:
return Function(
"countIf",
[
Column("value"),
Function(
"and",
[
metric_condition,
condition,
],
),
],
alias,
)
def _resolve_avg(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
return Function(
"avgIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
args["metric_id"],
],
),
],
alias,
)
def _resolve_sum(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
return Function(
"sumIf",
[
Column("value"),
Function("equals", [Column("metric_id"), args["metric_id"]]),
],
alias,
)
def _resolve_min(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
return Function(
"minIf",
[
Column("value"),
Function("equals", [Column("metric_id"), args["metric_id"]]),
],
alias,
)
def _resolve_max(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
return Function(
"maxIf",
[
Column("value"),
Function("equals", [Column("metric_id"), args["metric_id"]]),
],
alias,
)
def _resolve_apdex_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
"""Apdex is tag based in metrics, which means we can't base it on the satsifaction parameter"""
if args["satisfaction"] is not None:
raise IncompatibleMetricsQuery(
"Cannot query apdex with a threshold parameter on the metrics dataset"
)
metric_satisfied = self.builder.resolve_tag_value(constants.METRIC_SATISFIED_TAG_VALUE)
metric_tolerated = self.builder.resolve_tag_value(constants.METRIC_TOLERATED_TAG_VALUE)
# Nothing is satisfied or tolerated, the score must be 0
if metric_satisfied is None and metric_tolerated is None:
return Function(
"toUInt64",
[0],
alias,
)
satisfied = Function(
"equals", [self.builder.column(constants.METRIC_SATISFACTION_TAG_KEY), metric_satisfied]
)
tolerable = Function(
"equals", [self.builder.column(constants.METRIC_SATISFACTION_TAG_KEY), metric_tolerated]
)
metric_condition = Function(
"equals", [Column("metric_id"), self._project_threshold_multi_if_function()]
)
return Function(
"divide",
[
Function(
"plus",
[
self._resolve_count_if(metric_condition, satisfied),
Function(
"divide",
[self._resolve_count_if(metric_condition, tolerable), 2],
),
],
),
Function("countIf", [Column("value"), metric_condition]),
],
alias,
)
def _resolve_histogram_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
"""zoom_params is based on running metrics zoom_histogram function that adds conditions based on min, max,
buckets"""
zoom_params = getattr(self.builder, "zoom_params", None)
num_buckets = getattr(self.builder, "num_buckets", 250)
histogram_aliases = getattr(self.builder, "histogram_aliases", [])
histogram_aliases.append(alias)
metric_condition = Function("equals", [Column("metric_id"), args["metric_id"]])
return Function(
f"histogramIf({num_buckets})",
[
Column("value"),
(
Function("and", [zoom_params, metric_condition])
if zoom_params
else metric_condition
),
],
alias,
)
def _resolve_count_miserable_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
if args["satisfaction"] is not None:
raise IncompatibleMetricsQuery(
"Cannot query misery with a threshold parameter on the metrics dataset"
)
metric_frustrated = self.builder.resolve_tag_value(constants.METRIC_FRUSTRATED_TAG_VALUE)
# Nobody is miserable, we can return 0
if metric_frustrated is None:
return Function(
"toUInt64",
[0],
alias,
)
return Function(
"uniqIf",
[
Column("value"),
Function(
"and",
[
Function(
"equals",
[
Column("metric_id"),
args["metric_id"],
],
),
Function(
"equals",
[
self.builder.column(constants.METRIC_SATISFACTION_TAG_KEY),
metric_frustrated,
],
),
],
),
],
alias,
)
def _resolve_user_misery_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
if not isinstance(args["alpha"], float) or not isinstance(args["beta"], float):
raise InvalidSearchQuery("Cannot query user_misery with non floating point alpha/beta")
if args["satisfaction"] is not None:
raise IncompatibleMetricsQuery(
"Cannot query user_misery with a threshold parameter on the metrics dataset"
)
return Function(
"divide",
[
Function(
"plus",
[
self.builder.resolve_function("count_miserable(user)"),
args["alpha"],
],
),
Function(
"plus",
[
Function(
"nullIf", [self.builder.resolve_function("count_unique(user)"), 0]
),
args["alpha"] + args["beta"],
],
),
],
alias,
)
def _resolve_failure_count(
self,
_: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
statuses = [
self.builder.resolve_tag_value(status) for status in constants.NON_FAILURE_STATUS
]
return self._resolve_count_if(
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("transaction.duration"),
],
),
Function(
"notIn",
[
self.builder.column("transaction.status"),
list(status for status in statuses if status is not None),
],
),
alias,
)
def _resolve_http_error_count(
self,
_: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
extra_condition: Function | None = None,
) -> SelectType:
statuses = [
self.builder.resolve_tag_value(status) for status in constants.HTTP_SERVER_ERROR_STATUS
]
base_condition = Function(
"in",
[
self.builder.column("http.status_code"),
list(status for status in statuses if status is not None),
],
)
if extra_condition:
condition = Function(
"and",
[
base_condition,
extra_condition,
],
)
else:
condition = base_condition
return self._resolve_count_if(
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("transaction.duration"),
],
),
condition,
alias,
)
    def _resolve_percentile(
        self,
        args: Mapping[str, str | Column | SelectType | int | float],
        alias: str,
        fixed_percentile: float | None = None,
    ) -> SelectType:
        """Thin wrapper around the shared metrics percentile resolver."""
        return function_aliases.resolve_metrics_percentile(
            args=args, alias=alias, fixed_percentile=fixed_percentile
        )
def _key_transaction_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
return filter_aliases.team_key_transaction_filter(self.builder, search_filter)
def _resolve_count_starts_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str,
) -> SelectType:
column = args["column"]
metric_id = args["metric_id"]
if column not in [
"measurements.app_start_cold",
"measurements.app_start_warm",
]:
raise InvalidSearchQuery("count_starts only supports cold or app start measurements")
return Function(
"countIf",
[
Column("value"),
Function("equals", [Column("metric_id"), metric_id]),
],
alias,
)
def _resolve_count_total_starts_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str,
) -> SelectType:
return Function(
"countIf",
[
Column("value"),
Function(
"or",
[
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("measurements.app_start_cold"),
],
),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("measurements.app_start_warm"),
],
),
],
),
],
alias,
)
def _resolve_web_vital_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str,
) -> SelectType:
column = args["column"]
metric_id = args["metric_id"]
quality = args["quality"]
if not isinstance(quality, str):
raise InvalidSearchQuery(f"Invalid argument quanlity: {quality}")
quality = quality.lower()
if not isinstance(column, str) or column not in [
"measurements.lcp",
"measurements.fcp",
"measurements.fp",
"measurements.fid",
"measurements.cls",
"measurements.ttfb",
"measurements.inp",
]:
raise InvalidSearchQuery("count_web_vitals only supports measurements")
measurement_rating = self.builder.resolve_column("measurement_rating")
if quality == "any":
return Function(
"countIf",
[
Column("value"),
Function("equals", [Column("metric_id"), metric_id]),
],
alias,
)
try:
quality_id = self.builder.resolve_tag_value(quality)
except IncompatibleMetricsQuery:
quality_id = None
if quality_id is None:
return Function(
# This matches the type from doing `select toTypeName(count()) ...` from clickhouse
"toUInt64",
[0],
alias,
)
return Function(
"countIf",
[
Column("value"),
Function(
"and",
[
Function("equals", [measurement_rating, quality_id]),
Function("equals", [Column("metric_id"), metric_id]),
],
),
],
alias,
)
def _resolve_web_vital_score_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None,
) -> SelectType:
"""Returns the normalized score (0.0-1.0) for a given web vital.
This function exists because we don't store a metric for the normalized score.
The normalized score is calculated by dividing the sum of measurements.score.* by the sum of measurements.score.weight.*
To calculate the total performance score, see _resolve_total_performance_score_function.
"""
column = args["column"]
metric_id = args["metric_id"]
if not isinstance(column, str) or column not in [
"measurements.score.lcp",
"measurements.score.fcp",
"measurements.score.fid",
"measurements.score.inp",
"measurements.score.cls",
"measurements.score.ttfb",
"measurements.score.total",
]:
raise InvalidSearchQuery("performance_score only supports measurements")
if column == "measurements.score.total":
return self._resolve_total_performance_score_function(args, alias)
weight_metric_id = self.resolve_metric(column.replace("score", "score.weight"))
return Function(
"greatest",
[
Function(
"least",
[
Function(
"if",
[
Function(
"greater",
[
Function(
"sumIf",
[
Column("value"),
Function(
"equals",
[Column("metric_id"), weight_metric_id],
),
],
),
0.0,
],
),
Function(
"divide",
[
Function(
"sumIf",
[
Column("value"),
Function(
"equals", [Column("metric_id"), metric_id]
),
],
),
Function(
"sumIf",
[
Column("value"),
Function(
"equals",
[Column("metric_id"), weight_metric_id],
),
],
),
],
),
0.0,
],
),
1.0,
],
),
0.0,
],
alias,
)
def _resolve_web_vital_opportunity_score_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str,
) -> SelectType:
column = args["column"]
metric_id = args["metric_id"]
if not isinstance(column, str) or column not in [
"measurements.score.lcp",
"measurements.score.fcp",
"measurements.score.fid",
"measurements.score.inp",
"measurements.score.cls",
"measurements.score.ttfb",
"measurements.score.total",
]:
raise InvalidSearchQuery("performance_score only supports measurements")
weight_metric = (
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
metric_id,
],
),
],
)
if column == "measurements.score.total"
else Function(
"sumIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric(column.replace("score", "score.weight")),
],
),
],
)
)
return Function(
"minus",
[
weight_metric,
Function(
"sumIf",
[
Column("value"),
Function("equals", [Column("metric_id"), metric_id]),
],
),
],
alias,
)
def _resolve_total_web_vital_opportunity_score_with_fixed_weights_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str,
) -> SelectType:
"""Calculates the total opportunity score for a page.
The formula for an individual web vital opportunity score is:
(sum_page_lcp_weight - sum_page_lcp_score) / sum_project_lcp_weight
The total opportunity score is the sum of all individual web vital opportunity scores with another layer of fixed weights applied.
"""
vitals = ["lcp", "fcp", "cls", "ttfb", "inp"]
opportunity_score_sums = {
vital: Function(
"minus",
[
Function(
"sumIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric(f"measurements.score.weight.{vital}"),
],
),
],
),
Function(
"sumIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric(f"measurements.score.{vital}"),
],
),
],
),
],
)
for vital in vitals
}
adjusted_opportunity_scores = {
vital: Function(
"multiply",
[
constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS[vital],
Function(
"if",
[
Function(
"isZeroOrNull",
[opportunity_score_sums[vital]],
),
0,
Function(
"divide",
[
opportunity_score_sums[vital],
self._resolve_total_score_weights_function(
f"measurements.score.weight.{vital}", None
),
],
),
],
),
],
)
for vital in vitals
}
# TODO: Divide by the total weights to factor out any missing web vitals
return Function(
"divide",
[
Function(
"plus",
[
adjusted_opportunity_scores["lcp"],
Function(
"plus",
[
adjusted_opportunity_scores["fcp"],
Function(
"plus",
[
adjusted_opportunity_scores["cls"],
Function(
"plus",
[
adjusted_opportunity_scores["ttfb"],
adjusted_opportunity_scores["inp"],
],
),
],
),
],
),
],
),
self._resolve_total_weights_function(),
],
alias,
)
def _resolve_total_score_weights_function(self, column: str, alias: str | None) -> SelectType:
"""Calculates the total sum score weights for a given web vital.
This must be cached since it runs another query."""
self.builder.requires_other_aggregates = True
if column in self.total_score_weights and self.total_score_weights[column] is not None:
return Function("toFloat64", [self.total_score_weights[column]], alias)
# Pull out browser.name filters from the query
assert self.builder.query is not None
parsed_terms = parse_search_query(self.builder.query)
query = " ".join(
term.to_query_string()
for term in parsed_terms
if (isinstance(term, SearchFilter) and term.key.name == "browser.name")
or (
isinstance(term, ParenExpression)
and all(
(isinstance(child_term, SearchFilter) and child_term.key.name == "browser.name")
or child_term == "OR"
for child_term in term.children
)
)
)
total_query = metrics.MetricsQueryBuilder(
dataset=self.builder.dataset,
params={},
snuba_params=self.builder.params,
selected_columns=[f"sum({column})"],
query=query,
)
total_query.columns += self.builder.resolve_groupby()
total_results = total_query.run_query(Referrer.API_DISCOVER_TOTAL_SCORE_WEIGHTS_FIELD.value)
results = total_query.process_results(total_results)
if len(results["data"]) != 1:
self.total_score_weights[column] = 0
return Function("toFloat64", [0], alias)
self.total_score_weights[column] = results["data"][0][
fields.get_function_alias(f"sum({column})")
]
return Function("toFloat64", [self.total_score_weights[column]], alias)
def _resolve_count_scores_function(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str,
) -> SelectType:
column = args["column"]
metric_id = args["metric_id"]
if column not in [
"measurements.score.total",
"measurements.score.lcp",
"measurements.score.fcp",
"measurements.score.fid",
"measurements.score.inp",
"measurements.score.cls",
"measurements.score.ttfb",
]:
raise InvalidSearchQuery("count_scores only supports performance score measurements")
return Function(
"countIf",
[
Column("value"),
Function("equals", [Column("metric_id"), metric_id]),
],
alias,
)
def _resolve_total_weights_function(self) -> SelectType:
vitals = ["lcp", "fcp", "cls", "ttfb", "inp"]
weights = {
vital: Function(
"if",
[
Function(
"isZeroOrNull",
[
Function(
"sumIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric(
f"measurements.score.weight.{vital}"
),
],
),
],
),
],
),
0,
constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS[vital],
],
)
for vital in vitals
}
return Function(
"plus",
[
Function(
"plus",
[
Function(
"plus",
[
Function(
"plus",
[
weights["lcp"],
weights["fcp"],
],
),
weights["cls"],
],
),
weights["ttfb"],
],
),
weights["inp"],
],
)
def _resolve_total_performance_score_function(
self,
_: Mapping[str, str | Column | SelectType | int | float],
alias: str | None,
) -> SelectType:
"""Returns the total performance score based on a page/site's web vitals.
This function is calculated by:
the summation of (normalized_vital_score * weight) for each vital, divided by the sum of all weights
- normalized_vital_score is the 0.0-1.0 score for each individual vital
- weight is the 0.0-1.0 weight for each individual vital (this is a constant value stored in constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS)
- if all webvitals have data, then the sum of all weights is 1
- normalized_vital_score is obtained through _resolve_web_vital_score_function (see docstring on that function for more details)
"""
vitals = ["lcp", "fcp", "cls", "ttfb", "inp"]
scores = {
vital: Function(
"multiply",
[
constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS[vital],
self._resolve_web_vital_score_function(
{
"column": f"measurements.score.{vital}",
"metric_id": self.resolve_metric(f"measurements.score.{vital}"),
},
None,
),
],
)
for vital in vitals
}
# TODO: Divide by the total weights to factor out any missing web vitals
return Function(
"divide",
[
# TODO: Is there a way to sum more than 2 values at once?
Function(
"plus",
[
Function(
"plus",
[
Function(
"plus",
[
Function(
"plus",
[
scores["lcp"],
scores["fcp"],
],
),
scores["cls"],
],
),
scores["ttfb"],
],
),
scores["inp"],
],
),
self._resolve_total_weights_function(),
],
alias,
)
def _resolve_total_transaction_duration(self, alias: str, scope: str) -> SelectType:
"""This calculates the total time, and based on the scope will return
either the apps total time or whatever other local scope/filters are
applied.
This must be cached since it runs another query."""
self.builder.requires_other_aggregates = True
if self.total_transaction_duration is not None:
return Function("toFloat64", [self.total_transaction_duration], alias)
total_query = metrics.MetricsQueryBuilder(
dataset=self.builder.dataset,
params={},
snuba_params=self.builder.params,
selected_columns=["sum(transaction.duration)"],
)
total_query.columns += self.builder.resolve_groupby()
if scope == "local":
total_query.where = self.builder.where
total_results = total_query.run_query(
Referrer.API_DISCOVER_TOTAL_SUM_TRANSACTION_DURATION_FIELD.value
)
results = total_query.process_results(total_results)
if len(results["data"]) != 1:
self.total_transaction_duration = 0
return Function("toFloat64", [0], alias)
self.total_transaction_duration = results["data"][0]["sum_transaction_duration"]
return Function("toFloat64", [self.total_transaction_duration], alias)
def _resolve_time_spent_percentage(
self, args: Mapping[str, str | Column | SelectType | int | float], alias: str
) -> SelectType:
if not isinstance(args["scope"], str):
raise InvalidSearchQuery(f"Invalid scope: {args['scope']}")
total_time = self._resolve_total_transaction_duration(
constants.TOTAL_TRANSACTION_DURATION_ALIAS, args["scope"]
)
metric_id = self.resolve_metric("transaction.duration")
return Function(
"divide",
[
Function(
"sumIf",
[
Column("value"),
Function("equals", [Column("metric_id"), metric_id]),
],
),
total_time,
],
alias,
)
    def _resolve_epm(
        self,
        args: MutableMapping[str, str | Column | SelectType | int | float],
        alias: str | None = None,
        extra_condition: Function | None = None,
    ) -> SelectType:
        """Events per minute (60s divisor), based on transaction.duration rows."""
        # presumably `interval` is set by timeseries builders so the rate is
        # per rollup bucket rather than the whole window -- TODO confirm.
        if hasattr(self.builder, "interval"):
            args["interval"] = self.builder.interval
        return self._resolve_rate(60, args, alias, extra_condition)
    def _resolve_spm(
        self,
        args: MutableMapping[str, str | Column | SelectType | int | float],
        alias: str | None = None,
        extra_condition: Function | None = None,
    ) -> SelectType:
        """Spans per minute: like _resolve_epm but counts span.self_time rows."""
        if hasattr(self.builder, "interval"):
            args["interval"] = self.builder.interval
        return self._resolve_rate(60, args, alias, extra_condition, "span.self_time")
    def _resolve_eps(
        self,
        args: MutableMapping[str, str | Column | SelectType | int | float],
        alias: str | None = None,
        extra_condition: Function | None = None,
    ) -> SelectType:
        """Events per second: no per-minute divisor (interval=None in _resolve_rate)."""
        if hasattr(self.builder, "interval"):
            args["interval"] = self.builder.interval
        return self._resolve_rate(None, args, alias, extra_condition)
def _resolve_rate(
self,
interval: int | None,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
extra_condition: Function | None = None,
metric: str = "transaction.duration",
) -> SelectType:
base_condition = Function(
"equals",
[
Column("metric_id"),
self.resolve_metric(metric),
],
)
if extra_condition:
condition = Function("and", [base_condition, extra_condition])
else:
condition = base_condition
query_time_range_interval = (
self.builder.resolve_time_range_window()
if self.should_skip_interval_calculation
else args["interval"]
)
return Function(
"divide",
[
Function(
"countIf",
[
Column("value"),
condition,
],
),
(
query_time_range_interval
if interval is None
else Function("divide", [query_time_range_interval, interval])
),
],
alias,
)
def _resolve_cache_hit_count(
self,
_: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
return self._resolve_count_if(
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
Function(
"equals",
[
self.builder.column("cache.hit"),
self.builder.resolve_tag_value("true"),
],
),
alias,
)
def _resolve_cache_miss_count(
self,
_: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
return self._resolve_count_if(
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
Function(
"equals",
[
self.builder.column("cache.hit"),
self.builder.resolve_tag_value("false"),
],
),
alias,
)
def _resolve_cache_hit_and_miss_count(
self,
_: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
statuses = [self.builder.resolve_tag_value(status) for status in constants.CACHE_HIT_STATUS]
return self._resolve_count_if(
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
Function(
"in",
[
self.builder.column("cache.hit"),
list(status for status in statuses if status is not None),
],
),
alias,
)
def _resolve_http_response_count(
self,
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None = None,
) -> SelectType:
condition = Function(
"startsWith",
[
self.builder.column("span.status_code"),
args["code"],
],
)
return self._resolve_count_if(
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric("span.self_time"),
],
),
condition,
alias,
)
| MetricsDatasetConfig |
python | numba__numba | numba/core/controlflow.py | {
"start": 23385,
"end": 31439
} | class ____(object):
"""
Attributes
----------
- bytecode
- blocks
- blockseq
- doms: dict of set
Dominators
- backbone: set of block offsets
The set of block that is common to all possible code path.
"""
def __init__(self, bytecode):
self.bytecode = bytecode
self.blocks = {}
self.liveblocks = {}
self.blockseq = []
self.doms = None
self.backbone = None
# Internal temp states
self._force_new_block = True
self._curblock = None
self._blockstack = []
self._loops = []
self._withs = []
def iterblocks(self):
"""
Return all blocks in sequence of occurrence
"""
for i in self.blockseq:
yield self.blocks[i]
def iterliveblocks(self):
"""
Return all live blocks in sequence of occurrence
"""
for i in self.blockseq:
if i in self.liveblocks:
yield self.blocks[i]
def incoming_blocks(self, block):
"""
Yield (incoming block, number of stack pops) pairs for *block*.
"""
for i, pops in block.incoming_jumps.items():
if i in self.liveblocks:
yield self.blocks[i], pops
def dump(self, file=None):
self.graph.dump(file=None)
def run(self):
for inst in self._iter_inst():
fname = "op_%s" % inst.opname
fn = getattr(self, fname, None)
if fn is not None:
fn(inst)
elif inst.is_jump:
# this catches e.g. try... except
l = Loc(self.bytecode.func_id.filename, inst.lineno)
if inst.opname in {"SETUP_FINALLY"}:
msg = "'try' block not supported until python3.7 or later"
else:
msg = "Use of unsupported opcode (%s) found" % inst.opname
raise UnsupportedError(msg, loc=l)
else:
# Non-jump instructions are ignored
pass # intentionally
# Close all blocks
for cur, nxt in zip(self.blockseq, self.blockseq[1:]):
blk = self.blocks[cur]
if not blk.outgoing_jumps and not blk.terminating:
blk.outgoing_jumps[nxt] = 0
graph = CFGraph()
for b in self.blocks:
graph.add_node(b)
for b in self.blocks.values():
for out, pops in b.outgoing_jumps.items():
graph.add_edge(b.offset, out, pops)
graph.set_entry_point(min(self.blocks))
graph.process()
self.graph = graph
# Fill incoming
for b in self.blocks.values():
for out, pops in b.outgoing_jumps.items():
self.blocks[out].incoming_jumps[b.offset] = pops
# Find liveblocks
self.liveblocks = dict((i, self.blocks[i])
for i in self.graph.nodes())
for lastblk in reversed(self.blockseq):
if lastblk in self.liveblocks:
break
else:
raise AssertionError("No live block that exits!?")
# Find backbone
backbone = self.graph.backbone()
# Filter out in loop blocks (Assuming no other cyclic control blocks)
# This is to unavoid variable defined in loops to be considered as
# function scope.
inloopblocks = set()
for b in self.blocks.keys():
if self.graph.in_loops(b):
inloopblocks.add(b)
self.backbone = backbone - inloopblocks
def jump(self, target, pops=0):
"""
Register a jump (conditional or not) to *target* offset.
*pops* is the number of stack pops implied by the jump (default 0).
"""
self._curblock.outgoing_jumps[target] = pops
def _iter_inst(self):
for inst in self.bytecode:
if self._use_new_block(inst):
self._guard_with_as(inst)
self._start_new_block(inst)
self._curblock.body.append(inst.offset)
yield inst
def _use_new_block(self, inst):
if inst.offset in self.bytecode.labels:
res = True
elif inst.opname in NEW_BLOCKERS:
res = True
else:
res = self._force_new_block
self._force_new_block = False
return res
def _start_new_block(self, inst):
self._curblock = CFBlock(inst.offset)
self.blocks[inst.offset] = self._curblock
self.blockseq.append(inst.offset)
    def _guard_with_as(self, current_inst):
        """Checks if the next instruction after a SETUP_WITH is something other
        than a POP_TOP, if it is something else it'll be some sort of store
        which is not supported (this corresponds to `with CTXMGR as VAR(S)`)."""
        if current_inst.opname == "SETUP_WITH":
            # A plain `with ctx:` compiles to SETUP_WITH followed by POP_TOP;
            # anything else stores the manager's result (the `as` form).
            next_op = self.bytecode[current_inst.next].opname
            if next_op != "POP_TOP":
                msg = ("The 'with (context manager) as "
                       "(variable):' construct is not "
                       "supported.")
                raise UnsupportedError(msg)
def op_SETUP_LOOP(self, inst):
end = inst.get_jump_target()
self._blockstack.append(end)
self._loops.append((inst.offset, end))
# TODO: Looplifting requires the loop entry be its own block.
# Forcing a new block here is the simplest solution for now.
# But, we should consider other less ad-hoc ways.
self.jump(inst.next)
self._force_new_block = True
def op_SETUP_WITH(self, inst):
end = inst.get_jump_target()
self._blockstack.append(end)
self._withs.append((inst.offset, end))
# TODO: WithLifting requires the loop entry be its own block.
# Forcing a new block here is the simplest solution for now.
# But, we should consider other less ad-hoc ways.
self.jump(inst.next)
self._force_new_block = True
def op_POP_BLOCK(self, inst):
self._blockstack.pop()
def op_FOR_ITER(self, inst):
self.jump(inst.get_jump_target())
self.jump(inst.next)
self._force_new_block = True
def _op_ABSOLUTE_JUMP_IF(self, inst):
self.jump(inst.get_jump_target())
self.jump(inst.next)
self._force_new_block = True
op_POP_JUMP_IF_FALSE = _op_ABSOLUTE_JUMP_IF
op_POP_JUMP_IF_TRUE = _op_ABSOLUTE_JUMP_IF
op_JUMP_IF_FALSE = _op_ABSOLUTE_JUMP_IF
op_JUMP_IF_TRUE = _op_ABSOLUTE_JUMP_IF
op_POP_JUMP_FORWARD_IF_FALSE = _op_ABSOLUTE_JUMP_IF
op_POP_JUMP_BACKWARD_IF_FALSE = _op_ABSOLUTE_JUMP_IF
op_POP_JUMP_FORWARD_IF_TRUE = _op_ABSOLUTE_JUMP_IF
op_POP_JUMP_BACKWARD_IF_TRUE = _op_ABSOLUTE_JUMP_IF
def _op_ABSOLUTE_JUMP_OR_POP(self, inst):
self.jump(inst.get_jump_target())
self.jump(inst.next, pops=1)
self._force_new_block = True
op_JUMP_IF_FALSE_OR_POP = _op_ABSOLUTE_JUMP_OR_POP
op_JUMP_IF_TRUE_OR_POP = _op_ABSOLUTE_JUMP_OR_POP
def op_JUMP_ABSOLUTE(self, inst):
self.jump(inst.get_jump_target())
self._force_new_block = True
def op_JUMP_FORWARD(self, inst):
self.jump(inst.get_jump_target())
self._force_new_block = True
op_JUMP_BACKWARD = op_JUMP_FORWARD
def op_RETURN_VALUE(self, inst):
self._curblock.terminating = True
self._force_new_block = True
if PYVERSION in ((3, 12), (3, 13), (3, 14)):
def op_RETURN_CONST(self, inst):
self._curblock.terminating = True
self._force_new_block = True
elif PYVERSION in ((3, 10), (3, 11)):
pass
else:
raise NotImplementedError(PYVERSION)
def op_RAISE_VARARGS(self, inst):
self._curblock.terminating = True
self._force_new_block = True
def op_BREAK_LOOP(self, inst):
self.jump(self._blockstack[-1])
self._force_new_block = True
| ControlFlowAnalysis |
python | facebookresearch__faiss | benchs/distributed_ondisk/search_server.py | {
"start": 1820,
"end": 3042
} | class ____:
""" Combine query results from a sliced dataset (for k-nn search) """
def __init__(self, nq, k):
" nq: number of query vectors, k: number of results per query "
self.I = np.zeros((nq, k), dtype='int64')
self.D = np.zeros((nq, k), dtype='float32')
self.nq, self.k = nq, k
heaps = faiss.float_maxheap_array_t()
heaps.k = k
heaps.nh = nq
heaps.val = faiss.swig_ptr(self.D)
heaps.ids = faiss.swig_ptr(self.I)
heaps.heapify()
self.heaps = heaps
def add_batch_result(self, D, I, i0):
assert D.shape == (self.nq, self.k)
assert I.shape == (self.nq, self.k)
I += i0
self.heaps.addn_with_ids(
self.k, faiss.swig_ptr(D),
faiss.swig_ptr(I), self.k)
def finalize(self):
self.heaps.reorder()
def distribute_weights(weights, nbin):
""" assign a set of weights to a smaller set of bins to balance them """
nw = weights.size
o = weights.argsort()
bins = np.zeros(nbin)
assign = np.ones(nw, dtype=int)
for i in o[::-1]:
b = bins.argmin()
assign[i] = b
bins[b] += weights[i]
return bins, assign
| ResultHeap |
python | python-openxml__python-docx | src/docx/oxml/document.py | {
"start": 1158,
"end": 3593
} | class ____(BaseOxmlElement):
"""`w:body`, the container element for the main document story in `document.xml`."""
add_p: Callable[[], CT_P]
get_or_add_sectPr: Callable[[], CT_SectPr]
p_lst: List[CT_P]
tbl_lst: List[CT_Tbl]
_insert_tbl: Callable[[CT_Tbl], CT_Tbl]
p = ZeroOrMore("w:p", successors=("w:sectPr",))
tbl = ZeroOrMore("w:tbl", successors=("w:sectPr",))
sectPr: CT_SectPr | None = ZeroOrOne( # pyright: ignore[reportAssignmentType]
"w:sectPr", successors=()
)
def add_section_break(self) -> CT_SectPr:
"""Return `w:sectPr` element for new section added at end of document.
The last `w:sectPr` becomes the second-to-last, with the new `w:sectPr` being an
exact clone of the previous one, except that all header and footer references
are removed (and are therefore now "inherited" from the prior section).
A copy of the previously-last `w:sectPr` will now appear in a new `w:p` at the
end of the document. The returned `w:sectPr` is the sentinel `w:sectPr` for the
document (and as implemented, `is` the prior sentinel `w:sectPr` with headers
and footers removed).
"""
# ---get the sectPr at file-end, which controls last section (sections[-1])---
sentinel_sectPr = self.get_or_add_sectPr()
# ---add exact copy to new `w:p` element; that is now second-to last section---
self.add_p().set_sectPr(sentinel_sectPr.clone())
# ---remove any header or footer references from "new" last section---
for hdrftr_ref in sentinel_sectPr.xpath("w:headerReference|w:footerReference"):
sentinel_sectPr.remove(hdrftr_ref)
# ---the sentinel `w:sectPr` now controls the new last section---
return sentinel_sectPr
def clear_content(self):
"""Remove all content child elements from this <w:body> element.
Leave the <w:sectPr> element if it is present.
"""
for content_elm in self.xpath("./*[not(self::w:sectPr)]"):
self.remove(content_elm)
@property
def inner_content_elements(self) -> List[CT_P | CT_Tbl]:
"""Generate all `w:p` and `w:tbl` elements in this document-body.
Elements appear in document order. Elements shaded by nesting in a `w:ins` or
other "wrapper" element will not be included.
"""
return self.xpath("./w:p | ./w:tbl")
| CT_Body |
python | astropy__astropy | astropy/utils/data.py | {
"start": 41349,
"end": 41680
} | class ____(urllib.request.ftpwrapper):
def init(self):
self.busy = 0
self.ftp = ftplib.FTP_TLS()
self.ftp.connect(self.host, self.port, self.timeout)
self.ftp.login(self.user, self.passwd)
self.ftp.prot_p()
_target = "/".join(self.dirs)
self.ftp.cwd(_target)
| _ftptlswrapper |
python | django__django | django/utils/autoreload.py | {
"start": 9473,
"end": 13343
} | class ____:
def __init__(self):
self.extra_files = set()
self.directory_globs = defaultdict(set)
self._stop_condition = threading.Event()
def watch_dir(self, path, glob):
path = Path(path)
try:
path = path.absolute()
except FileNotFoundError:
logger.debug(
"Unable to watch directory %s as it cannot be resolved.",
path,
exc_info=True,
)
return
logger.debug("Watching dir %s with glob %s.", path, glob)
self.directory_globs[path].add(glob)
def watched_files(self, include_globs=True):
"""
Yield all files that need to be watched, including module files and
files within globs.
"""
yield from iter_all_python_module_files()
yield from self.extra_files
if include_globs:
for directory, patterns in self.directory_globs.items():
for pattern in patterns:
yield from directory.glob(pattern)
def wait_for_apps_ready(self, app_reg, django_main_thread):
"""
Wait until Django reports that the apps have been loaded. If the given
thread has terminated before the apps are ready, then a SyntaxError or
other non-recoverable error has been raised. In that case, stop waiting
for the apps_ready event and continue processing.
Return True if the thread is alive and the ready event has been
triggered, or False if the thread is terminated while waiting for the
event.
"""
while django_main_thread.is_alive():
if app_reg.ready_event.wait(timeout=0.1):
return True
else:
logger.debug("Main Django thread has terminated before apps are ready.")
return False
def run(self, django_main_thread):
logger.debug("Waiting for apps ready_event.")
self.wait_for_apps_ready(apps, django_main_thread)
from django.urls import get_resolver
# Prevent a race condition where URL modules aren't loaded when the
# reloader starts by accessing the urlconf_module property.
try:
get_resolver().urlconf_module
except Exception:
# Loading the urlconf can result in errors during development.
# If this occurs then swallow the error and continue.
pass
logger.debug("Apps ready_event triggered. Sending autoreload_started signal.")
autoreload_started.send(sender=self)
self.run_loop()
def run_loop(self):
ticker = self.tick()
while not self.should_stop:
try:
next(ticker)
except StopIteration:
break
self.stop()
def tick(self):
"""
This generator is called in a loop from run_loop. It's important that
the method takes care of pausing or otherwise waiting for a period of
time. This split between run_loop() and tick() is to improve the
testability of the reloader implementations by decoupling the work they
do from the loop.
"""
raise NotImplementedError("subclasses must implement tick().")
@classmethod
def check_availability(cls):
raise NotImplementedError("subclasses must implement check_availability().")
def notify_file_changed(self, path):
results = file_changed.send(sender=self, file_path=path)
logger.debug("%s notified as changed. Signal results: %s.", path, results)
if not any(res[1] for res in results):
trigger_reload(path)
# These are primarily used for testing.
@property
def should_stop(self):
return self._stop_condition.is_set()
def stop(self):
self._stop_condition.set()
| BaseReloader |
python | RaRe-Technologies__gensim | gensim/models/lda_worker.py | {
"start": 2006,
"end": 7555
} | class ____:
"""Used as a Pyro4 class with exposed methods.
Exposes every non-private method and property of the class automatically to be available for remote access.
"""
def __init__(self):
"""Partly initialize the model."""
self.model = None
@Pyro4.expose
def initialize(self, myid, dispatcher, **model_params):
"""Fully initialize the worker.
Parameters
----------
myid : int
An ID number used to identify this worker in the dispatcher object.
dispatcher : :class:`~gensim.models.lda_dispatcher.Dispatcher`
The dispatcher responsible for scheduling this worker.
**model_params
Keyword parameters to initialize the inner LDA model,see :class:`~gensim.models.ldamodel.LdaModel`.
"""
self.lock_update = threading.Lock()
self.jobsdone = 0 # how many jobs has this worker completed?
# id of this worker in the dispatcher; just a convenience var for easy access/logging TODO remove?
self.myid = myid
self.dispatcher = dispatcher
self.finished = False
logger.info("initializing worker #%s", myid)
self.model = ldamodel.LdaModel(**model_params)
@Pyro4.expose
@Pyro4.oneway
def requestjob(self):
"""Request jobs from the dispatcher, in a perpetual loop until :meth:`gensim.models.lda_worker.Worker.getstate`
is called.
Raises
------
RuntimeError
If `self.model` is None (i.e. worker non initialized).
"""
if self.model is None:
raise RuntimeError("worker must be initialized before receiving jobs")
job = None
while job is None and not self.finished:
try:
job = self.dispatcher.getjob(self.myid)
except Queue.Empty:
# no new job: try again, unless we're finished with all work
continue
if job is not None:
logger.info("worker #%s received job #%i", self.myid, self.jobsdone)
self.processjob(job)
self.dispatcher.jobdone(self.myid)
else:
logger.info("worker #%i stopping asking for jobs", self.myid)
@utils.synchronous('lock_update')
def processjob(self, job):
"""Incrementally process the job and potentially logs progress.
Parameters
----------
job : iterable of list of (int, float)
Corpus in BoW format.
"""
logger.debug("starting to process job #%i", self.jobsdone)
self.model.do_estep(job)
self.jobsdone += 1
if SAVE_DEBUG and self.jobsdone % SAVE_DEBUG == 0:
fname = os.path.join(tempfile.gettempdir(), 'lda_worker.pkl')
self.model.save(fname)
logger.info("finished processing job #%i", self.jobsdone - 1)
@Pyro4.expose
def ping(self):
"""Test the connectivity with Worker."""
return True
@Pyro4.expose
@utils.synchronous('lock_update')
def getstate(self):
"""Log and get the LDA model's current state.
Returns
-------
result : :class:`~gensim.models.ldamodel.LdaState`
The current state.
"""
logger.info("worker #%i returning its state after %s jobs", self.myid, self.jobsdone)
result = self.model.state
assert isinstance(result, ldamodel.LdaState)
self.model.clear() # free up mem in-between two EM cycles
self.finished = True
return result
@Pyro4.expose
@utils.synchronous('lock_update')
def reset(self, state):
"""Reset the worker by setting sufficient stats to 0.
Parameters
----------
state : :class:`~gensim.models.ldamodel.LdaState`
Encapsulates information for distributed computation of LdaModel objects.
"""
assert state is not None
logger.info("resetting worker #%i", self.myid)
self.model.state = state
self.model.sync_state()
self.model.state.reset()
self.finished = False
@Pyro4.oneway
def exit(self):
"""Terminate the worker."""
logger.info("terminating worker #%i", self.myid)
os._exit(0)
def main():
parser = argparse.ArgumentParser(description=__doc__[:-130], formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--host", help="Nameserver hostname (default: %(default)s)", default=None)
parser.add_argument("--port", help="Nameserver port (default: %(default)s)", default=None, type=int)
parser.add_argument(
"--no-broadcast", help="Disable broadcast (default: %(default)s)", action='store_const',
default=True, const=False
)
parser.add_argument("--hmac", help="Nameserver hmac key (default: %(default)s)", default=None)
parser.add_argument(
'-v', '--verbose', help='Verbose flag', action='store_const', dest="loglevel",
const=logging.INFO, default=logging.WARNING
)
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=args.loglevel)
logger.info("running %s", " ".join(sys.argv))
ns_conf = {
"broadcast": args.no_broadcast,
"host": args.host,
"port": args.port,
"hmac_key": args.hmac
}
utils.pyro_daemon(LDA_WORKER_PREFIX, Worker(), random_suffix=True, ns_conf=ns_conf)
logger.info("finished running %s", " ".join(sys.argv))
if __name__ == '__main__':
main()
| Worker |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/kubernetes_engine.py | {
"start": 14258,
"end": 15883
} | class ____(GoogleBaseAsyncHook):
"""Asynchronous client of GKE."""
sync_hook_class = GKEHook
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
location: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self._client: ClusterManagerAsyncClient | None = None
self.location = location
async def _get_client(self) -> ClusterManagerAsyncClient:
if self._client is None:
self._client = ClusterManagerAsyncClient(
credentials=(await self.get_sync_hook()).get_credentials(),
client_info=CLIENT_INFO,
)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
async def get_operation(
self,
operation_name: str,
project_id: str = PROVIDE_PROJECT_ID,
) -> Operation:
"""
Fetch an operation from Google Cloud.
:param operation_name: Name of operation to fetch.
:param project_id: Google Cloud project ID.
:return: The new, updated operation from Google Cloud.
"""
project_id = project_id or (await self.get_sync_hook()).project_id
operation_path = f"projects/{project_id}/locations/{self.location}/operations/{operation_name}"
client = await self._get_client()
return await client.get_operation(
name=operation_path,
)
| GKEAsyncHook |
python | jina-ai__jina | tests/integration/floating_deployments/test_floating_deployments.py | {
"start": 172,
"end": 10451
} | class ____(Executor):
def __init__(self, file_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.file_name = file_name
@requests
def foo(self, docs, **kwargs):
time.sleep(TIME_SLEEP_FLOATING)
with open(self.file_name, 'a+', encoding='utf-8') as f:
f.write('here ')
for d in docs:
d.text = 'change it'
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_floating_executors(tmpdir, protocol):
NUM_REQ = 20
file_name = os.path.join(str(tmpdir), 'file.txt')
expected_str = 'here ' * NUM_REQ
f = (
Flow(protocol=protocol)
.add(name='first')
.add(
name='second',
floating=True,
uses=FloatingTestExecutor,
uses_with={'file_name': file_name},
)
)
with f:
for j in range(NUM_REQ):
start_time = time.time()
ret = f.post(on=__default_endpoint__, inputs=DocumentArray.empty(1))
end_time = time.time()
assert (
end_time - start_time
) < TIME_SLEEP_FLOATING # check that the response arrives before the
# Floating Executor finishes
assert len(ret) == 1
assert ret[0].text == ''
with open(file_name, 'r', encoding='utf-8') as f:
resulted_str = f.read()
assert resulted_str == expected_str
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_floating_executors_right_after_gateway(tmpdir, protocol):
NUM_REQ = 20
file_name = os.path.join(str(tmpdir), 'file.txt')
expected_str = 'here ' * NUM_REQ
f = (
Flow(protocol=protocol)
.add(name='first')
.add(
name='second',
floating=True,
uses=FloatingTestExecutor,
uses_with={'file_name': file_name},
needs=['gateway'],
)
)
with f:
for j in range(NUM_REQ):
start_time = time.time()
ret = f.post(on=__default_endpoint__, inputs=DocumentArray.empty(1))
end_time = time.time()
assert (
end_time - start_time
) < TIME_SLEEP_FLOATING # check that the response arrives before the
# Floating Executor finishes
assert len(ret) == 1
assert ret[0].text == ''
with open(file_name, 'r', encoding='utf-8') as f:
resulted_str = f.read()
assert resulted_str == expected_str
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_multiple_floating_points(tmpdir, protocol):
NUM_REQ = 20
file_name1 = os.path.join(str(tmpdir), 'file1.txt')
file_name2 = os.path.join(str(tmpdir), 'file2.txt')
expected_str = 'here ' * NUM_REQ
f = (
Flow(protocol=protocol)
.add(name='first')
.add(
name='second',
floating=True,
uses=FloatingTestExecutor,
uses_with={'file_name': file_name1},
)
.add(
name='third',
floating=True,
uses=FloatingTestExecutor,
uses_with={'file_name': file_name2},
)
)
with f:
for j in range(NUM_REQ):
start_time = time.time()
ret = f.post(on=__default_endpoint__, inputs=DocumentArray.empty(1))
end_time = time.time()
assert (
end_time - start_time
) < TIME_SLEEP_FLOATING # check that the response arrives before the
assert len(ret) == 1
assert ret[0].text == ''
with open(file_name1, 'r', encoding='utf-8') as f:
resulted_str = f.read()
assert resulted_str == expected_str
with open(file_name2, 'r', encoding='utf-8') as f:
resulted_str = f.read()
assert resulted_str == expected_str
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_complex_flow(tmpdir, protocol):
NUM_REQ = 20
file_name1 = os.path.join(str(tmpdir), 'file1.txt')
file_name2 = os.path.join(str(tmpdir), 'file2.txt')
expected_str = 'here ' * NUM_REQ
f = (
Flow(protocol=protocol)
.add(name='pod0')
.add(name='pod4', needs=['gateway'])
.add(
name='floating_pod6',
needs=['gateway'],
floating=True,
uses=FloatingTestExecutor,
uses_with={'file_name': file_name2},
)
.add(
name='floating_pod1',
needs=['pod0'],
floating=True,
uses=FloatingTestExecutor,
uses_with={'file_name': file_name1},
)
.add(name='pod2', needs=['pod0'])
.add(name='pod3', needs=['pod2'])
.add(name='pod5', needs=['pod4'])
.add(name='merger', needs=['pod5', 'pod3'])
.add(name='pod_last', needs=['merger'])
)
with f:
for j in range(NUM_REQ):
start_time = time.time()
ret = f.post(on=__default_endpoint__, inputs=DocumentArray.empty(1))
end_time = time.time()
assert (
end_time - start_time
) < TIME_SLEEP_FLOATING # check that the response arrives before the
assert len(ret) == 1
assert ret[0].text == ''
with open(file_name1, 'r', encoding='utf-8') as f:
resulted_str = f.read()
assert resulted_str == expected_str
with open(file_name2, 'r', encoding='utf-8') as f:
resulted_str = f.read()
assert resulted_str == expected_str
@pytest.mark.parametrize('needs', ['gateway', 'executor0'])
def test_floating_needs(needs, tmpdir):
NUM_REQ = 20
file_name = os.path.join(str(tmpdir), 'file.txt')
expected_str = 'here ' * NUM_REQ
class FastChangingExecutor(Executor):
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'Hello World'
f = (
Flow()
.add(name='executor0', uses=FastChangingExecutor)
.add(
name='floating_executor',
uses=FloatingTestExecutor,
uses_with={'file_name': file_name},
needs=[needs],
floating=True,
)
)
with f:
for j in range(NUM_REQ):
start_time = time.time()
response = f.post(on='/endpoint', inputs=DocumentArray.empty(2))
end_time = time.time()
assert (end_time - start_time) < TIME_SLEEP_FLOATING
assert response.texts == ['Hello World', 'Hello World']
with open(file_name, 'r', encoding='utf-8') as f:
resulted_str = f.read()
assert resulted_str == expected_str
@pytest.mark.parametrize('needs', ['gateway', 'executor0', 'executor1'])
def test_floating_needs_more_complex(needs, tmpdir):
NUM_REQ = 20
print(f' tmpdir {tmpdir}')
file_name1 = os.path.join(str(tmpdir), 'file1.txt')
file_name2 = os.path.join(str(tmpdir), 'file2.txt')
class FloatingTestExecutorWriteDocs(Executor):
def __init__(self, file_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.file_name = file_name
@requests
def foo(self, docs, **kwargs):
time.sleep(TIME_SLEEP_FLOATING)
with open(self.file_name, 'a+', encoding='utf-8') as f:
for d in docs:
f.write(d.text)
for d in docs:
d.text = 'change it'
expected_str1 = ''
if needs == 'executor0':
expected_str1 = 'Hello World' * NUM_REQ
elif needs == 'executor1':
expected_str1 = 'Hello World from FastAddExecutor' * NUM_REQ
expected_str2 = 'change it' * NUM_REQ
class FastChangingExecutor(Executor):
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'Hello World'
class FastAddExecutor(Executor):
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text += ' from FastAddExecutor'
f = (
Flow()
.add(name='executor0', uses=FastChangingExecutor)
.add(name='executor1', uses=FastAddExecutor, needs=['executor0'])
.add(
name='floating_executor',
uses=FloatingTestExecutorWriteDocs,
uses_with={'file_name': file_name1},
needs=[needs],
floating=True,
)
.add(
name='floating_executor_2',
uses=FloatingTestExecutorWriteDocs,
uses_with={'file_name': file_name2},
needs=['floating_executor'],
floating=True,
)
)
with f:
for j in range(NUM_REQ):
start_time = time.time()
response = f.post(on='/endpoint', inputs=DocumentArray.empty(1))
end_time = time.time()
assert (end_time - start_time) < TIME_SLEEP_FLOATING
assert response.texts == [
'Hello World from FastAddExecutor',
]
with open(file_name1, 'r', encoding='utf-8') as f:
resulted_str = f.read()
assert resulted_str == expected_str1
with open(file_name2, 'r', encoding='utf-8') as f:
resulted_str = f.read()
assert resulted_str == expected_str2
@pytest.mark.parametrize('protocol', ['http', 'grpc', 'websocket'])
def test_flow_all_floating(protocol, tmpdir):
file_name = os.path.join(str(tmpdir), 'file.txt')
class FloatingTestExecutorWriteDocs(Executor):
def __init__(self, file_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.file_name = file_name
@requests
def foo(self, docs, **kwargs):
length_of_docs = len(docs)
with open(self.file_name, 'a+', encoding='utf-8') as f:
f.write(str(len(docs)))
flow = Flow(protocol=protocol).add(
name='A',
floating=True,
uses=FloatingTestExecutorWriteDocs,
uses_with={'file_name': file_name},
)
with flow:
flow.post(on='/', inputs=DocumentArray.empty(1))
with open(file_name, 'r', encoding='utf-8') as f:
resulted_str = f.read()
assert resulted_str == '1'
| FloatingTestExecutor |
python | celery__celery | t/unit/backends/test_redis.py | {
"start": 11115,
"end": 12661
} | class ____:
def get_backend(self):
from celery.backends.redis import RedisBackend
class _RedisBackend(RedisBackend):
redis = redis
return _RedisBackend
def get_E_LOST(self):
from celery.backends.redis import E_LOST
return E_LOST
def create_task(self, i, group_id="group_id"):
tid = uuid()
task = Mock(name=f'task-{tid}')
task.name = 'foobarbaz'
self.app.tasks['foobarbaz'] = task
task.request.chord = signature(task)
task.request.id = tid
self.b.set_chord_size(group_id, 10)
task.request.group = group_id
task.request.group_index = i
return task
@contextmanager
def chord_context(self, size=1):
with patch('celery.backends.redis.maybe_signature') as ms:
request = Mock(name='request')
request.id = 'id1'
group_id = 'gid1'
request.group = group_id
request.group_index = None
tasks = [
self.create_task(i, group_id=request.group)
for i in range(size)
]
callback = ms.return_value = Signature('add')
callback.id = 'id1'
self.b.set_chord_size(group_id, size)
callback.delay = Mock(name='callback.delay')
yield tasks, request, callback
def setup_method(self):
self.Backend = self.get_backend()
self.E_LOST = self.get_E_LOST()
self.b = self.Backend(app=self.app)
| basetest_RedisBackend |
python | getsentry__sentry | tests/sentry/mail/activity/test_note.py | {
"start": 628,
"end": 3450
} | class ____(ActivityTestCase):
def setUp(self) -> None:
super().setUp()
self.email = NoteActivityNotification(
Activity(
project=self.project,
group=self.group,
user_id=self.user.id,
type=ActivityType.NOTE,
data={"text": "text", "mentions": []},
)
)
def test_simple(self) -> None:
# Defaults: SUBSCRIBE_ONLY and self_notifications:0
assert self.email.get_participants_with_group_subscription_reason().is_empty()
def test_allow_self_notifications(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingOption.objects.create(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
type="workflow",
value="always",
)
UserOption.objects.create(user=self.user, key="self_notifications", value="1")
participants = self.email.get_participants_with_group_subscription_reason()
actual = dict(participants.get_participants_by_provider(ExternalProviders.EMAIL))
expected = {
Actor.from_orm_user(self.user): GroupSubscriptionReason.implicit,
}
assert actual == expected
def test_disable_self_notifications(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingOption.objects.create(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
type="workflow",
value="always",
)
UserOption.objects.create(user=self.user, key="self_notifications", value="0")
participants = self.email.get_participants_with_group_subscription_reason()
assert len(participants.get_participants_by_provider(ExternalProviders.EMAIL)) == 0
def test_note_with_braces(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
NotificationSettingOption.objects.create(
user_id=self.user.id,
scope_type="user",
scope_identifier=self.user.id,
type="workflow",
value="always",
)
UserOption.objects.create(user=self.user, key="self_notifications", value="1")
email = NoteActivityNotification(
Activity(
project=self.project,
group=self.group,
user_id=self.user.id,
type=ActivityType.NOTE,
data={"text": "{abc.property}", "mentions": []},
)
)
context = email.get_context()
assert context["text_description"] == "{abc.property}"
| NoteTestCase |
python | joblib__joblib | joblib/parallel.py | {
"start": 37786,
"end": 86989
} | class ____(Logger):
"""Helper class for readable parallel mapping.
Read more in the :ref:`User Guide <parallel>`.
Parameters
----------
n_jobs: int, default=None
The maximum number of concurrently running jobs, such as the number
of Python worker processes when ``backend="loky"`` or the size of
the thread-pool when ``backend="threading"``.
This argument is converted to an integer, rounded below for float.
If -1 is given, `joblib` tries to use all CPUs. The number of CPUs
``n_cpus`` is obtained with :func:`~cpu_count`.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance,
using ``n_jobs=-2`` will result in all CPUs but one being used.
This argument can also go above ``n_cpus``, which will cause
oversubscription. In some cases, slight oversubscription can be
beneficial, e.g., for tasks with large I/O operations.
If 1 is given, no parallel computing code is used at all, and the
behavior amounts to a simple python `for` loop. This mode is not
compatible with ``timeout``.
None is a marker for 'unset' that will be interpreted as n_jobs=1
unless the call is performed under a :func:`~parallel_config`
context manager that sets another value for ``n_jobs``.
If n_jobs = 0 then a ValueError is raised.
backend: str, ParallelBackendBase instance or None, default='loky'
Specify the parallelization backend implementation.
Supported backends are:
- "loky" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes. On some rare
systems (such as Pyiodide), the loky backend may not be
available.
- "multiprocessing" previous process-based backend based on
`multiprocessing.Pool`. Less robust than `loky`.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
- finally, you can register backends by calling
:func:`~register_parallel_backend`. This will allow you to
implement a backend of your liking.
It is not recommended to hard-code the backend name in a call to
:class:`~Parallel` in a library. Instead it is recommended to set
soft hints (prefer) or hard constraints (require) so as to make it
possible for library users to change the backend from the outside
using the :func:`~parallel_config` context manager.
return_as: str in {'list', 'generator', 'generator_unordered'}, default='list'
If 'list', calls to this instance will return a list, only when
all results have been processed and retrieved.
If 'generator', it will return a generator that yields the results
as soon as they are available, in the order the tasks have been
submitted with.
If 'generator_unordered', the generator will immediately yield
available results independently of the submission order. The output
order is not deterministic in this case because it depends on the
concurrency of the workers.
prefer: str in {'processes', 'threads'} or None, default=None
Soft hint to choose the default backend if no specific backend
was selected with the :func:`~parallel_config` context manager.
The default process-based backend is 'loky' and the default
thread-based backend is 'threading'. Ignored if the ``backend``
parameter is specified.
require: 'sharedmem' or None, default=None
Hard constraint to select the backend. If set to 'sharedmem',
the selected backend will be single-host and thread-based even
if the user asked for a non-thread based backend with
:func:`~joblib.parallel_config`.
verbose: int, default=0
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it more than 10, all iterations are reported.
timeout: float or None, default=None
Timeout limit for each task to complete. If any task takes longer
a TimeOutError will be raised. Only applied when n_jobs != 1
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}, default='2*n_jobs'
The number of batches (of tasks) to be pre-dispatched.
Default is '2*n_jobs'. When batch_size="auto" this is reasonable
default and the workers should never starve. Note that only basic
arithmetic are allowed here and no modules can be used in this
expression.
batch_size: int or 'auto', default='auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, dispatching
calls to workers can be slower than sequential computation because
of the overhead. Batching fast computations together can mitigate
this.
The ``'auto'`` strategy keeps track of the time it takes for a
batch to complete, and dynamically adjusts the batch size to keep
the time on the order of half a second, using a heuristic. The
initial batch size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str or None, default=None
Folder to be used by the pool for memmapping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
variable,
- /dev/shm if the folder exists and is writable: this is a
RAM disk filesystem available by default on modern Linux
distributions,
- the default system temporary folder that can be
overridden with TMP, TMPDIR or TEMP environment
variables, typically /tmp under Unix operating systems.
Only active when ``backend="loky"`` or ``"multiprocessing"``.
max_nbytes int, str, or None, optional, default='1M'
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmapping of large arrays.
Only active when ``backend="loky"`` or ``"multiprocessing"``.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r'
Memmapping mode for numpy arrays passed to workers. None will
disable memmapping, other modes defined in the numpy.memmap doc:
https://numpy.org/doc/stable/reference/generated/numpy.memmap.html
Also, see 'max_nbytes' parameter documentation for more details.
backend_kwargs: dict, optional
Additional parameters to pass to the backend `configure` method.
Notes
-----
This object uses workers to compute in parallel the application of a
function to many different arguments. The main functionality it brings
in addition to using the raw multiprocessing or concurrent.futures API
are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Note that the intended usage is to run one call at a time. Multiple
calls to the same Parallel object will result in a ``RuntimeError``
Examples
--------
A simple example:
>>> from math import sqrt
>>> from joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages:
>>> from time import sleep
>>> from joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=10)(
... delayed(sleep)(.2) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 tasks | elapsed: 0.6s
[Parallel(n_jobs=2)]: Done 4 tasks | elapsed: 0.8s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 1.4s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process:
>>> from heapq import nlargest
>>> from joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(
... delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3))
... # doctest: +SKIP
-----------------------------------------------------------------------
Sub-process traceback:
-----------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
_______________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly:
>>> from math import sqrt
>>> from joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
""" # noqa: E501
def __init__(
self,
n_jobs=default_parallel_config["n_jobs"],
backend=default_parallel_config["backend"],
return_as="list",
verbose=default_parallel_config["verbose"],
timeout=None,
pre_dispatch="2 * n_jobs",
batch_size="auto",
temp_folder=default_parallel_config["temp_folder"],
max_nbytes=default_parallel_config["max_nbytes"],
mmap_mode=default_parallel_config["mmap_mode"],
prefer=default_parallel_config["prefer"],
require=default_parallel_config["require"],
**backend_kwargs,
):
# Initiate parent Logger class state
super().__init__()
# Interpret n_jobs=None as 'unset'
if n_jobs is None:
n_jobs = default_parallel_config["n_jobs"]
active_backend, context_config = _get_active_backend(
prefer=prefer, require=require, verbose=verbose
)
nesting_level = active_backend.nesting_level
self.verbose = _get_config_param(verbose, context_config, "verbose")
self.timeout = timeout
self.pre_dispatch = pre_dispatch
if return_as not in {"list", "generator", "generator_unordered"}:
raise ValueError(
'Expected `return_as` parameter to be a string equal to "list"'
f',"generator" or "generator_unordered", but got {return_as} '
"instead."
)
self.return_as = return_as
self.return_generator = return_as != "list"
self.return_ordered = return_as != "generator_unordered"
# Check if we are under a parallel_config or parallel_backend
# context manager and use the config from the context manager
# for arguments that are not explicitly set.
self._backend_kwargs = {
**backend_kwargs,
**{
k: _get_config_param(param, context_config, k)
for param, k in [
(max_nbytes, "max_nbytes"),
(temp_folder, "temp_folder"),
(mmap_mode, "mmap_mode"),
(prefer, "prefer"),
(require, "require"),
(verbose, "verbose"),
]
},
}
if isinstance(self._backend_kwargs["max_nbytes"], str):
self._backend_kwargs["max_nbytes"] = memstr_to_bytes(
self._backend_kwargs["max_nbytes"]
)
self._backend_kwargs["verbose"] = max(0, self._backend_kwargs["verbose"] - 50)
if DEFAULT_MP_CONTEXT is not None:
self._backend_kwargs["context"] = DEFAULT_MP_CONTEXT
elif hasattr(mp, "get_context"):
self._backend_kwargs["context"] = mp.get_context()
if backend is default_parallel_config["backend"] or backend is None:
backend = active_backend
elif isinstance(backend, ParallelBackendBase):
# Use provided backend as is, with the current nesting_level if it
# is not set yet.
if backend.nesting_level is None:
backend.nesting_level = nesting_level
elif hasattr(backend, "Pool") and hasattr(backend, "Lock"):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._backend_kwargs["context"] = backend
backend = MultiprocessingBackend(nesting_level=nesting_level)
elif backend not in BACKENDS and backend in MAYBE_AVAILABLE_BACKENDS:
warnings.warn(
f"joblib backend '{backend}' is not available on "
f"your system, falling back to {DEFAULT_BACKEND}.",
UserWarning,
stacklevel=2,
)
BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND]
backend = BACKENDS[DEFAULT_BACKEND](nesting_level=nesting_level)
else:
try:
backend_factory = BACKENDS[backend]
except KeyError as e:
raise ValueError(
"Invalid backend: %s, expected one of %r"
% (backend, sorted(BACKENDS.keys()))
) from e
backend = backend_factory(nesting_level=nesting_level)
n_jobs = _get_config_param(n_jobs, context_config, "n_jobs")
if n_jobs is None:
# No specific context override and no specific value request:
# default to the default of the backend.
n_jobs = backend.default_n_jobs
try:
n_jobs = int(n_jobs)
except ValueError:
raise ValueError("n_jobs could not be converted to int")
self.n_jobs = n_jobs
if require == "sharedmem" and not getattr(backend, "supports_sharedmem", False):
raise ValueError("Backend %s does not support shared memory" % backend)
if batch_size == "auto" or isinstance(batch_size, Integral) and batch_size > 0:
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r" % batch_size
)
if not isinstance(backend, SequentialBackend):
if self.return_generator and not backend.supports_return_generator:
raise ValueError(
"Backend {} does not support return_as={}".format(
backend, return_as
)
)
# This lock is used to coordinate the main thread of this process
# with the async callback thread of our the pool.
self._lock = threading.RLock()
self._jobs = collections.deque()
self._jobs_set = set()
self._pending_outputs = list()
self._ready_batches = queue.Queue()
self._reducer_callback = None
# Internal variables
self._backend = backend
self._running = False
self._managed_backend = False
self._id = uuid4().hex
self._call_ref = None
def __enter__(self):
self._managed_backend = True
self._calling = False
self._initialize_backend()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._managed_backend = False
if self.return_generator and self._calling:
self._abort()
self._terminate_and_reset()
def _initialize_backend(self):
"""Build a process or thread pool and return the number of workers"""
try:
n_jobs = self._backend.configure(
n_jobs=self.n_jobs, parallel=self, **self._backend_kwargs
)
if self.timeout is not None and not self._backend.supports_timeout:
warnings.warn(
"The backend class {!r} does not support timeout. "
"You have set 'timeout={}' in Parallel but "
"the 'timeout' parameter will not be used.".format(
self._backend.__class__.__name__, self.timeout
)
)
except FallbackToBackend as e:
# Recursively initialize the backend in case of requested fallback.
self._backend = e.backend
n_jobs = self._initialize_backend()
return n_jobs
def _effective_n_jobs(self):
if self._backend:
return self._backend.effective_n_jobs(self.n_jobs)
return 1
def _terminate_and_reset(self):
if hasattr(self._backend, "stop_call") and self._calling:
self._backend.stop_call()
self._calling = False
if not self._managed_backend:
self._backend.terminate()
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
batch_size = len(batch)
self.n_dispatched_tasks += batch_size
self.n_dispatched_batches += 1
dispatch_timestamp = time.time()
batch_tracker = BatchCompletionCallBack(dispatch_timestamp, batch_size, self)
self._register_new_job(batch_tracker)
# If return_ordered is False, the batch_tracker is not stored in the
# jobs queue at the time of submission. Instead, it will be appended to
# the queue by itself as soon as the callback is triggered to be able
# to return the results in the order of completion.
job = self._backend.submit(batch, callback=batch_tracker)
batch_tracker.register_job(job)
def _register_new_job(self, batch_tracker):
if self.return_ordered:
self._jobs.append(batch_tracker)
else:
self._jobs_set.add(batch_tracker)
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self._aborting:
return False
batch_size = self._get_batch_size()
with self._lock:
# to ensure an even distribution of the workload between workers,
# we look ahead in the original iterators more than batch_size
# tasks - However, we keep consuming only one batch at each
# dispatch_one_batch call. The extra tasks are stored in a local
# queue, _ready_batches, that is looked-up prior to re-consuming
# tasks from the origal iterator.
try:
tasks = self._ready_batches.get(block=False)
except queue.Empty:
# slice the iterator n_jobs * batchsize items at a time. If the
# slice returns less than that, then the current batchsize puts
# too much weight on a subset of workers, while other may end
# up starving. So in this case, re-scale the batch size
# accordingly to distribute evenly the last items between all
# workers.
n_jobs = self._cached_effective_n_jobs
big_batch_size = batch_size * n_jobs
try:
islice = list(itertools.islice(iterator, big_batch_size))
except Exception as e:
# Handle the fact that the generator of task raised an
# exception. As this part of the code can be executed in
# a thread internal to the backend, register a task with
# an error that will be raised in the user's thread.
if isinstance(e.__context__, queue.Empty):
# Suppress the cause of the exception if it is
# queue.Empty to avoid cluttered traceback. Only do it
# if the __context__ is really empty to avoid messing
# with causes of the original error.
e.__cause__ = None
batch_tracker = BatchCompletionCallBack(0, batch_size, self)
self._register_new_job(batch_tracker)
batch_tracker._register_outcome(dict(result=e, status=TASK_ERROR))
return True
if len(islice) == 0:
return False
elif (
iterator is self._original_iterator and len(islice) < big_batch_size
):
# We reached the end of the original iterator (unless
# iterator is the ``pre_dispatch``-long initial slice of
# the original iterator) -- decrease the batch size to
# account for potential variance in the batches running
# time.
final_batch_size = max(1, len(islice) // (10 * n_jobs))
else:
final_batch_size = max(1, len(islice) // n_jobs)
# enqueue n_jobs batches in a local queue
for i in range(0, len(islice), final_batch_size):
tasks = BatchedCalls(
islice[i : i + final_batch_size],
self._backend.get_nested_backend(),
self._reducer_callback,
self._pickle_cache,
)
self._ready_batches.put(tasks)
# finally, get one task.
tasks = self._ready_batches.get(block=False)
if len(tasks) == 0:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _get_batch_size(self):
"""Returns the effective batch size for dispatch"""
if self.batch_size == "auto":
return self._backend.compute_batch_size()
else:
# Fixed batch size strategy
return self.batch_size
def _print(self, msg):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
writer(f"[{self}]: {msg}\n")
def _is_completed(self):
"""Check if all tasks have been completed"""
return self.n_completed_tasks == self.n_dispatched_tasks and not (
self._iterating or self._aborting
)
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
if self.n_tasks is not None and self.n_tasks > 0:
width = floor(log10(self.n_tasks)) + 1
else:
width = 3
elapsed_time = time.time() - self._start_time
if self._is_completed():
# Make sure that we get a last message telling us we are done
self._print(
f"Done {self.n_completed_tasks:{width}d} out of "
f"{self.n_completed_tasks:{width}d} | elapsed: "
f"{short_format_time(elapsed_time)} finished"
)
return
# Original job iterator becomes None once it has been fully
# consumed: at this point we know the total number of jobs and we are
# able to display an estimation of the remaining time based on already
# completed jobs. Otherwise, we simply display the number of completed
# tasks.
elif self._original_iterator is not None:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
fmt_time = f"| elapsed: {short_format_time(elapsed_time)}"
index = self.n_completed_tasks
if self.n_tasks is not None:
self._print(
f"Done {index:{width}d} out of {self.n_tasks:{width}d} {fmt_time}"
)
else:
pad = " " * (len("out of ") + width - len("tasks"))
self._print(f"Done {index:{width}d} tasks {pad}{fmt_time}")
else:
index = self.n_completed_tasks
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if index != 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = total_tasks - index + 1 - self._pre_dispatch_amount
frequency = (total_tasks // self.verbose) + 1
is_last_item = index + 1 == total_tasks
if is_last_item or cursor % frequency:
return
remaining_time = (elapsed_time / max(index, 1)) * (
self.n_dispatched_tasks - index
)
# only display status if remaining time is greater or equal to 0
self._print(
f"Done {index:{width}d} out of {total_tasks:{width}d} "
f"| elapsed: {short_format_time(elapsed_time)} remaining: "
f"{short_format_time(remaining_time)}"
)
def _abort(self):
# Stop dispatching new jobs in the async callback thread
self._aborting = True
# If the backend allows it, cancel or kill remaining running
# tasks without waiting for the results as we will raise
# the exception we got back to the caller instead of returning
# any result.
backend = self._backend
if not self._aborted and hasattr(backend, "abort_everything"):
# If the backend is managed externally we need to make sure
# to leave it in a working state to allow for future jobs
# scheduling.
ensure_ready = self._managed_backend
backend.abort_everything(ensure_ready=ensure_ready)
self._aborted = True
def _start(self, iterator, pre_dispatch):
# Only set self._iterating to True if at least a batch
# was dispatched. In particular this covers the edge
# case of Parallel used with an exhausted iterator. If
# self._original_iterator is None, then this means either
# that pre_dispatch == "all", n_jobs == 1 or that the first batch
# was very quick and its callback already dispatched all the
# remaining jobs.
self._iterating = False
if self.dispatch_one_batch(iterator):
self._iterating = self._original_iterator is not None
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all":
# The iterable was consumed all at once by the above for loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
def _get_outputs(self, iterator, pre_dispatch):
"""Iterator returning the tasks' output as soon as they are ready."""
dispatch_thread_id = threading.get_ident()
detach_generator_exit = False
try:
self._start(iterator, pre_dispatch)
# first yield returns None, for internal use only. This ensures
# that we enter the try/except block and start dispatching the
# tasks.
yield
with self._backend.retrieval_context():
yield from self._retrieve()
except GeneratorExit:
# The generator has been garbage collected before being fully
# consumed. This aborts the remaining tasks if possible and warn
# the user if necessary.
self._exception = True
# In some interpreters such as PyPy, GeneratorExit can be raised in
# a different thread than the one used to start the dispatch of the
# parallel tasks. This can lead to hang when a thread attempts to
# join itself. As workaround, we detach the execution of the
# aborting code to a dedicated thread. We then need to make sure
# the rest of the function does not call `_terminate_and_reset`
# in finally.
if dispatch_thread_id != threading.get_ident():
warnings.warn(
"A generator produced by joblib.Parallel has been "
"gc'ed in an unexpected thread. This behavior should "
"not cause major -issues but to make sure, please "
"report this warning and your use case at "
"https://github.com/joblib/joblib/issues so it can "
"be investigated."
)
detach_generator_exit = True
_parallel = self
class _GeneratorExitThread(threading.Thread):
def run(self):
_parallel._abort()
if _parallel.return_generator:
_parallel._warn_exit_early()
_parallel._terminate_and_reset()
_GeneratorExitThread(name="GeneratorExitThread").start()
return
# Otherwise, we are in the thread that started the dispatch: we can
# safely abort the execution and warn the user.
self._abort()
if self.return_generator:
self._warn_exit_early()
raise
# Note: we catch any BaseException instead of just Exception instances
# to also include KeyboardInterrupt
except BaseException:
self._exception = True
self._abort()
raise
finally:
# Store the unconsumed tasks and terminate the workers if necessary
_remaining_outputs = [] if self._exception else self._jobs
self._jobs = collections.deque()
self._jobs_set = set()
self._running = False
if not detach_generator_exit:
self._terminate_and_reset()
while len(_remaining_outputs) > 0:
batched_results = _remaining_outputs.popleft()
batched_results = batched_results.get_result(self.timeout)
for result in batched_results:
yield result
def _wait_retrieval(self):
"""Return True if we need to continue retrieving some tasks."""
# If the input load is still being iterated over, it means that tasks
# are still on the dispatch waitlist and their results will need to
# be retrieved later on.
if self._iterating:
return True
# If some of the dispatched tasks are still being processed by the
# workers, wait for the compute to finish before starting retrieval
if self.n_completed_tasks < self.n_dispatched_tasks:
return True
# For backends that does not support retrieving asynchronously the
# result to the main process, all results must be carefully retrieved
# in the _retrieve loop in the main thread while the backend is alive.
# For other backends, the actual retrieval is done asynchronously in
# the callback thread, and we can terminate the backend before the
# `self._jobs` result list has been emptied. The remaining results
# will be collected in the `finally` step of the generator.
if not self._backend.supports_retrieve_callback:
if len(self._jobs) > 0:
return True
return False
def _retrieve(self):
timeout_control_job = None
while self._wait_retrieval():
# If the callback thread of a worker has signaled that its task
# triggered an exception, or if the retrieval loop has raised an
# exception (e.g. `GeneratorExit`), exit the loop and surface the
# worker traceback.
if self._aborting:
self._raise_error_fast()
break
nb_jobs = len(self._jobs)
# Now wait for a job to be ready for retrieval.
if self.return_ordered:
# Case ordered: wait for completion (or error) of the next job
# that have been dispatched and not retrieved yet. If no job
# have been dispatched yet, wait for dispatch.
# We assume that the time to wait for the next job to be
# dispatched is always low, so that the timeout
# control only have to be done on the amount of time the next
# dispatched job is pending.
if (nb_jobs == 0) or (
self._jobs[0].get_status(timeout=self.timeout) == TASK_PENDING
):
time.sleep(0.01)
continue
elif nb_jobs == 0:
# Case unordered: jobs are added to the list of jobs to
# retrieve `self._jobs` only once completed or in error, which
# is too late to enable timeout control in the same way than in
# the previous case.
# Instead, if no job is ready to be retrieved yet, we
# arbitrarily pick a dispatched job, and the timeout control is
# done such that an error is raised if this control job
# timeouts before any other dispatched job has completed and
# been added to `self._jobs` to be retrieved.
if timeout_control_job is None:
timeout_control_job = next(iter(self._jobs_set), None)
# NB: it can be None if no job has been dispatched yet.
if timeout_control_job is not None:
timeout_control_job.get_status(timeout=self.timeout)
time.sleep(0.01)
continue
elif timeout_control_job is not None:
# Case unordered, when `nb_jobs > 0`:
# It means that a job is ready to be retrieved, so no timeout
# will occur during this iteration.
# Before proceeding to retrieval of the next ready job, reset
# the timeout control state to prepare the next iteration.
timeout_control_job._completion_timeout_counter = None
timeout_control_job = None
# We need to be careful: the job list can be filling up as
# we empty it and Python list are not thread-safe by
# default hence the use of the lock
with self._lock:
batched_results = self._jobs.popleft()
if not self.return_ordered:
self._jobs_set.remove(batched_results)
# Flatten the batched results to output one output at a time
batched_results = batched_results.get_result(self.timeout)
for result in batched_results:
self._nb_consumed += 1
yield result
def _raise_error_fast(self):
"""If we are aborting, raise if a job caused an error."""
# Find the first job whose status is TASK_ERROR if it exists.
with self._lock:
error_job = next(
(job for job in self._jobs if job.status == TASK_ERROR), None
)
# If this error job exists, immediately raise the error by
# calling get_result. This job might not exists if abort has been
# called directly or if the generator is gc'ed.
if error_job is not None:
error_job.get_result(self.timeout)
def _warn_exit_early(self):
"""Warn the user if the generator is gc'ed before being consumned."""
ready_outputs = self.n_completed_tasks - self._nb_consumed
is_completed = self._is_completed()
msg = ""
if ready_outputs:
msg += (
f"{ready_outputs} tasks have been successfully executed but not used."
)
if not is_completed:
msg += " Additionally, "
if not is_completed:
msg += (
f"{self.n_dispatched_tasks - self.n_completed_tasks} tasks "
"which were still being processed by the workers have been "
"cancelled."
)
if msg:
msg += (
" You could benefit from adjusting the input task "
"iterator to limit unnecessary computation time."
)
warnings.warn(msg)
def _get_sequential_output(self, iterable):
"""Separate loop for sequential output.
This simplifies the traceback in case of errors and reduces the
overhead of calling sequential tasks with `joblib`.
"""
try:
self._iterating = True
self._original_iterator = iterable
batch_size = self._get_batch_size()
if batch_size != 1:
it = iter(iterable)
iterable_batched = iter(
lambda: tuple(itertools.islice(it, batch_size)), ()
)
iterable = (task for batch in iterable_batched for task in batch)
# first yield returns None, for internal use only. This ensures
# that we enter the try/except block and setup the generator.
yield None
# Sequentially call the tasks and yield the results.
for func, args, kwargs in iterable:
self.n_dispatched_batches += 1
self.n_dispatched_tasks += 1
res = func(*args, **kwargs)
self.n_completed_tasks += 1
self.print_progress()
yield res
self._nb_consumed += 1
except BaseException:
self._exception = True
self._aborting = True
self._aborted = True
raise
finally:
self._running = False
self._iterating = False
self._original_iterator = None
self.print_progress()
def _reset_run_tracking(self):
"""Reset the counters and flags used to track the execution."""
# Makes sur the parallel instance was not previously running in a
# thread-safe way.
with getattr(self, "_lock", nullcontext()):
if self._running:
msg = "This Parallel instance is already running !"
if self.return_generator is True:
msg += (
" Before submitting new tasks, you must wait for the "
"completion of all the previous tasks, or clean all "
"references to the output generator."
)
raise RuntimeError(msg)
self._running = True
# Counter to keep track of the task dispatched and completed.
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
# Following count is incremented by one each time the user iterates
# on the output generator, it is used to prepare an informative
# warning message in case the generator is deleted before all the
# dispatched tasks have been consumed.
self._nb_consumed = 0
# Following flags are used to synchronize the threads in case one of
# the tasks error-out to ensure that all workers abort fast and that
# the backend terminates properly.
# Set to True as soon as a worker signals that a task errors-out
self._exception = False
# Set to True in case of early termination following an incident
self._aborting = False
# Set to True after abortion is complete
self._aborted = False
def __call__(self, iterable):
"""Main function to dispatch parallel tasks."""
self._reset_run_tracking()
self.n_tasks = len(iterable) if hasattr(iterable, "__len__") else None
self._start_time = time.time()
if not self._managed_backend:
n_jobs = self._initialize_backend()
else:
n_jobs = self._effective_n_jobs()
if n_jobs == 1:
# If n_jobs==1, run the computation sequentially and return
# immediately to avoid overheads.
output = self._get_sequential_output(iterable)
next(output)
return output if self.return_generator else list(output)
# Let's create an ID that uniquely identifies the current call. If the
# call is interrupted early and that the same instance is immediately
# reused, this id will be used to prevent workers that were
# concurrently finalizing a task from the previous call to run the
# callback.
with self._lock:
self._call_id = uuid4().hex
# self._effective_n_jobs should be called in the Parallel.__call__
# thread only -- store its value in an attribute for further queries.
self._cached_effective_n_jobs = n_jobs
if isinstance(self._backend, LokyBackend):
# For the loky backend, we add a callback executed when reducing
# BatchCalls, that makes the loky executor use a temporary folder
# specific to this Parallel object when pickling temporary memmaps.
# This callback is necessary to ensure that several Parallel
# objects using the same reusable executor don't use the same
# temporary resources.
def _batched_calls_reducer_callback():
# Relevant implementation detail: the following lines, called
# when reducing BatchedCalls, are called in a thread-safe
# situation, meaning that the context of the temporary folder
# manager will not be changed in between the callback execution
# and the end of the BatchedCalls pickling. The reason is that
# pickling (the only place where set_current_context is used)
# is done from a single thread (the queue_feeder_thread).
self._backend._workers._temp_folder_manager.set_current_context( # noqa
self._id
)
self._reducer_callback = _batched_calls_reducer_callback
# self._effective_n_jobs should be called in the Parallel.__call__
# thread only -- store its value in an attribute for further queries.
self._cached_effective_n_jobs = n_jobs
backend_name = self._backend.__class__.__name__
if n_jobs == 0:
raise RuntimeError("%s has no active worker." % backend_name)
self._print(f"Using backend {backend_name} with {n_jobs} concurrent workers.")
if hasattr(self._backend, "start_call"):
self._backend.start_call()
# Following flag prevents double calls to `backend.stop_call`.
self._calling = True
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == "all":
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, "endswith"):
pre_dispatch = eval_expr(pre_dispatch.replace("n_jobs", str(n_jobs)))
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
# TODO: this iterator should be batch_size * n_jobs
iterator = itertools.islice(iterator, self._pre_dispatch_amount)
# Use a caching dict for callables that are pickled with cloudpickle to
# improve performances. This cache is used only in the case of
# functions that are defined in the __main__ module, functions that
# are defined locally (inside another function) and lambda expressions.
self._pickle_cache = dict()
output = self._get_outputs(iterator, pre_dispatch)
self._call_ref = weakref.ref(output)
# The first item from the output is blank, but it makes the interpreter
# progress until it enters the Try/Except block of the generator and
# reaches the first `yield` statement. This starts the asynchronous
# dispatch of the tasks to the workers.
next(output)
return output if self.return_generator else list(output)
def __repr__(self):
return "%s(n_jobs=%s)" % (self.__class__.__name__, self.n_jobs)
| Parallel |
python | huggingface__transformers | src/transformers/models/opt/modeling_opt.py | {
"start": 11988,
"end": 12336
} | class ____(PreTrainedModel):
config: OPTConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["OPTDecoderLayer"]
_supports_attention_backend = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
| OPTPreTrainedModel |
python | apache__thrift | compiler/cpp/test/compiler/staleness_check.py | {
"start": 907,
"end": 5627
} | class ____(unittest.TestCase):
CURRENT_DIR_PATH = os.path.dirname(os.path.realpath(__file__))
THRIFT_EXECUTABLE_PATH = None
SINGLE_THRIFT_FILE_PATH = os.path.join(CURRENT_DIR_PATH, "Single.thrift")
INCLUDING_THRIFT_FILE_PATH = os.path.join(CURRENT_DIR_PATH, "Including.thrift")
INCLUDED_THRIFT_FILE_PATH = os.path.join(CURRENT_DIR_PATH, "Included.thrift")
def test_staleness_check_of_single_thrift_file_without_changed_output(self):
temp_dir = tempfile.mkdtemp(dir=TestStalenessCheck.CURRENT_DIR_PATH)
command = [TestStalenessCheck.THRIFT_EXECUTABLE_PATH, "-gen", "cpp", "-o", temp_dir]
command += [TestStalenessCheck.SINGLE_THRIFT_FILE_PATH]
subprocess.call(command)
used_file_path = os.path.join(temp_dir, "gen-cpp", "Single_constants.cpp")
first_modification_time = os.path.getmtime(os.path.join(used_file_path))
time.sleep(1.0)
subprocess.call(command)
second_modification_time = os.path.getmtime(used_file_path)
self.assertEqual(second_modification_time, first_modification_time)
shutil.rmtree(temp_dir, ignore_errors=True)
def test_staleness_check_of_single_thrift_file_with_changed_output(self):
temp_dir = tempfile.mkdtemp(dir=TestStalenessCheck.CURRENT_DIR_PATH)
command = [TestStalenessCheck.THRIFT_EXECUTABLE_PATH, "-gen", "cpp", "-o", temp_dir]
command += [TestStalenessCheck.SINGLE_THRIFT_FILE_PATH]
subprocess.call(command)
used_file_path = os.path.join(temp_dir, "gen-cpp", "Single_constants.cpp")
first_modification_time = os.path.getmtime(os.path.join(used_file_path))
used_file = open(used_file_path, "r")
first_contents = used_file.read()
used_file.close()
used_file = open(used_file_path, "a")
used_file.write("\n/* This is a comment */\n")
used_file.close()
time.sleep(1.0)
subprocess.call(command)
second_modification_time = os.path.getmtime(used_file_path)
used_file = open(used_file_path, "r")
second_contents = used_file.read()
used_file.close()
self.assertGreater(second_modification_time, first_modification_time)
self.assertEqual(first_contents, second_contents)
shutil.rmtree(temp_dir, ignore_errors=True)
def test_staleness_check_of_included_file(self):
temp_dir = tempfile.mkdtemp(dir=TestStalenessCheck.CURRENT_DIR_PATH)
temp_included_file_path = os.path.join(temp_dir, "Included.thrift")
temp_including_file_path = os.path.join(temp_dir, "Including.thrift")
shutil.copy2(TestStalenessCheck.INCLUDED_THRIFT_FILE_PATH, temp_included_file_path)
shutil.copy2(TestStalenessCheck.INCLUDING_THRIFT_FILE_PATH, temp_including_file_path)
command = [TestStalenessCheck.THRIFT_EXECUTABLE_PATH, "-gen", "cpp", "-recurse", "-o", temp_dir]
command += [temp_including_file_path]
subprocess.call(command)
included_constants_cpp_file_path = os.path.join(temp_dir, "gen-cpp", "Included_constants.cpp")
including_constants_cpp_file_path = os.path.join(temp_dir, "gen-cpp", "Including_constants.cpp")
included_constants_cpp_first_modification_time = os.path.getmtime(included_constants_cpp_file_path)
including_constants_cpp_first_modification_time = os.path.getmtime(including_constants_cpp_file_path)
temp_included_file = open(temp_included_file_path, "a")
temp_included_file.write("\nconst i32 an_integer = 42\n")
temp_included_file.close()
time.sleep(1.0)
subprocess.call(command)
included_constants_cpp_second_modification_time = os.path.getmtime(included_constants_cpp_file_path)
including_constants_cpp_second_modification_time = os.path.getmtime(including_constants_cpp_file_path)
self.assertGreater(
included_constants_cpp_second_modification_time, included_constants_cpp_first_modification_time)
self.assertEqual(
including_constants_cpp_first_modification_time, including_constants_cpp_second_modification_time)
shutil.rmtree(temp_dir, ignore_errors=True)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TestStalenessCheck))
return suite
if __name__ == "__main__":
# The path of Thrift compiler is passed as an argument to the test script.
# Remove it to not confuse the unit testing framework
TestStalenessCheck.THRIFT_EXECUTABLE_PATH = sys.argv[-1]
del sys.argv[-1]
unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
| TestStalenessCheck |
python | pytorch__pytorch | torch/fx/_graph_pickler.py | {
"start": 1234,
"end": 1451
} | class ____:
# A filter for which ops will cause the pickler to raise a
# BypassFxGraphCache exception. If None then all ops are allowed.
ops_filter: Optional[Callable[[str], bool]] = _ops_filter_safe
| Options |
python | doocs__leetcode | solution/2000-2099/2047.Number of Valid Words in a Sentence/Solution.py | {
"start": 0,
"end": 664
} | class ____:
def countValidWords(self, sentence: str) -> int:
def check(s: str) -> bool:
st = False
for i, c in enumerate(s):
if c.isdigit() or (c in "!.," and i < len(s) - 1):
return False
if c == "-":
if (
st
or i in (0, len(s) - 1)
or not s[i - 1].isalpha()
or not s[i + 1].isalpha()
):
return False
st = True
return True
return sum(check(s) for s in sentence.split())
| Solution |
python | django__django | tests/delete/models.py | {
"start": 1583,
"end": 2095
} | class ____(models.Model):
db_setdefault = models.ForeignKey(
RelatedDbOptionParent,
models.DB_SET_DEFAULT,
db_default=models.Value(1),
related_name="db_setdefault_set",
)
db_setdefault_none = models.ForeignKey(
RelatedDbOptionParent,
models.DB_SET_DEFAULT,
db_default=None,
null=True,
related_name="db_setnull_nullable_set",
)
class Meta:
required_db_features = {"supports_on_delete_db_default"}
| SetDefaultDbModel |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modular_glm4v_moe.py | {
"start": 21693,
"end": 21860
} | class ____(Glm4MoeDecoderLayer):
def __init__(self, config: Glm4vMoeTextConfig, layer_idx: int):
super().__init__(config, layer_idx)
| Glm4vMoeTextDecoderLayer |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_tilde.py | {
"start": 40,
"end": 5167
} | class ____(util.MdCase):
"""Test escaping cases for Tilde with smart enabled."""
extension = [
'pymdownx.tilde'
]
extension_configs = {
"pymdownx.tilde": {
"smart_delete": True
}
}
def test_case_1(self):
"""Test case 1."""
self.check_markdown(
R"CH~3~CH~2~OH",
"<p>CH<sub>3</sub>CH<sub>2</sub>OH</p>",
True
)
def test_case_2(self):
"""Test case 2."""
self.check_markdown(
R"Text~subscript~",
"<p>Text<sub>subscript</sub></p>",
True
)
def test_case_3(self):
"""Test case 3."""
self.check_markdown(
R"Text~subscript failed~",
"<p>Text~subscript failed~</p>",
True
)
def test_case_4(self):
"""Test case 4."""
self.check_markdown(
R"Text~subscript\ success~",
"<p>Text<sub>subscript success</sub></p>",
True
)
def test_case_5(self):
"""Test case 5."""
self.check_markdown(
R"Test: ~~ Won't delete ~~",
"<p>Test: ~~ Won't delete ~~</p>",
True
)
def test_case_6(self):
"""Test case 6."""
self.check_markdown(
R"Test: ~~Will delete~~",
"<p>Test: <del>Will delete</del></p>",
True
)
def test_case_7(self):
"""Test case 7."""
self.check_markdown(
R"Test: \~\~Escaped\~\~",
"<p>Test: ~~Escaped~~</p>",
True
)
def test_case_8(self):
"""Test case 8."""
self.check_markdown(
R"Test: ~~This will all be deleted ~~because of the placement of the center tilde.~~",
"<p>Test: <del>This will all be deleted ~~because of the placement of the center tilde.</del></p>",
True
)
def test_case_9(self):
"""Test case 9."""
self.check_markdown(
R"Test: ~~This will all be deleted ~~ because of the placement of the center tilde.~~",
"<p>Test: <del>This will all be deleted ~~ because of the placement of the center tilde.</del></p>",
True
)
def test_case_10(self):
"""Test case 10."""
self.check_markdown(
R"Test: ~~This will NOT all be deleted~~ because of the placement of the center tilde.~~",
"<p>Test: <del>This will NOT all be deleted</del> because of the placement of the center tilde.~~</p>",
True
)
def test_case_11(self):
"""Test case 11."""
self.check_markdown(
R"Test: ~~This will all be deleted~ because of the token is less than that of the tilde.~~",
"<p>Test: <del>This will all be deleted~ because of the token is less than that of the tilde.</del></p>",
True
)
def test_complex_cases(self):
"""Test some complex cases."""
self.check_markdown(
R'''
~~~I'm\ delete\ and\ sub~ I am just delete.~~
~~~I'm\ delete\ and\ sub!~~\ I\ am\ just\ sub.~
~sub\ and\ ~~sub\ delete~~~ and ~sub~
~~delete and ~sub\ delete~~~ and ~sub~
~~~I'm\ sub\ and\ delete~ I am just delete.~~ ~sub~
~~~I'm\ delete\ and\ sub!~~\ I\ am\ just\ sub.~ ~sub~
~sub\ and\ ~~sub\ delete~~~ and not sub~
~~delete and ~sub\ delete~~~ and not sub~
~sub\ and\ ~~sub\ delete~~~
~~delete and ~sub\ delete~~~
~sub\ ~~sub\ delete~~\ sub~
~~~sub\ and\ delete~ delete~~: foo bar ~~delete~~
~~~sub\ and\ delete~~\ sub~ foo bar ~~delete~~
~sub\ and\ ~~sub\ delete~~~ ~~delete~~
~~delete and ~sub\ delete~~~ ~~delete~~
''',
'''
<p><del><sub>I'm delete and sub</sub> I am just delete.</del></p>
<p><sub><del>I'm delete and sub!</del> I am just sub.</sub></p>
<p><sub>sub and <del>sub delete</del></sub> and <sub>sub</sub></p>
<p><del>delete and <sub>sub delete</sub></del> and <sub>sub</sub></p>
<p><del><sub>I'm sub and delete</sub> I am just delete.</del> <sub>sub</sub></p>
<p><sub><del>I'm delete and sub!</del> I am just sub.</sub> <sub>sub</sub></p>
<p><sub>sub and <del>sub delete</del></sub> and not sub~</p>
<p><del>delete and <sub>sub delete</sub></del> and not sub~</p>
<p><sub>sub and <del>sub delete</del></sub></p>
<p><del>delete and <sub>sub delete</sub></del></p>
<p><sub>sub <del>sub delete</del> sub</sub></p>
<p><del><sub>sub and delete</sub> delete</del>: foo bar <del>delete</del></p>
<p><sub><del>sub and delete</del> sub</sub> foo bar <del>delete</del></p>
<p><sub>sub and <del>sub delete</del></sub> <del>delete</del></p>
<p><del>delete and <sub>sub delete</sub></del> <del>delete</del></p>
''',
True
)
| TestTildeSmart |
python | spyder-ide__spyder | external-deps/python-lsp-server/pylsp/lsp.py | {
"start": 1423,
"end": 1582
} | class ____:
Markup = 1
Code = 2
# https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#errorCodes
| NotebookCellKind |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 91898,
"end": 91971
} | class ____(Binop):
operation = operator.eq
_operator_repr = "=="
| EQ |
python | PyCQA__pylint | tests/functional/i/implicit/implicit_flag_alias.py | {
"start": 209,
"end": 369
} | class ____(IntFlag):
"""Class with flags that overlap using explicit union syntax"""
X = 1
W = 2
R = 4
RO = 4
RW = R | W
| ExplicitUnionFlags |
python | django__django | tests/admin_views/admin.py | {
"start": 3101,
"end": 3434
} | class ____(admin.TabularInline):
model = Article
fk_name = "section"
prepopulated_fields = {"title": ("content",)}
fieldsets = (
("Some fields", {"classes": ("collapse",), "fields": ("title", "content")}),
("Some other fields", {"classes": ("wide",), "fields": ("date", "section")}),
)
| ArticleInline |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 19846,
"end": 24122
} | class ____(nn.Module):
def __init__(self, config, is_cross_attention=False):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention heads (%d)"
% (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.save_attention = False
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
**kwargs: Unpack[TransformersKwargs],
):
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
mixed_query_layer = self.query(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if is_cross_attention and self.save_attention:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs_dropped = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return (
context_layer,
attention_probs,
)
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Blip2QFormer
| Blip2QFormerMultiHeadAttention |
python | huggingface__transformers | src/transformers/models/clvp/modeling_clvp.py | {
"start": 20719,
"end": 25837
} | class ____(nn.Module):
r"""
Compute a single vector summary of a sequence hidden states.
Args:
config ([`ClvpConfig`]):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:
- `"last"` -- Take the last token hidden state (like XLNet)
- `"first"` -- Take the first token hidden state (like Bert)
- `"mean"` -- Take the mean of all tokens hidden states
- `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- `"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
(otherwise to `config.hidden_size`).
- **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
another string or `None` will add no activation.
- **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
- **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.
"""
def __init__(self, config: ClvpConfig):
super().__init__()
self.summary_type = getattr(config, "summary_type", "last")
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = nn.Identity()
if hasattr(config, "summary_use_proj") and config.summary_use_proj:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
activation_string = getattr(config, "summary_activation", None)
self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity()
self.first_dropout = nn.Identity()
if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = nn.Identity()
if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(
self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
) -> torch.FloatTensor:
"""
Compute a single vector summary of a sequence hidden states.
Args:
hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`):
The hidden states of the last layer.
cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token.
Returns:
`torch.FloatTensor`: The summary of the sequence hidden states.
"""
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = hidden_states.mean(dim=1)
elif self.summary_type == "cls_index":
if cls_index is None:
cls_index = torch.full_like(
hidden_states[..., :1, :],
hidden_states.shape[-2] - 1,
dtype=torch.long,
)
else:
cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->ClvpDecoderMLP
| ClvpSequenceSummary |
python | PrefectHQ__prefect | tests/server/orchestration/test_core_policy.py | {
"start": 4488,
"end": 7365
} | class ____:
@pytest.mark.parametrize(
"initial_state_type", [states.StateType.SCHEDULED, states.StateType.PENDING]
)
async def test_running_after_scheduled_start_time_is_not_delayed(
self,
session,
run_type,
initialize_orchestration,
initial_state_type,
):
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
run_type,
*intended_transition,
initial_details={
"scheduled_time": now("UTC") - datetime.timedelta(minutes=5)
},
)
async with WaitForScheduledTime(ctx, *intended_transition) as ctx:
await ctx.validate_proposed_state()
assert ctx.validated_state_type == proposed_state_type
@pytest.mark.parametrize(
"initial_state_type", [states.StateType.SCHEDULED, states.StateType.PENDING]
)
async def test_running_before_scheduled_start_time_is_delayed(
self,
session,
run_type,
initialize_orchestration,
initial_state_type,
):
proposed_state_type = states.StateType.RUNNING
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
run_type,
*intended_transition,
initial_details={"scheduled_time": now("UTC") + timedelta(minutes=5)},
)
async with WaitForScheduledTime(ctx, *intended_transition) as ctx:
await ctx.validate_proposed_state()
assert ctx.response_status == SetStateStatus.WAIT
assert ctx.proposed_state is None
assert abs(ctx.response_details.delay_seconds - 300) < 2
@pytest.mark.parametrize(
"proposed_state_type",
[
states.StateType.COMPLETED,
states.StateType.FAILED,
states.StateType.CANCELLED,
states.StateType.CRASHED,
],
)
async def test_scheduling_rule_does_not_fire_against_other_state_types(
self,
session,
run_type,
initialize_orchestration,
proposed_state_type,
):
initial_state_type = states.StateType.SCHEDULED
intended_transition = (initial_state_type, proposed_state_type)
ctx = await initialize_orchestration(
session,
run_type,
*intended_transition,
initial_details={"scheduled_time": now("UTC") + timedelta(minutes=5)},
)
scheduling_rule = WaitForScheduledTime(ctx, *intended_transition)
async with scheduling_rule as ctx:
pass
assert await scheduling_rule.invalid()
@pytest.mark.parametrize("run_type", ["task", "flow"])
| TestWaitForScheduledTimeRule |
python | kamyu104__LeetCode-Solutions | Python/reverse-string.py | {
"start": 29,
"end": 332
} | class ____(object):
def reverseString(self, s):
"""
:type s: List[str]
:rtype: None Do not return anything, modify s in-place instead.
"""
i, j = 0, len(s) - 1
while i < j:
s[i], s[j] = s[j], s[i]
i += 1
j -= 1
| Solution |
python | wandb__wandb | wandb/sdk/lib/service/service_token.py | {
"start": 1324,
"end": 2150
} | class ____(abc.ABC):
"""A way of connecting to a running service process."""
@abc.abstractmethod
def connect(
self,
*,
asyncer: asyncio_manager.AsyncioManager,
) -> ServiceClient:
"""Connect to the service process.
Args:
asyncer: A started AsyncioManager for asyncio operations.
Returns:
A socket object for communicating with the service.
Raises:
WandbServiceConnectionError: on failure to connect.
"""
def save_to_env(self) -> None:
"""Save the token in this process's environment variables."""
os.environ[env.SERVICE] = self._as_env_string()
@abc.abstractmethod
def _as_env_string(self) -> str:
"""Returns a string representation of this token."""
@final
| ServiceToken |
python | pyca__cryptography | tests/hazmat/primitives/test_hmac_vectors.py | {
"start": 2366,
"end": 3073
} | class ____:
def test_blake2b(self, backend):
h = hmac.HMAC(b"0" * 64, hashes.BLAKE2b(digest_size=64), backend)
h.update(b"test")
digest = h.finalize()
assert digest == binascii.unhexlify(
b"b5319122f8a24ba134a0c9851922448104e25be5d1b91265c0c68b22722f0f29"
b"87dba4aeaa69e6bed7edc44f48d6b1be493a3ce583f9c737c53d6bacc09e2f32"
)
def test_blake2s(self, backend):
h = hmac.HMAC(b"0" * 32, hashes.BLAKE2s(digest_size=32), backend)
h.update(b"test")
digest = h.finalize()
assert digest == binascii.unhexlify(
b"51477cc5bdf1faf952cf97bb934ee936de1f4d5d7448a84eeb6f98d23b392166"
)
| TestHMACBLAKE2 |
python | openai__openai-python | src/openai/types/eval_create_response.py | {
"start": 1042,
"end": 1939
} | class ____(BaseModel):
schema_: Dict[str, object] = FieldInfo(alias="schema")
"""
The json schema for the run data source items. Learn how to build JSON schemas
[here](https://json-schema.org/).
"""
type: Literal["logs"]
"""The type of data source. Always `logs`."""
metadata: Optional[Metadata] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
DataSourceConfig: TypeAlias = Annotated[
Union[EvalCustomDataSourceConfig, DataSourceConfigLogs, EvalStoredCompletionsDataSourceConfig],
PropertyInfo(discriminator="type"),
]
| DataSourceConfigLogs |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-twilio/components.py | {
"start": 3881,
"end": 5922
} | class ____(StateMigration):
"""
Migrate legacy `usage_records` state to low-code shape.
- Add empty `parent_slice` to each partition.
- Drop legacy `partition.date_created`.
- Run if any partition lacks `parent_slice`.
Initial:
{
"states": [
{
"cursor": { "start_date": "2025-08-21T00:00:00Z" },
"partition": {
"account_sid": "ACdade166c12e160e9ed0a6088226718fb",
"date_created": "Tue, 17 Nov 2020 04:08:53 +0000"
}
},
{
"cursor": { "start_date": "2025-08-21T00:00:00Z" },
"partition": {
"account_sid": "AC4cac489c46197c9ebc91c840120a4dee",
"date_created": "Wed, 25 Nov 2020 09:36:42 +0000"
}
}
]
}
Final:
{
"states": [
{
"cursor": { "start_date": "2025-08-21T00:00:00Z" },
"partition": { "account_sid": "ACdade166c12e160e9ed0a6088226718fb", "parent_slice": {} }
},
{
"cursor": { "start_date": "2025-08-21T00:00:00Z" },
"partition": { "account_sid": "AC4cac489c46197c9ebc91c840120a4dee", "parent_slice": {} }
}
]
}
"""
def migrate(self, stream_state: Mapping[str, Any]) -> Mapping[str, Any]:
new_state = {"states": []}
for state in stream_state.get("states", []):
partition_state = {}
if "partition" not in state or "account_sid" not in state["partition"]:
continue
partition_state["partition"] = {"account_sid": state["partition"]["account_sid"], "parent_slice": {}}
partition_state["cursor"] = state.get("cursor", {})
new_state["states"].append(partition_state)
return new_state
def should_migrate(self, stream_state: Mapping[str, Any]) -> bool:
if stream_state and any("parent_slice" not in state["partition"] for state in stream_state.get("states", [])):
return True
return False
| TwilioUsageRecordsStateMigration |
python | ray-project__ray | python/ray/_private/function_manager.py | {
"start": 1487,
"end": 29610
} | class ____:
"""A class used to export/load remote functions and actors.
Attributes:
_worker: The associated worker that this manager related.
_functions_to_export: The remote functions to export when
the worker gets connected.
_actors_to_export: The actors to export when the worker gets
connected.
_function_execution_info: The function_id
and execution_info.
_num_task_executions: The function
execution times.
imported_actor_classes: The set of actor classes keys (format:
ActorClass:function_id) that are already in GCS.
"""
def __init__(self, worker):
self._worker = worker
self._functions_to_export = []
self._actors_to_export = []
# This field is a dictionary that maps function IDs
# to a FunctionExecutionInfo object. This should only be used on
# workers that execute remote functions.
self._function_execution_info = defaultdict(lambda: {})
self._num_task_executions = defaultdict(lambda: {})
# A set of all of the actor class keys that have been imported by the
# import thread. It is safe to convert this worker into an actor of
# these types.
self.imported_actor_classes = set()
self._loaded_actor_classes = {}
# Deserialize an ActorHandle will call load_actor_class(). If a
# function closure captured an ActorHandle, the deserialization of the
# function will be:
# -> fetch_and_register_remote_function (acquire lock)
# -> _load_actor_class_from_gcs (acquire lock, too)
# So, the lock should be a reentrant lock.
self.lock = threading.RLock()
self.execution_infos = {}
# This is the counter to keep track of how many keys have already
# been exported so that we can find next key quicker.
self._num_exported = 0
# This is to protect self._num_exported when doing exporting
self._export_lock = threading.Lock()
def increase_task_counter(self, function_descriptor):
function_id = function_descriptor.function_id
self._num_task_executions[function_id] += 1
def get_task_counter(self, function_descriptor):
function_id = function_descriptor.function_id
return self._num_task_executions[function_id]
def compute_collision_identifier(self, function_or_class):
"""The identifier is used to detect excessive duplicate exports.
The identifier is used to determine when the same function or class is
exported many times. This can yield false positives.
Args:
function_or_class: The function or class to compute an identifier
for.
Returns:
The identifier. Note that different functions or classes can give
rise to same identifier. However, the same function should
hopefully always give rise to the same identifier. TODO(rkn):
verify if this is actually the case. Note that if the
identifier is incorrect in any way, then we may give warnings
unnecessarily or fail to give warnings, but the application's
behavior won't change.
"""
import io
string_file = io.StringIO()
dis.dis(function_or_class, file=string_file, depth=2)
collision_identifier = function_or_class.__name__ + ":" + string_file.getvalue()
# Return a hash of the identifier in case it is too large.
return hashlib.sha1(collision_identifier.encode("utf-8")).digest()
def load_function_or_class_from_local(self, module_name, function_or_class_name):
"""Try to load a function or class in the module from local."""
module = importlib.import_module(module_name)
parts = [part for part in function_or_class_name.split(".") if part]
object = module
try:
for part in parts:
object = getattr(object, part)
return object
except Exception:
return None
def export_setup_func(
self, setup_func: Callable, timeout: Optional[int] = None
) -> bytes:
"""Export the setup hook function and return the key."""
pickled_function = pickle_dumps(
setup_func,
"Cannot serialize the worker_process_setup_hook " f"{setup_func.__name__}",
)
function_to_run_id = hashlib.shake_128(pickled_function).digest(
ray_constants.ID_SIZE
)
key = make_function_table_key(
# This value should match with gcs_function_manager.h.
# Otherwise, it won't be GC'ed.
WORKER_PROCESS_SETUP_HOOK_KEY_NAME_GCS.encode(),
# b"FunctionsToRun",
self._worker.current_job_id.binary(),
function_to_run_id,
)
check_oversized_function(
pickled_function, setup_func.__name__, "function", self._worker
)
try:
self._worker.gcs_client.internal_kv_put(
key,
pickle.dumps(
{
"job_id": self._worker.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
}
),
# overwrite
True,
ray_constants.KV_NAMESPACE_FUNCTION_TABLE,
timeout=timeout,
)
except Exception as e:
logger.exception(
"Failed to export the setup hook " f"{setup_func.__name__}."
)
raise e
return key
def export(self, remote_function):
"""Pickle a remote function and export it to redis.
Args:
remote_function: the RemoteFunction object.
"""
if self._worker.load_code_from_local:
function_descriptor = remote_function._function_descriptor
module_name, function_name = (
function_descriptor.module_name,
function_descriptor.function_name,
)
# If the function is dynamic, we still export it to GCS
# even if load_code_from_local is set True.
if (
self.load_function_or_class_from_local(module_name, function_name)
is not None
):
return
function = remote_function._function
pickled_function = remote_function._pickled_function
check_oversized_function(
pickled_function,
remote_function._function_name,
"remote function",
self._worker,
)
key = make_function_table_key(
b"RemoteFunction",
self._worker.current_job_id,
remote_function._function_descriptor.function_id.binary(),
)
if self._worker.gcs_client.internal_kv_exists(key, KV_NAMESPACE_FUNCTION_TABLE):
return
val = pickle.dumps(
{
"job_id": self._worker.current_job_id.binary(),
"function_id": remote_function._function_descriptor.function_id.binary(), # noqa: E501
"function_name": remote_function._function_name,
"module": function.__module__,
"function": pickled_function,
"collision_identifier": self.compute_collision_identifier(function),
"max_calls": remote_function._max_calls,
}
)
self._worker.gcs_client.internal_kv_put(
key, val, True, KV_NAMESPACE_FUNCTION_TABLE
)
def fetch_registered_method(
self, key: str, timeout: Optional[int] = None
) -> Optional[ImportedFunctionInfo]:
vals = self._worker.gcs_client.internal_kv_get(
key, KV_NAMESPACE_FUNCTION_TABLE, timeout=timeout
)
if vals is None:
return None
else:
vals = pickle.loads(vals)
fields = [
"job_id",
"function_id",
"function_name",
"function",
"module",
"max_calls",
]
return ImportedFunctionInfo._make(vals.get(field) for field in fields)
def fetch_and_register_remote_function(self, key):
"""Import a remote function."""
remote_function_info = self.fetch_registered_method(key)
if not remote_function_info:
return False
(
job_id_str,
function_id_str,
function_name,
serialized_function,
module,
max_calls,
) = remote_function_info
function_id = ray.FunctionID(function_id_str)
job_id = ray.JobID(job_id_str)
max_calls = int(max_calls)
# This function is called by ImportThread. This operation needs to be
# atomic. Otherwise, there is race condition. Another thread may use
# the temporary function above before the real function is ready.
with self.lock:
self._num_task_executions[function_id] = 0
try:
function = pickle.loads(serialized_function)
except Exception:
# If an exception was thrown when the remote function was
# imported, we record the traceback and notify the scheduler
# of the failure.
traceback_str = format_error_message(traceback.format_exc())
def f(*args, **kwargs):
raise RuntimeError(
"The remote function failed to import on the "
"worker. This may be because needed library "
"dependencies are not installed in the worker "
"environment or cannot be found from sys.path "
f"{sys.path}:\n\n{traceback_str}"
)
# Use a placeholder method when function pickled failed
self._function_execution_info[function_id] = FunctionExecutionInfo(
function=f, function_name=function_name, max_calls=max_calls
)
# Log the error message. Log at DEBUG level to avoid overly
# spamming the log on import failure. The user gets the error
# via the RuntimeError message above.
logger.debug(
"Failed to unpickle the remote function "
f"'{function_name}' with "
f"function ID {function_id.hex()}. "
f"Job ID:{job_id}."
f"Traceback:\n{traceback_str}. "
)
else:
# The below line is necessary. Because in the driver process,
# if the function is defined in the file where the python
# script was started from, its module is `__main__`.
# However in the worker process, the `__main__` module is a
# different module, which is `default_worker.py`
function.__module__ = module
self._function_execution_info[function_id] = FunctionExecutionInfo(
function=function, function_name=function_name, max_calls=max_calls
)
return True
def get_execution_info(self, job_id, function_descriptor):
"""Get the FunctionExecutionInfo of a remote function.
Args:
job_id: ID of the job that the function belongs to.
function_descriptor: The FunctionDescriptor of the function to get.
Returns:
A FunctionExecutionInfo object.
"""
function_id = function_descriptor.function_id
# If the function has already been loaded,
# There's no need to load again
if function_id in self._function_execution_info:
return self._function_execution_info[function_id]
if self._worker.load_code_from_local:
# Load function from local code.
if not function_descriptor.is_actor_method():
# If the function is not able to be loaded,
# try to load it from GCS,
# even if load_code_from_local is set True
if self._load_function_from_local(function_descriptor) is True:
return self._function_execution_info[function_id]
# Load function from GCS.
# Wait until the function to be executed has actually been
# registered on this worker. We will push warnings to the user if
# we spend too long in this loop.
# The driver function may not be found in sys.path. Try to load
# the function from GCS.
with profiling.profile("wait_for_function"):
self._wait_for_function(function_descriptor, job_id)
try:
function_id = function_descriptor.function_id
info = self._function_execution_info[function_id]
except KeyError as e:
message = (
"Error occurs in get_execution_info: "
"job_id: %s, function_descriptor: %s. Message: %s"
% (job_id, function_descriptor, e)
)
raise KeyError(message)
return info
def _load_function_from_local(self, function_descriptor):
assert not function_descriptor.is_actor_method()
function_id = function_descriptor.function_id
module_name, function_name = (
function_descriptor.module_name,
function_descriptor.function_name,
)
object = self.load_function_or_class_from_local(module_name, function_name)
if object is not None:
# Directly importing from local may break function with dynamic ray.remote,
# such as the _start_controller function utilized for the Ray service.
if isinstance(object, RemoteFunction):
function = object._function
else:
function = object
self._function_execution_info[function_id] = FunctionExecutionInfo(
function=function,
function_name=function_name,
max_calls=0,
)
self._num_task_executions[function_id] = 0
return True
else:
return False
def _wait_for_function(self, function_descriptor, job_id: str, timeout=10):
"""Wait until the function to be executed is present on this worker.
This method will simply loop until the import thread has imported the
relevant function. If we spend too long in this loop, that may indicate
a problem somewhere and we will push an error message to the user.
If this worker is an actor, then this will wait until the actor has
been defined.
Args:
function_descriptor : The FunctionDescriptor of the function that
we want to execute.
job_id: The ID of the job to push the error message to
if this times out.
"""
start_time = time.time()
# Only send the warning once.
warning_sent = False
while True:
with self.lock:
if self._worker.actor_id.is_nil():
if function_descriptor.function_id in self._function_execution_info:
break
else:
key = make_function_table_key(
b"RemoteFunction",
job_id,
function_descriptor.function_id.binary(),
)
if self.fetch_and_register_remote_function(key) is True:
break
else:
assert not self._worker.actor_id.is_nil()
# Actor loading will happen when execute_task is called.
assert self._worker.actor_id in self._worker.actors
break
if time.time() - start_time > timeout:
warning_message = (
"This worker was asked to execute a function "
f"that has not been registered ({function_descriptor}, "
f"node={self._worker.node_ip_address}, "
f"worker_id={self._worker.worker_id.hex()}, "
f"pid={os.getpid()}). You may have to restart Ray."
)
if not warning_sent:
logger.error(warning_message)
ray._private.utils.push_error_to_driver(
self._worker,
ray_constants.WAIT_FOR_FUNCTION_PUSH_ERROR,
warning_message,
job_id=job_id,
)
warning_sent = True
time.sleep(0.001)
def export_actor_class(
self, Class, actor_creation_function_descriptor, actor_method_names
):
if self._worker.load_code_from_local:
module_name, class_name = (
actor_creation_function_descriptor.module_name,
actor_creation_function_descriptor.class_name,
)
# If the class is dynamic, we still export it to GCS
# even if load_code_from_local is set True.
if (
self.load_function_or_class_from_local(module_name, class_name)
is not None
):
return
# `current_job_id` shouldn't be NIL, unless:
# 1) This worker isn't an actor;
# 2) And a previous task started a background thread, which didn't
# finish before the task finished, and still uses Ray API
# after that.
assert not self._worker.current_job_id.is_nil(), (
"You might have started a background thread in a non-actor "
"task, please make sure the thread finishes before the "
"task finishes."
)
job_id = self._worker.current_job_id
key = make_function_table_key(
b"ActorClass",
job_id,
actor_creation_function_descriptor.function_id.binary(),
)
serialized_actor_class = pickle_dumps(
Class,
f"Could not serialize the actor class "
f"{actor_creation_function_descriptor.repr}",
)
actor_class_info = {
"class_name": actor_creation_function_descriptor.class_name.split(".")[-1],
"module": actor_creation_function_descriptor.module_name,
"class": serialized_actor_class,
"job_id": job_id.binary(),
"collision_identifier": self.compute_collision_identifier(Class),
"actor_method_names": json.dumps(list(actor_method_names)),
}
check_oversized_function(
actor_class_info["class"],
actor_class_info["class_name"],
"actor",
self._worker,
)
self._worker.gcs_client.internal_kv_put(
key, pickle.dumps(actor_class_info), True, KV_NAMESPACE_FUNCTION_TABLE
)
# TODO(rkn): Currently we allow actor classes to be defined
# within tasks. I tried to disable this, but it may be necessary
# because of https://github.com/ray-project/ray/issues/1146.
def load_actor_class(self, job_id, actor_creation_function_descriptor):
"""Load the actor class.
Args:
job_id: job ID of the actor.
actor_creation_function_descriptor: Function descriptor of
the actor constructor.
Returns:
The actor class.
"""
function_id = actor_creation_function_descriptor.function_id
# Check if the actor class already exists in the cache.
actor_class = self._loaded_actor_classes.get(function_id, None)
if actor_class is None:
# Load actor class.
if self._worker.load_code_from_local:
# Load actor class from local code first.
actor_class = self._load_actor_class_from_local(
actor_creation_function_descriptor
)
# If the actor is unable to be loaded
# from local, try to load it
# from GCS even if load_code_from_local is set True
if actor_class is None:
actor_class = self._load_actor_class_from_gcs(
job_id, actor_creation_function_descriptor
)
else:
# Load actor class from GCS.
actor_class = self._load_actor_class_from_gcs(
job_id, actor_creation_function_descriptor
)
# Save the loaded actor class in cache.
self._loaded_actor_classes[function_id] = actor_class
# Generate execution info for the methods of this actor class.
module_name = actor_creation_function_descriptor.module_name
actor_class_name = actor_creation_function_descriptor.class_name
actor_methods = inspect.getmembers(
actor_class, predicate=is_function_or_method
)
for actor_method_name, actor_method in actor_methods:
# Actor creation function descriptor use a unique function
# hash to solve actor name conflict. When constructing an
# actor, the actor creation function descriptor will be the
# key to find __init__ method execution info. So, here we
# use actor creation function descriptor as method descriptor
# for generating __init__ method execution info.
if actor_method_name == "__init__":
method_descriptor = actor_creation_function_descriptor
else:
method_descriptor = PythonFunctionDescriptor(
module_name, actor_method_name, actor_class_name
)
method_id = method_descriptor.function_id
executor = self._make_actor_method_executor(
actor_method_name, actor_method
)
self._function_execution_info[method_id] = FunctionExecutionInfo(
function=executor,
function_name=actor_method_name,
max_calls=0,
)
self._num_task_executions[method_id] = 0
self._num_task_executions[function_id] = 0
return actor_class
def _load_actor_class_from_local(self, actor_creation_function_descriptor):
"""Load actor class from local code."""
module_name, class_name = (
actor_creation_function_descriptor.module_name,
actor_creation_function_descriptor.class_name,
)
object = self.load_function_or_class_from_local(module_name, class_name)
if object is not None:
if isinstance(object, ray.actor.ActorClass):
return object.__ray_metadata__.modified_class
else:
return object
else:
return None
def _create_fake_actor_class(
self, actor_class_name, actor_method_names, traceback_str
):
class TemporaryActor:
async def __dummy_method(self):
"""Dummy method for this fake actor class to work for async actors.
Without this method, this temporary actor class fails to initialize
if the original actor class was async."""
pass
def temporary_actor_method(*args, **kwargs):
raise RuntimeError(
f"The actor with name {actor_class_name} "
"failed to import on the worker. This may be because "
"needed library dependencies are not installed in the "
f"worker environment:\n\n{traceback_str}"
)
for method in actor_method_names:
setattr(TemporaryActor, method, temporary_actor_method)
return TemporaryActor
def _load_actor_class_from_gcs(self, job_id, actor_creation_function_descriptor):
"""Load actor class from GCS."""
key = make_function_table_key(
b"ActorClass",
job_id,
actor_creation_function_descriptor.function_id.binary(),
)
# Fetch raw data from GCS.
vals = self._worker.gcs_client.internal_kv_get(key, KV_NAMESPACE_FUNCTION_TABLE)
fields = ["job_id", "class_name", "module", "class", "actor_method_names"]
if vals is None:
vals = {}
else:
vals = pickle.loads(vals)
(job_id_str, class_name, module, pickled_class, actor_method_names) = (
vals.get(field) for field in fields
)
class_name = ensure_str(class_name)
module_name = ensure_str(module)
job_id = ray.JobID(job_id_str)
actor_method_names = json.loads(ensure_str(actor_method_names))
actor_class = None
try:
with self.lock:
actor_class = pickle.loads(pickled_class)
except Exception:
logger.debug("Failed to load actor class %s.", class_name)
# If an exception was thrown when the actor was imported, we record
# the traceback and notify the scheduler of the failure.
traceback_str = format_error_message(traceback.format_exc())
# The actor class failed to be unpickled, create a fake actor
# class instead (just to produce error messages and to prevent
# the driver from hanging).
actor_class = self._create_fake_actor_class(
class_name, actor_method_names, traceback_str
)
# The below line is necessary. Because in the driver process,
# if the function is defined in the file where the python script
# was started from, its module is `__main__`.
# However in the worker process, the `__main__` module is a
# different module, which is `default_worker.py`
actor_class.__module__ = module_name
return actor_class
def _make_actor_method_executor(self, method_name: str, method):
"""Make an executor that wraps a user-defined actor method.
The wrapped method updates the worker's internal state and performs any
necessary checkpointing operations.
Args:
method_name: The name of the actor method.
method: The actor method to wrap. This should be a
method defined on the actor class and should therefore take an
instance of the actor as the first argument.
Returns:
A function that executes the given actor method on the worker's
stored instance of the actor. The function also updates the
worker's internal state to record the executed method.
"""
def actor_method_executor(__ray_actor, *args, **kwargs):
# Execute the assigned method.
is_bound = is_class_method(method) or is_static_method(
type(__ray_actor), method_name
)
if is_bound:
return method(*args, **kwargs)
else:
return method(__ray_actor, *args, **kwargs)
# Set method_name and method as attributes to the executor closure
# so we can make decision based on these attributes in task executor.
# Precisely, asyncio support requires to know whether:
# - the method is a ray internal method: starts with __ray
# - the method is a coroutine function: defined by async def
actor_method_executor.name = method_name
actor_method_executor.method = method
return actor_method_executor
| FunctionActorManager |
python | run-llama__llama_index | llama-index-core/llama_index/core/ingestion/transformations.py | {
"start": 2364,
"end": 2873
} | class ____(BaseModel):
"""
A class containing metadata for a type of transformation that can be in a pipeline.
"""
name: str = Field(
description="Unique and human-readable name for the type of transformation"
)
transformation_category: TransformationCategories = Field(
description="Type of transformation"
)
component_type: Type[BaseComponent] = Field(
description="Type of component that implements the transformation"
)
| ConfigurableTransformation |
python | wandb__wandb | wandb/vendor/pygments/lexers/parsers.py | {
"start": 8550,
"end": 9013
} | class ____(DelegatingLexer):
"""
A lexer for `Ragel`_ in a Ruby host file.
.. versionadded:: 1.1
"""
name = 'Ragel in Ruby Host'
aliases = ['ragel-ruby', 'ragel-rb']
filenames = ['*.rl']
def __init__(self, **options):
super(RagelRubyLexer, self).__init__(RubyLexer, RagelEmbeddedLexer,
**options)
def analyse_text(text):
return '@LANG: ruby' in text
| RagelRubyLexer |
python | numpy__numpy | numpy/f2py/tests/test_array_from_pyobj.py | {
"start": 11086,
"end": 11439
} | class ____:
def test_in_out(self):
assert str(intent.in_.out) == "intent(in,out)"
assert intent.in_.c.is_intent("c")
assert not intent.in_.c.is_intent_exact("c")
assert intent.in_.c.is_intent_exact("c", "in")
assert intent.in_.c.is_intent_exact("in", "c")
assert not intent.in_.is_intent("c")
| TestIntent |
python | sqlalchemy__sqlalchemy | test/sql/test_returning.py | {
"start": 34067,
"end": 36430
} | class ____(fixtures.TablesTest):
__requires__ = ("delete_returning",)
run_define_tables = "each"
__sparse_driver_backend__ = True
define_tables = InsertReturnDefaultsTest.define_tables
def test_delete(self, connection):
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
result = connection.execute(t1.delete().return_defaults(t1.c.upddef))
eq_(
[result.returned_defaults._mapping[k] for k in (t1.c.upddef,)], [1]
)
def test_delete_empty_return_defaults(self, connection):
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=5))
result = connection.execute(t1.delete().return_defaults())
# there's no "delete" default, so we get None. we have to
# ask for them in all cases
eq_(result.returned_defaults, None)
def test_delete_non_default(self, connection):
"""test that a column not marked at all as a
default works with this feature."""
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
result = connection.execute(t1.delete().return_defaults(t1.c.data))
eq_(
[result.returned_defaults._mapping[k] for k in (t1.c.data,)],
[None],
)
def test_delete_non_default_plus_default(self, connection):
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
result = connection.execute(
t1.delete().return_defaults(t1.c.data, t1.c.upddef)
)
eq_(
dict(result.returned_defaults._mapping),
{"data": None, "upddef": 1},
)
def test_delete_supplemental_cols(self, connection):
"""with supplemental_cols, we can get back arbitrary cols."""
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
result = connection.execute(
t1.delete().return_defaults(
t1.c.id, supplemental_cols=[t1.c.data, t1.c.insdef]
)
)
row = result.returned_defaults
# row has all the cols in it
eq_(row, (1, None, 0))
eq_(row._mapping[t1.c.insdef], 0)
# result is rewound
# but has both return_defaults + supplemental_cols
eq_(result.all(), [(1, None, 0)])
| DeleteReturnDefaultsTest |
python | google__jax | jax/_src/pallas/pipelining/internal.py | {
"start": 2501,
"end": 2713
} | class ____:
stages: Sequence[PipelineStage]
grid: Sequence[int]
def make_token(obj: Hashable) -> str:
"""Returns a fake input ID used to thread data dependencies."""
return f"token_{hash(obj)}"
| NDLoopStruct |
python | redis__redis-py | redis/commands/core.py | {
"start": 187148,
"end": 188225
} | class ____(CommandsProtocol):
"""
Redis commands of HyperLogLogs data type.
see: https://redis.io/topics/data-types-intro#hyperloglogs
"""
def pfadd(self, name: KeyT, *values: FieldT) -> ResponseT:
"""
Adds the specified elements to the specified HyperLogLog.
For more information, see https://redis.io/commands/pfadd
"""
return self.execute_command("PFADD", name, *values)
def pfcount(self, *sources: KeyT) -> ResponseT:
"""
Return the approximated cardinality of
the set observed by the HyperLogLog at key(s).
For more information, see https://redis.io/commands/pfcount
"""
return self.execute_command("PFCOUNT", *sources)
def pfmerge(self, dest: KeyT, *sources: KeyT) -> ResponseT:
"""
Merge N different HyperLogLogs into a single one.
For more information, see https://redis.io/commands/pfmerge
"""
return self.execute_command("PFMERGE", dest, *sources)
AsyncHyperlogCommands = HyperlogCommands
| HyperlogCommands |
python | django__django | django/contrib/gis/geos/libgeos.py | {
"start": 3457,
"end": 3497
} | class ____(Structure):
pass
| GEOSGeom_t |
python | pytorch__pytorch | torch/ao/pruning/_experimental/pruner/FPGM_pruner.py | {
"start": 173,
"end": 3471
} | class ____(BaseStructuredSparsifier):
r"""Filter Pruning via Geometric Median (FPGM) Structured Pruner
This sparsifier prune filter (row) in a tensor according to distances among filters according to
`Filter Pruning via Geometric Median for Deep Convolutional Neural Networks Acceleration <https://arxiv.org/abs/1811.00250>`_.
This sparsifier is controlled by three variables:
1. `sparsity_level` defines the number of filters (rows) that are zeroed-out.
2. `dist` defines the distance measurement type. Default: 3 (L2 distance).
Available options are: [1, 2, (custom callable distance function)].
Note::
Inputs should be a 4D convolutional tensor of shape (N, C, H, W).
- N: output channels size
- C: input channels size
- H: height of kernel
- W: width of kernel
"""
def __init__(self, sparsity_level: float = 0.5, dist: Callable | int | None = None):
defaults = {
"sparsity_level": sparsity_level,
}
if dist is None:
dist = 2
if callable(dist):
self.dist_fn = dist
elif dist == 1:
self.dist_fn = lambda x: torch.cdist(x, x, p=1)
elif dist == 2:
self.dist_fn = lambda x: torch.cdist(x, x, p=2)
else:
raise NotImplementedError("Distance function is not yet implemented.")
super().__init__(defaults=defaults)
def _compute_distance(self, t):
r"""Compute distance across all entries in tensor `t` along all dimension
except for the one identified by dim.
Args:
t (torch.Tensor): tensor representing the parameter to prune
Returns:
distance (torch.Tensor): distance computed across filtters
"""
dim = 0 # prune filter (row)
size = t.size(dim)
slc = [slice(None)] * t.dim()
# flatten the tensor along the dimension
t_flatten = [
t[tuple(slc[:dim] + [slice(i, i + 1)] + slc[dim + 1 :])].reshape(-1)
for i in range(size)
]
t_flatten = torch.stack(t_flatten)
# distance measurement
dist_matrix = self.dist_fn(t_flatten)
# more similar with other filter indicates large in the sum of row
# pyrefly: ignore [bad-argument-type]
distance = torch.sum(torch.abs(dist_matrix), 1)
return distance
def update_mask( # type: ignore[override]
self, module, tensor_name, sparsity_level, **kwargs
):
tensor_weight = getattr(module, tensor_name)
mask = getattr(module.parametrizations, tensor_name)[0].mask
if sparsity_level <= 0:
mask.data = torch.ones_like(mask).bool()
elif sparsity_level >= 1.0:
mask.data = torch.zeros_like(mask).bool()
else:
distance = self._compute_distance(tensor_weight)
tensor_size = tensor_weight.shape[0] # prune filter (row)
nparams_toprune = round(sparsity_level * tensor_size)
nparams_toprune = min(
max(nparams_toprune, 0), tensor_size
) # clamp to [0, tensor_size]
topk = torch.topk(distance, k=nparams_toprune, largest=False)
mask[topk.indices] = False
| FPGMPruner |
python | graphql-python__graphene | graphene/types/tests/test_base64.py | {
"start": 172,
"end": 2798
} | class ____(ObjectType):
base64 = Base64(_in=Base64(name="input"), _match=String(name="match"))
bytes_as_base64 = Base64()
string_as_base64 = Base64()
number_as_base64 = Base64()
def resolve_base64(self, info, _in=None, _match=None):
if _match:
assert _in == _match
return _in
def resolve_bytes_as_base64(self, info):
return b"Hello world"
def resolve_string_as_base64(self, info):
return "Spam and eggs"
def resolve_number_as_base64(self, info):
return 42
schema = Schema(query=Query)
def test_base64_query():
base64_value = base64.b64encode(b"Random string").decode("utf-8")
result = schema.execute(
"""{{ base64(input: "{}", match: "Random string") }}""".format(base64_value)
)
assert not result.errors
assert result.data == {"base64": base64_value}
def test_base64_query_with_variable():
base64_value = base64.b64encode(b"Another string").decode("utf-8")
# test datetime variable in string representation
result = schema.execute(
"""
query GetBase64($base64: Base64) {
base64(input: $base64, match: "Another string")
}
""",
variables={"base64": base64_value},
)
assert not result.errors
assert result.data == {"base64": base64_value}
def test_base64_query_none():
result = schema.execute("""{ base64 }""")
assert not result.errors
assert result.data == {"base64": None}
def test_base64_query_invalid():
bad_inputs = [dict(), 123, "This is not valid base64"]
for input_ in bad_inputs:
result = schema.execute(
"""{ base64(input: $input) }""", variables={"input": input_}
)
assert isinstance(result.errors, list)
assert len(result.errors) == 1
assert isinstance(result.errors[0], GraphQLError)
assert result.data is None
def test_base64_from_bytes():
base64_value = base64.b64encode(b"Hello world").decode("utf-8")
result = schema.execute("""{ bytesAsBase64 }""")
assert not result.errors
assert result.data == {"bytesAsBase64": base64_value}
def test_base64_from_string():
base64_value = base64.b64encode(b"Spam and eggs").decode("utf-8")
result = schema.execute("""{ stringAsBase64 }""")
assert not result.errors
assert result.data == {"stringAsBase64": base64_value}
def test_base64_from_number():
base64_value = base64.b64encode(b"42").decode("utf-8")
result = schema.execute("""{ numberAsBase64 }""")
assert not result.errors
assert result.data == {"numberAsBase64": base64_value}
| Query |
python | redis__redis-py | redis/commands/search/__init__.py | {
"start": 3872,
"end": 5648
} | class ____(Search, AsyncSearchCommands):
class BatchIndexer(Search.BatchIndexer):
"""
A batch indexer allows you to automatically batch
document indexing in pipelines, flushing it every N documents.
"""
async def add_document(
self,
doc_id,
nosave=False,
score=1.0,
payload=None,
replace=False,
partial=False,
no_create=False,
**fields,
):
"""
Add a document to the batch query
"""
self.client._add_document(
doc_id,
conn=self._pipeline,
nosave=nosave,
score=score,
payload=payload,
replace=replace,
partial=partial,
no_create=no_create,
**fields,
)
self.current_chunk += 1
self.total += 1
if self.current_chunk >= self.chunk_size:
await self.commit()
async def commit(self):
"""
Manually commit and flush the batch indexing query
"""
await self._pipeline.execute()
self.current_chunk = 0
def pipeline(self, transaction=True, shard_hint=None):
"""Creates a pipeline for the SEARCH module, that can be used for executing
SEARCH commands, as well as classic core commands.
"""
p = AsyncPipeline(
connection_pool=self.client.connection_pool,
response_callbacks=self._MODULE_CALLBACKS,
transaction=transaction,
shard_hint=shard_hint,
)
p.index_name = self.index_name
return p
| AsyncSearch |
python | matplotlib__matplotlib | lib/matplotlib/ticker.py | {
"start": 65787,
"end": 68353
} | class ____(Locator):
"""
Place ticks at evenly spaced values.
The first time this function is called, it will try to set the number of
ticks to make a nice tick partitioning. Thereafter, the number of ticks
will be fixed to avoid jumping during interactive navigation.
"""
def __init__(self, numticks=None, presets=None):
"""
Parameters
----------
numticks : int or None, default None
Number of ticks. If None, *numticks* = 11.
presets : dict or None, default: None
Dictionary mapping ``(vmin, vmax)`` to an array of locations.
Overrides *numticks* if there is an entry for the current
``(vmin, vmax)``.
"""
self.numticks = numticks
if presets is None:
self.presets = {}
else:
self.presets = presets
@property
def numticks(self):
# Old hard-coded default.
return self._numticks if self._numticks is not None else 11
@numticks.setter
def numticks(self, numticks):
self._numticks = numticks
def set_params(self, numticks=None, presets=None):
"""Set parameters within this locator."""
if presets is not None:
self.presets = presets
if numticks is not None:
self.numticks = numticks
def __call__(self):
"""Return the locations of the ticks."""
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
vmin, vmax = mtransforms.nonsingular(vmin, vmax, expander=0.05)
if (vmin, vmax) in self.presets:
return self.presets[(vmin, vmax)]
if self.numticks == 0:
return []
ticklocs = np.linspace(vmin, vmax, self.numticks)
return self.raise_if_exceeds(ticklocs)
def view_limits(self, vmin, vmax):
"""Try to choose the view limits intelligently."""
if vmax < vmin:
vmin, vmax = vmax, vmin
if vmin == vmax:
vmin -= 1
vmax += 1
if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':
exponent, remainder = divmod(
math.log10(vmax - vmin), math.log10(max(self.numticks - 1, 1)))
exponent -= (remainder < .5)
scale = max(self.numticks - 1, 1) ** (-exponent)
vmin = math.floor(scale * vmin) / scale
vmax = math.ceil(scale * vmax) / scale
return mtransforms.nonsingular(vmin, vmax)
| LinearLocator |
python | huggingface__transformers | tests/models/pvt_v2/test_modeling_pvt_v2.py | {
"start": 9779,
"end": 12365
} | class ____(unittest.TestCase):
@slow
def test_inference_image_classification(self):
# only resize + normalize
image_processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0")
model = PvtV2ForImageClassification.from_pretrained("OpenGVLab/pvt_v2_b0").to(torch_device).eval()
image = prepare_img()
encoded_inputs = image_processor(images=image, return_tensors="pt")
pixel_values = encoded_inputs.pixel_values.to(torch_device)
with torch.no_grad():
outputs = model(pixel_values)
expected_shape = torch.Size((1, model.config.num_labels))
self.assertEqual(outputs.logits.shape, expected_shape)
expected_slice = torch.tensor([-1.4192, -1.9158, -0.9702]).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_model(self):
model = PvtV2Model.from_pretrained("OpenGVLab/pvt_v2_b0").to(torch_device).eval()
image_processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0")
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(pixel_values)
# verify the logits
expected_shape = torch.Size((1, 50, 512))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[-0.3086, 1.0402, 1.1816], [-0.2880, 0.5781, 0.6124], [0.1480, 0.6129, -0.0590]]
).to(torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
@require_accelerate
@require_torch_accelerator
@require_torch_fp16
def test_inference_fp16(self):
r"""
A small test to make sure that inference work in half precision without any problem.
"""
model = PvtV2ForImageClassification.from_pretrained("OpenGVLab/pvt_v2_b0", dtype=torch.float16)
model.to(torch_device)
image_processor = AutoImageProcessor.from_pretrained("OpenGVLab/pvt_v2_b0")
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
pixel_values = inputs.pixel_values.to(torch_device, dtype=torch.float16)
# forward pass to make sure inference works in fp16
with torch.no_grad():
_ = model(pixel_values)
@require_torch
| PvtV2ModelIntegrationTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.