language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | tensorflow__tensorflow | tensorflow/python/compiler/xla/tests/jit_test.py | {
"start": 1642,
"end": 7443
} | class ____(test.TestCase, parameterized.TestCase):
def compute(self, use_jit, compute_fn):
random_seed.set_random_seed(1234)
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(use_jit):
r = compute_fn()
sess.run(variables.global_variables_initializer())
return (r, sess.run(r))
@test_util.run_v2_only
def testJITInEager(self):
with self.assertRaisesRegex(
RuntimeError, "xla.experimental.jit_scope is not supported when eager "
"execution is enabled. Try use it inside tf.function."):
with jit.experimental_jit_scope(True):
constant_op.constant(1)
@test_util.build_as_function_and_v1_graph
def testJITCreateOpsLambda(self):
"""Test several ways of customizing the compilation attribute."""
def create_ops():
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(
-0.1, 0.1, seed=2)):
inputs = random_ops.random_uniform((1,), minval=-10, maxval=10, seed=1)
return inputs
v_false_1_t, v_false_1 = self.compute(False, create_ops)
_, v_false_2 = self.compute(False, create_ops)
v_true_1_t, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
_, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
v_all_true_t, _ = self.compute(True, create_ops)
self.assertFalse(v_false_1_t.op.get_attr("_XlaCompile"))
v_true_1_t_sampler_op = v_true_1_t.graph.get_operation_by_name(
"root/random_uniform/RandomUniform")
v_all_true_t_sampler_op = v_all_true_t.graph.get_operation_by_name(
"root/random_uniform/RandomUniform")
self.assertFalse(v_true_1_t_sampler_op.get_attr("_XlaCompile"))
self.assertTrue(v_all_true_t_sampler_op.get_attr("_XlaCompile"))
self.assertTrue(v_true_1_t.op.get_attr("_XlaCompile"))
self.assertTrue(v_all_true_t.op.get_attr("_XlaCompile"))
# Additionally ensure that where no JIT compilation happens on the
# random_uniform op, the output values are identical to the case
# where no JIT compilation happens anywhere.
self.assertAllClose(v_false_1, v_false_2)
self.assertAllClose(v_true_1, v_true_2)
self.assertAllClose(v_false_1, v_true_1)
@test_util.build_as_function_and_v1_graph
def testJITXlaScope(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True):
# XlaScope 0
a1 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope 1
a2 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope still 1, depth 1
a3 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope still 1, depth 2
a4 = constant_op.constant(1)
# XlaScope still 1, depth 1
a5 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope now 2, depth 0
a6 = constant_op.constant(1)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a3.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a4.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a5.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_2", a6.op.get_attr("_XlaScope"))
@test_util.build_as_function_and_v1_graph
def testJITVariableSeed(self):
"""Test that the stateful initializer is not marked for compilation.
XLA does not currently support seeded initialization and XLA initializers
therefore return different values than non-XLA counterparts. Here
we ensure that if we can disable JIT compilation for the initializers and
get the same variable values as if no JIT compilation happened.
"""
def create_ops():
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(
-0.1, 0.1, seed=2)):
inputs = variable_scope.get_variable("var", (1,))
return inputs
_, v_false_1 = self.compute(False, create_ops)
_, v_false_2 = self.compute(False, create_ops)
_, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
_, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
self.assertAllClose(v_false_1, v_false_2)
self.assertAllClose(v_true_1, v_true_2)
self.assertAllClose(v_false_1, v_true_1)
@test_util.build_as_function_and_v1_graph
def testDefunNoJitScope(self):
with self.session(graph=ops.Graph()):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
# No enclosing jit scope so function sets its own value for _XlaScope.
self.assertEqual(b"function_mulop", func_attrs["_XlaScope"].s)
@test_util.build_as_function_and_v1_graph
def testDefunInheritsJitScope(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
# Ensure _XlaScope is inherited from enclosing context.
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
| JITTest |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_blocks/test_general_blocks.py | {
"start": 13531,
"end": 26989
} | class ____(util.MdCase):
"""Test Blocks tab cases."""
extension = [
'admonition',
'pymdownx.blocks.tab',
'pymdownx.superfences',
'markdown.extensions.def_list',
'pymdownx.details'
]
extension_configs = {
'pymdownx.blocks.tab': {'alternate_style': True}
}
def test_with_preceding_text(self):
"""Test content directly before tabs."""
expected = r'''
<p>foo
<strong>foo</strong></p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block"></div>
</div>
</div>
''' # noqa: E501
self.check_markdown(
R'''
foo
**foo**
/// tab | Tab
///
''',
expected,
True
)
def test_nested_tabbed(self):
"""Test nested tabbed."""
self.check_markdown(
R'''
//// tab | Tab
Some *content*
/// tab | Tab A
- item 1
- item 2
///
/// tab | Tab B
- item A
- item B
///
////
/// tab | Another Tab
Some more content.
///
''',
r'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:2"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><input id="__tabbed_1_2" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label><label for="__tabbed_1_2">Another Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Some <em>content</em></p>
<div class="tabbed-set tabbed-alternate" data-tabs="2:2"><input checked="checked" id="__tabbed_2_1" name="__tabbed_2" type="radio" /><input id="__tabbed_2_2" name="__tabbed_2" type="radio" /><div class="tabbed-labels"><label for="__tabbed_2_1">Tab A</label><label for="__tabbed_2_2">Tab B</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>item 1</p>
</li>
<li>
<p>item 2</p>
</li>
</ul>
</div>
<div class="tabbed-block">
<ul>
<li>
<p>item A</p>
</li>
<li>
<p>item B</p>
</li>
</ul>
</div>
</div>
</div>
</div>
<div class="tabbed-block">
<p>Some more content.</p>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_break(self):
"""Test that tabs are properly terminated on blocks that are not under the tab."""
self.check_markdown(
r'''
/// tab | Tab
Some *content*
And more `content`.
///
Content
''',
r'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Some <em>content</em></p>
<p>And more <code>content</code>.</p>
</div>
</div>
</div>
<p>Content</p>
''', # noqa: E501
True
)
def test_with_lists(self):
"""Test with lists."""
self.check_markdown(
'''
- List
/// tab | Tab
- Paragraph
Paragraph
///
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
</ul>
</div>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_with_big_lists(self):
"""Test details with a longer list."""
self.check_markdown(
'''
- List
/// tab | Tab
- Paragraph
Paragraph
- Paragraph
paragraph
///
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
<li>
<p>Paragraph</p>
<p>paragraph</p>
</li>
</ul>
</div>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_with_complex_lists(self):
"""Test details in a complex list scenario."""
self.check_markdown(
'''
- List
/// tab | Tab
- Paragraph
//// tab | Tab
1. Paragraph
Paragraph
Paragraph
////
///
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>Paragraph</p>
<div class="tabbed-set tabbed-alternate" data-tabs="2:1"><input checked="checked" id="__tabbed_2_1" name="__tabbed_2" type="radio" /><div class="tabbed-labels"><label for="__tabbed_2_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ol>
<li>
<p>Paragraph</p>
<p>Paragraph</p>
<p>Paragraph</p>
</li>
</ol>
</div>
</div>
</div>
</li>
</ul>
</div>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_definition_list(self):
"""Test with definition list."""
self.check_markdown(
'''
- List
/// tab | Tab
Term
: Definition
More text
: Another
definition
Even more text
///
''',
'''
<ul>
<li>
<p>List</p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<dl>
<dt>Term</dt>
<dd>
<p>Definition</p>
<p>More text</p>
</dd>
<dd>
<p>Another
definition</p>
<p>Even more text</p>
</dd>
</dl>
</div>
</div>
</div>
</li>
</ul>
''', # noqa: E501
True
)
def test_with_details(self):
"""Test with definition list."""
self.check_markdown(
'''
/// tab | Output
???+ note "Open styled details"
??? danger "Nested details!"
And more content again.
///
''',
'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Output</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<details class="note" open="open">
<summary>Open styled details</summary>
<details class="danger">
<summary>Nested details!</summary>
<p>And more content again.</p>
</details>
</details>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_complex_list(self):
"""Test tabbed complex list scenario."""
self.check_markdown(
'''
/// tab | Tab with loose lists
- Parent 1
- Child 1
- Child 2
///
''',
'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab with loose lists</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>Parent 1</p>
<ul>
<li>Child 1</li>
<li>Child 2</li>
</ul>
</li>
</ul>
</div>
</div>
</div>
''', # noqa: E501
True
)
def test_tabbed_complex_list_unindented_content(self):
"""Test tabbed complex list scenario with un-indented content."""
self.check_markdown(
'''
/// tab | Tab with loose lists
- Parent 1
- Child 1
- Child 2
///
- Parent 2
''',
'''
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab with loose lists</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<ul>
<li>
<p>Parent 1</p>
<ul>
<li>Child 1</li>
<li>Child 2</li>
</ul>
</li>
</ul>
</div>
</div>
</div>
<ul>
<li>Parent 2</li>
</ul>
''', # noqa: E501
True
)
def test_nesting_with_legacy_style(self):
"""Test nesting with legacy style admonitions."""
self.check_markdown(
'''
!!! note "Admonition 1"
Admonition 1
/// tab | Tab 1
Test tab 1
!!! note "Admonition 2"
Admonition 2
//// tab | Tab 2
Test tab 2
!!! note "Admonition 3"
Admonition 3
////
///
''',
'''
<div class="admonition note">
<p class="admonition-title">Admonition 1</p>
<p>Admonition 1</p>
<div class="tabbed-set tabbed-alternate" data-tabs="1:1"><input checked="checked" id="__tabbed_1_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="__tabbed_1_1">Tab 1</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Test tab 1</p>
<div class="admonition note">
<p class="admonition-title">Admonition 2</p>
<p>Admonition 2</p>
<div class="tabbed-set tabbed-alternate" data-tabs="2:1"><input checked="checked" id="__tabbed_2_1" name="__tabbed_2" type="radio" /><div class="tabbed-labels"><label for="__tabbed_2_1">Tab 2</label></div>
<div class="tabbed-content">
<div class="tabbed-block">
<p>Test tab 2</p>
<div class="admonition note">
<p class="admonition-title">Admonition 3</p>
<p>Admonition 3</p>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
''', # noqa: E501
True
)
| TestBlocksTab |
python | sympy__sympy | sympy/physics/quantum/commutator.py | {
"start": 627,
"end": 8148
} | class ____(Expr):
"""The standard commutator, in an unevaluated state.
Explanation
===========
Evaluating a commutator is defined [1]_ as: ``[A, B] = A*B - B*A``. This
class returns the commutator in an unevaluated form. To evaluate the
commutator, use the ``.doit()`` method.
Canonical ordering of a commutator is ``[A, B]`` for ``A < B``. The
arguments of the commutator are put into canonical order using comparison operators.
If ``B < A``, then ``[B, A]`` is returned as ``-[A, B]``.
Parameters
==========
A : Expr
The first argument of the commutator [A,B].
B : Expr
The second argument of the commutator [A,B].
Examples
========
>>> from sympy.physics.quantum import Commutator, Dagger, Operator
>>> from sympy.abc import x, y
>>> A = Operator('A')
>>> B = Operator('B')
>>> C = Operator('C')
Create a commutator and use ``.doit()`` to evaluate it:
>>> comm = Commutator(A, B)
>>> comm
[A,B]
>>> comm.doit()
A*B - B*A
The commutator orders it arguments in canonical order:
>>> comm = Commutator(B, A); comm
-[A,B]
Commutative constants are factored out:
>>> Commutator(3*x*A, x*y*B)
3*x**2*y*[A,B]
Using ``.expand(commutator=True)``, the standard commutator expansion rules
can be applied:
>>> Commutator(A+B, C).expand(commutator=True)
[A,C] + [B,C]
>>> Commutator(A, B+C).expand(commutator=True)
[A,B] + [A,C]
>>> Commutator(A*B, C).expand(commutator=True)
[A,C]*B + A*[B,C]
>>> Commutator(A, B*C).expand(commutator=True)
[A,B]*C + B*[A,C]
Adjoint operations applied to the commutator are properly applied to the
arguments:
>>> Dagger(Commutator(A, B))
-[Dagger(A),Dagger(B)]
References
==========
.. [1] https://en.wikipedia.org/wiki/Commutator
"""
is_commutative = False
_kind_dispatcher = KindDispatcher("Commutator_kind_dispatcher", commutative=True)
@property
def kind(self):
arg_kinds = (a.kind for a in self.args)
return self._kind_dispatcher(*arg_kinds)
def __new__(cls, A, B):
r = cls.eval(A, B)
if r is not None:
return r
obj = Expr.__new__(cls, A, B)
return obj
@classmethod
def eval(cls, a, b):
if not (a and b):
return S.Zero
if a == b:
return S.Zero
if a.is_commutative or b.is_commutative:
return S.Zero
# [xA,yB] -> xy*[A,B]
ca, nca = a.args_cnc()
cb, ncb = b.args_cnc()
c_part = ca + cb
if c_part:
return Mul(Mul(*c_part), cls(Mul._from_args(nca), Mul._from_args(ncb)))
# Canonical ordering of arguments
# The Commutator [A, B] is in canonical form if A < B.
if a.compare(b) == 1:
return S.NegativeOne*cls(b, a)
def _expand_pow(self, A, B, sign):
exp = A.exp
if not exp.is_integer or not exp.is_constant() or abs(exp) <= 1:
# nothing to do
return self
base = A.base
if exp.is_negative:
base = A.base**-1
exp = -exp
comm = Commutator(base, B).expand(commutator=True)
result = base**(exp - 1) * comm
for i in range(1, exp):
result += base**(exp - 1 - i) * comm * base**i
return sign*result.expand()
def _eval_expand_commutator(self, **hints):
A = self.args[0]
B = self.args[1]
if isinstance(A, Add):
# [A + B, C] -> [A, C] + [B, C]
sargs = []
for term in A.args:
comm = Commutator(term, B)
if isinstance(comm, Commutator):
comm = comm._eval_expand_commutator()
sargs.append(comm)
return Add(*sargs)
elif isinstance(B, Add):
# [A, B + C] -> [A, B] + [A, C]
sargs = []
for term in B.args:
comm = Commutator(A, term)
if isinstance(comm, Commutator):
comm = comm._eval_expand_commutator()
sargs.append(comm)
return Add(*sargs)
elif isinstance(A, Mul):
# [A*B, C] -> A*[B, C] + [A, C]*B
a = A.args[0]
b = Mul(*A.args[1:])
c = B
comm1 = Commutator(b, c)
comm2 = Commutator(a, c)
if isinstance(comm1, Commutator):
comm1 = comm1._eval_expand_commutator()
if isinstance(comm2, Commutator):
comm2 = comm2._eval_expand_commutator()
first = Mul(a, comm1)
second = Mul(comm2, b)
return Add(first, second)
elif isinstance(B, Mul):
# [A, B*C] -> [A, B]*C + B*[A, C]
a = A
b = B.args[0]
c = Mul(*B.args[1:])
comm1 = Commutator(a, b)
comm2 = Commutator(a, c)
if isinstance(comm1, Commutator):
comm1 = comm1._eval_expand_commutator()
if isinstance(comm2, Commutator):
comm2 = comm2._eval_expand_commutator()
first = Mul(comm1, c)
second = Mul(b, comm2)
return Add(first, second)
elif isinstance(A, Pow):
# [A**n, C] -> A**(n - 1)*[A, C] + A**(n - 2)*[A, C]*A + ... + [A, C]*A**(n-1)
return self._expand_pow(A, B, 1)
elif isinstance(B, Pow):
# [A, C**n] -> C**(n - 1)*[C, A] + C**(n - 2)*[C, A]*C + ... + [C, A]*C**(n-1)
return self._expand_pow(B, A, -1)
# No changes, so return self
return self
def doit(self, **hints):
""" Evaluate commutator """
# Keep the import of Operator here to avoid problems with
# circular imports.
from sympy.physics.quantum.operator import Operator
A = self.args[0]
B = self.args[1]
if isinstance(A, Operator) and isinstance(B, Operator):
try:
comm = A._eval_commutator(B, **hints)
except NotImplementedError:
try:
comm = -1*B._eval_commutator(A, **hints)
except NotImplementedError:
comm = None
if comm is not None:
return comm.doit(**hints)
return (A*B - B*A).doit(**hints)
def _eval_adjoint(self):
return Commutator(Dagger(self.args[1]), Dagger(self.args[0]))
def _sympyrepr(self, printer, *args):
return "%s(%s,%s)" % (
self.__class__.__name__, printer._print(
self.args[0]), printer._print(self.args[1])
)
def _sympystr(self, printer, *args):
return "[%s,%s]" % (
printer._print(self.args[0]), printer._print(self.args[1]))
def _pretty(self, printer, *args):
pform = printer._print(self.args[0], *args)
pform = prettyForm(*pform.right(prettyForm(',')))
pform = prettyForm(*pform.right(printer._print(self.args[1], *args)))
pform = prettyForm(*pform.parens(left='[', right=']'))
return pform
def _latex(self, printer, *args):
return "\\left[%s,%s\\right]" % tuple([
printer._print(arg, *args) for arg in self.args])
@Commutator._kind_dispatcher.register(_OperatorKind, _OperatorKind)
def find_op_kind(e1, e2):
"""Find the kind of an anticommutator of two OperatorKinds."""
return OperatorKind
| Commutator |
python | coleifer__peewee | tests/prefetch_tests.py | {
"start": 21617,
"end": 21688
} | class ____(TestModel):
name = TextField()
c = ForeignKeyField(C)
| C1 |
python | pypa__setuptools | pkg_resources/__init__.py | {
"start": 10502,
"end": 16497
} | class ____(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories: dict[type[_ModuleLike], _ProviderFactoryType] = {}
PY_MAJOR = f'{sys.version_info.major}.{sys.version_info.minor}'
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(
loader_type: type[_ModuleLike], provider_factory: _ProviderFactoryType
) -> None:
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
@overload
def get_provider(moduleOrReq: str) -> IResourceProvider: ...
@overload
def get_provider(moduleOrReq: Requirement) -> Distribution: ...
def get_provider(moduleOrReq: str | Requirement) -> IResourceProvider | Distribution:
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
@functools.cache
def _macos_vers():
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
with open(plist, 'rb') as fh:
plist_content = plistlib.load(fh)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
return version.split('.')
def _macos_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions"""
from sysconfig import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macos_vers()
machine = _macos_arch(os.uname()[4].replace(" ", "_"))
return f"macosx-{version[0]}.{version[1]}-{machine}"
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided: str | None, required: str | None) -> bool:
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# macOS special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macOS designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = f"{reqMac.group(1)}.{reqMac.group(2)}"
if (
dversion == 7
and macosversion >= "10.3"
or dversion == 8
and macosversion >= "10.4"
):
return True
# egg isn't macOS or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
@overload
def get_distribution(dist: _DistributionT) -> _DistributionT: ...
@overload
def get_distribution(dist: _PkgReqType) -> Distribution: ...
def get_distribution(dist: Distribution | _PkgReqType) -> Distribution:
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, str):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected str, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist: _EPDistType, group: str, name: str) -> _ResolvedEntryPoint:
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
@overload
def get_entry_map(
dist: _EPDistType, group: None = None
) -> dict[str, dict[str, EntryPoint]]: ...
@overload
def get_entry_map(dist: _EPDistType, group: str) -> dict[str, EntryPoint]: ...
def get_entry_map(dist: _EPDistType, group: str | None = None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist: _EPDistType, group: str, name: str) -> EntryPoint | None:
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
| UnknownExtra |
python | getsentry__sentry | tests/sentry/seer/autofix/test_issue_summary.py | {
"start": 38491,
"end": 41685
} | class ____:
@pytest.mark.parametrize(
"fixability,user_pref,expected",
[
# Fixability is None - return user preference if available, otherwise ROOT_CAUSE
(None, "open_pr", AutofixStoppingPoint.OPEN_PR),
(None, "code_changes", AutofixStoppingPoint.CODE_CHANGES),
(None, "solution", AutofixStoppingPoint.SOLUTION),
(None, "root_cause", AutofixStoppingPoint.ROOT_CAUSE),
(None, None, AutofixStoppingPoint.ROOT_CAUSE),
# User preference is None - return fixability suggestion
(AutofixStoppingPoint.OPEN_PR, None, AutofixStoppingPoint.OPEN_PR),
(AutofixStoppingPoint.CODE_CHANGES, None, AutofixStoppingPoint.CODE_CHANGES),
(AutofixStoppingPoint.SOLUTION, None, AutofixStoppingPoint.SOLUTION),
(AutofixStoppingPoint.ROOT_CAUSE, None, AutofixStoppingPoint.ROOT_CAUSE),
# User preference limits automation (user is more conservative)
(
AutofixStoppingPoint.OPEN_PR,
"code_changes",
AutofixStoppingPoint.CODE_CHANGES,
),
(AutofixStoppingPoint.OPEN_PR, "solution", AutofixStoppingPoint.SOLUTION),
(AutofixStoppingPoint.OPEN_PR, "root_cause", AutofixStoppingPoint.ROOT_CAUSE),
(AutofixStoppingPoint.CODE_CHANGES, "solution", AutofixStoppingPoint.SOLUTION),
(
AutofixStoppingPoint.CODE_CHANGES,
"root_cause",
AutofixStoppingPoint.ROOT_CAUSE,
),
(AutofixStoppingPoint.SOLUTION, "root_cause", AutofixStoppingPoint.ROOT_CAUSE),
# Fixability is more conservative (fixability limits automation)
(AutofixStoppingPoint.SOLUTION, "open_pr", AutofixStoppingPoint.SOLUTION),
(
AutofixStoppingPoint.SOLUTION,
"code_changes",
AutofixStoppingPoint.SOLUTION,
),
(AutofixStoppingPoint.ROOT_CAUSE, "open_pr", AutofixStoppingPoint.ROOT_CAUSE),
(
AutofixStoppingPoint.ROOT_CAUSE,
"code_changes",
AutofixStoppingPoint.ROOT_CAUSE,
),
(AutofixStoppingPoint.ROOT_CAUSE, "solution", AutofixStoppingPoint.ROOT_CAUSE),
# Same level - return fixability
(AutofixStoppingPoint.OPEN_PR, "open_pr", AutofixStoppingPoint.OPEN_PR),
(
AutofixStoppingPoint.CODE_CHANGES,
"code_changes",
AutofixStoppingPoint.CODE_CHANGES,
),
(AutofixStoppingPoint.SOLUTION, "solution", AutofixStoppingPoint.SOLUTION),
(
AutofixStoppingPoint.ROOT_CAUSE,
"root_cause",
AutofixStoppingPoint.ROOT_CAUSE,
),
],
)
def test_upper_bound_combinations(self, fixability, user_pref, expected):
result = _apply_user_preference_upper_bound(fixability, user_pref)
assert result == expected
@with_feature({"organizations:gen-ai-features": True, "projects:triage-signals-v0": True})
| TestApplyUserPreferenceUpperBound |
python | scipy__scipy | scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py | {
"start": 11014,
"end": 23781
} | class ____:
def __init__(self):
self.eigs = eigs
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
self.mattypes = [csr_array, aslinearoperator, np.asarray]
self.sigmas_OPparts = {None: [None],
0.1: ['r'],
0.1 + 0.1j: ['r', 'i']}
# generate matrices
# these should all be float32 so that the eigenvalues
# are the same in float32 and float64
N = 6
rng = np.random.RandomState(2300)
Ar = generate_matrix(N, rng=rng).astype('f').astype('d')
M = generate_matrix(N, hermitian=True,
pos_definite=True, rng=rng).astype('f').astype('d')
Ac = generate_matrix(N, complex_=True, rng=rng).astype('F').astype('D')
v0 = rng.random(N)
# standard real nonsymmetric problem
SNR = DictWithRepr("std-real-nonsym")
SNR['mat'] = Ar
SNR['v0'] = v0
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
# general real nonsymmetric problem
GNR = DictWithRepr("gen-real-nonsym")
GNR['mat'] = Ar
GNR['bmat'] = M
GNR['v0'] = v0
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
# standard complex nonsymmetric problem
SNC = DictWithRepr("std-cmplx-nonsym")
SNC['mat'] = Ac
SNC['v0'] = v0
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
# general complex nonsymmetric problem
GNC = DictWithRepr("gen-cmplx-nonsym")
GNC['mat'] = Ac
GNC['bmat'] = M
GNC['v0'] = v0
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
self.real_test_cases = [SNR, GNR]
self.complex_test_cases = [SNC, GNC]
@pytest.mark.parametrize("sigma, mode", [(None, 'normal'), (0.5, 'normal'),
(0.5, 'buckling'), (0.5, 'cayley')])
@pytest.mark.parametrize("mattype", [csr_array, aslinearoperator, np.asarray])
@pytest.mark.parametrize("which", ['LM', 'SM', 'LA', 'SA', 'BE'])
@pytest.mark.parametrize("typ", ['f', 'd'])
@pytest.mark.parametrize("D", SymmetricParams().real_test_cases)
def test_symmetric_modes(D, typ, which, mattype, sigma, mode):
rng = np.random.default_rng(1749531508689996)
k = 2
eval_evec(True, D, typ, k, which, None, sigma, mattype, None, mode, rng=rng)
@pytest.mark.parametrize("sigma", [None, 0.5])
@pytest.mark.parametrize("mattype", [csr_array, aslinearoperator, np.asarray])
@pytest.mark.parametrize("which", ['LM', 'SM', 'LA', 'SA'])
@pytest.mark.parametrize("typ", ['F', 'D'])
@pytest.mark.parametrize("D", SymmetricParams().complex_test_cases)
def test_hermitian_modes(D, typ, which, mattype, sigma):
rng = np.random.default_rng(1749531706842957)
k = 2
eval_evec(True, D, typ, k, which, None, sigma, mattype, rng=rng)
@pytest.mark.parametrize("typ", ['f', 'd'])
@pytest.mark.parametrize("D", SymmetricParams().real_test_cases)
@pytest.mark.parametrize("k", [1, 2, 3, 4, 5])
def test_symmetric_starting_vector(k, D, typ):
rng = np.random.default_rng(1749532110418901)
v0 = rng.uniform(size=len(D['v0'])).astype(typ)
eval_evec(True, D, typ, k, 'LM', v0, rng=rng)
def test_symmetric_no_convergence():
rng = np.random.RandomState(1234)
m = generate_matrix(30, hermitian=True, pos_definite=True, rng=rng)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case") from err
w, v = err.eigenvalues, err.eigenvectors
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
@pytest.mark.parametrize("sigma, OPpart", [(None, None), (0.1, 'r'),
(0.1 + 0.1j, 'r'), (0.1 + 0.1j, 'i')])
@pytest.mark.parametrize("mattype", [csr_array, aslinearoperator, np.asarray])
@pytest.mark.parametrize("which", ['LM', 'LR', 'LI'])
@pytest.mark.parametrize("typ", ['f', 'd'])
@pytest.mark.parametrize("D", NonSymmetricParams().real_test_cases)
def test_real_nonsymmetric_modes(D, typ, which, mattype,
sigma, OPpart):
rng = np.random.default_rng(174953334412726)
k = 2
eval_evec(False, D, typ, k, which, None, sigma, mattype, OPpart, rng=rng)
@pytest.mark.parametrize("sigma", [None, 0.1, 0.1 + 0.1j])
@pytest.mark.parametrize("mattype", [csr_array, aslinearoperator, np.asarray])
@pytest.mark.parametrize("which", ['LM', 'LR', 'LI'])
@pytest.mark.parametrize("typ", ['F', 'D'])
@pytest.mark.parametrize("D", NonSymmetricParams().complex_test_cases)
def test_complex_nonsymmetric_modes(D, typ, which, mattype, sigma):
rng = np.random.default_rng(1749533536274527)
k = 2
eval_evec(False, D, typ, k, which, None, sigma, mattype, rng=rng)
@pytest.mark.parametrize("typ", ['F', 'D'])
@pytest.mark.parametrize("D", NonSymmetricParams().complex_test_cases)
@pytest.mark.parametrize("k", [1, 2, 3, 4])
def test_nonsymmetric_starting_vector(k, D, typ):
rng = np.random.default_rng(174953366983161)
A = D['mat']
n = A.shape[0]
v0 = rng.uniform(size=n).astype(typ)
eval_evec(False, D, typ, k, "LM", v0, sigma=None, rng=rng)
def test_standard_nonsymmetric_no_convergence():
rng = np.random.RandomState(1234)
m = generate_matrix(30, complex_=True, rng=rng)
tol, rtol, atol = _get_test_tolerance('d')
try:
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
raise AssertionError("Spurious no-error exit")
except ArpackNoConvergence as err:
k = len(err.eigenvalues)
if k <= 0:
raise AssertionError("Spurious no-eigenvalues-found case") from err
w, v = err.eigenvalues, err.eigenvectors
for ww, vv in zip(w, v.T):
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
def test_eigen_bad_shapes():
# A is not square.
A = csc_array(np.zeros((2, 3)))
assert_raises(ValueError, eigs, A)
def test_eigen_bad_kwargs():
# Test eigen on wrong keyword argument
A = csc_array(np.zeros((8, 8)))
assert_raises(ValueError, eigs, A, which='XX')
def test_ticket_1459_arpack_crash():
for dtype in [np.float32, np.float64]:
# This test does not seem to catch the issue for float32,
# but we made the same fix there, just to be sure
N = 6
k = 2
np.random.seed(2301)
A = np.random.random((N, N)).astype(dtype)
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
-0.34365925382227402451, 0.46122533684552280420,
-0.58001341115969040629, -0.78844877570084292984e-01],
dtype=dtype)
# Should not crash:
evals, evecs = eigs(A, k, v0=v0)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_linearoperator_deallocation():
# Check that the linear operators used by the Arpack wrappers are
# deallocatable by reference counting -- they are big objects, so
# Python's cyclic GC may not collect them fast enough before
# running out of memory if eigs/eigsh are called in a tight loop.
M_d = np.eye(10)
M_s = csc_array(M_d)
M_o = aslinearoperator(M_d)
with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
pass
with assert_deallocated(lambda: arpack.LuInv(M_d)):
pass
with assert_deallocated(lambda: arpack.IterInv(M_s)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
pass
with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
pass
def test_parallel_threads(num_parallel_threads):
results = []
rng = np.random.default_rng(1234)
v0 = rng.random(50)
def worker():
x = diags_array([1.0, -2.0, 1.0], offsets=[-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=3, v0=v0)
results.append(w)
w, v = eigsh(x, k=3, v0=v0)
results.append(w)
nthreads = 9 // num_parallel_threads + 1
threads = [threading.Thread(target=worker) for _ in range(nthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
worker()
for r in results:
assert_allclose(r, results[-1])
def test_reentering():
# Just some linear operator that calls eigs recursively
def A_matvec(x):
x = diags_array([1.0, -2.0, 1.0], offsets=[-1, 0, 1], shape=(50, 50))
w, v = eigs(x, k=1)
return v.real / w[0].real
A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50))
# ================= Old Fortran tests ==================
# The Fortran code is not reentrant, so this fails (gracefully, not crashing)
# assert_raises(RuntimeError, eigs, A, k=1)
# assert_raises(RuntimeError, eigsh, A, k=1)
#
# These should not crash upon reentrance
eigs(A, k=1)
eigsh(A, k=1)
def test_regression_arpackng_1315():
# Check that issue arpack-ng/#1315 is not present.
# Adapted from arpack-ng/TESTS/bug_1315_single.c
# If this fails, then the installed ARPACK library is faulty.
for dtype in [np.float32, np.float64]:
np.random.seed(1234)
w0 = np.arange(1, 1000+1).astype(dtype)
A = diags_array([w0], offsets=[0], shape=(1000, 1000))
v0 = np.random.rand(1000).astype(dtype)
w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0)
assert_allclose(np.sort(w), np.sort(w0[-9:]),
rtol=1e-4)
def test_eigs_for_k_greater():
# Test eigs() for k beyond limits.
rng = np.random.RandomState(1234)
A_sparse = diags_array([1.0, -2.0, 1.0], offsets=[-1, 0, 1], shape=(4, 4))
A = generate_matrix(4, sparse=False, rng=rng)
M_dense = rng.random((4, 4))
M_sparse = generate_matrix(4, sparse=True, rng=rng)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eig(A, b=M_dense)
eig_tuple2 = eig(A, b=M_sparse)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigs, A, M=M_linop, k=3)
# Test 'A' for different types
assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
assert_raises(TypeError, eigs, A_sparse, k=3)
def test_eigsh_for_k_greater():
# Test eigsh() for k beyond limits.
rng = np.random.RandomState(1234)
A_sparse = diags_array([1.0, -2.0, 1.0], offsets=[-1, 0, 1], shape=(4, 4))
A = generate_matrix(4, sparse=False, rng=rng)
M_dense = generate_matrix_symmetric(4, pos_definite=True, rng=rng)
M_sparse = generate_matrix_symmetric(
4, pos_definite=True, sparse=True, rng=rng)
M_linop = aslinearoperator(M_dense)
eig_tuple1 = eigh(A, b=M_dense)
eig_tuple2 = eigh(A, b=M_sparse)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1)
assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1)
assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)
# M as LinearOperator
assert_raises(TypeError, eigsh, A, M=M_linop, k=4)
# Test 'A' for different types
assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
def test_real_eigs_real_k_subset():
rng = np.random.default_rng(2)
n = 10
A = random_array(shape=(n, n), density=0.5, rng=rng)
A.data *= 2
A.data -= 1
A += A.T # make symmetric to test real eigenvalues
v0 = np.ones(n)
whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
dtypes = [np.float32, np.float64]
for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
prev_w = np.array([], dtype=dtype)
eps = np.finfo(dtype).eps
for k in range(1, 9):
w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
v0=v0.astype(dtype), tol=0)
assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))
# Check that the set of eigenvalues for `k` is a subset of that for `k+1`
dist = abs(prev_w[:,None] - w).min(axis=1)
assert_allclose(dist, 0, atol=np.sqrt(eps))
prev_w = w
| NonSymmetricParams |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 8880,
"end": 9114
} | class ____(Message):
message = "'...' %% ... has invalid format string: %s"
def __init__(self, filename, loc, error):
Message.__init__(self, filename, loc)
self.message_args = (error,)
| PercentFormatInvalidFormat |
python | realpython__materials | python-self-type/accounts_string.py | {
"start": 621,
"end": 1632
} | class ____(BankAccount):
interest_rate: float
@classmethod
def from_application(
cls, deposit: float = 0, interest_rate: float = 1
) -> "SavingsAccount":
# Generate a random seven-digit bank account number
account_number = random.randint(1000000, 9999999)
return cls(account_number, deposit, interest_rate)
def calculate_interest(self) -> float:
return self.balance * self.interest_rate / 100
def add_interest(self) -> "SavingsAccount":
self.deposit(self.calculate_interest())
return self
account = BankAccount(account_number=1534899324, balance=50)
(
account.display_balance()
.deposit(50)
.display_balance()
.withdraw(30)
.display_balance()
)
savings = SavingsAccount.from_application(deposit=100, interest_rate=5)
(
savings.display_balance()
.add_interest()
.display_balance()
.deposit(50)
.display_balance()
.withdraw(30)
.add_interest()
.display_balance()
)
| SavingsAccount |
python | tensorflow__tensorflow | tensorflow/python/ops/math_grad_test.py | {
"start": 17963,
"end": 18979
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testBasicGradient(self):
inputs = constant_op.constant(np.arange(-3, 3),
dtype=dtypes.float32)
outputs = math_ops.div_no_nan(inputs, 1 + math_ops.abs(inputs))
with self.cached_session():
error = gradient_checker.compute_gradient_error(
inputs,
inputs.get_shape().as_list(), outputs,
outputs.get_shape().as_list())
self.assertLess(error, 1e-4)
@test_util.run_deprecated_v1
def testGradientWithDenominatorIsZero(self):
x = constant_op.constant(np.arange(-3, 3),
dtype=dtypes.float32)
y = array_ops.zeros_like(x,
dtype=dtypes.float32)
outputs = math_ops.div_no_nan(x, y)
with self.cached_session():
dx, dy = gradients.gradients(outputs, [x, y])
self.assertAllClose(dx, np.zeros(x.shape.as_list()))
self.assertAllClose(dy, np.zeros(y.shape.as_list()))
| DivNoNanGradientTest |
python | getsentry__sentry | tests/sentry/replays/endpoints/test_project_replay_viewed_by.py | {
"start": 439,
"end": 9624
} | class ____(APITestCase, ReplaysSnubaTestCase):
endpoint = "sentry-api-0-project-replay-viewed-by"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.replay_id = uuid4().hex
self.url = reverse(
self.endpoint, args=(self.organization.slug, self.project.slug, self.replay_id)
)
def test_get_replay_viewed_by(self) -> None:
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=10)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, self.project.id, self.replay_id))
self.store_replays(mock_replay(seq2_timestamp, self.project.id, self.replay_id))
self.store_replays(
mock_replay_viewed(time.time(), self.project.id, self.replay_id, self.user.id)
)
with self.feature(REPLAYS_FEATURES):
response = self.client.get(self.url)
assert response.status_code == 200
assert response.status_code == 200
assert_viewed_by_expected_ids_and_unique(
response.data["data"]["viewed_by"], {self.user.id}
)
# Assert the viewed_by_user value matches the blueprint.
viewed_by_user = response.data["data"]["viewed_by"][0]
assert len(viewed_by_user) == 18
assert "avatarUrl" in viewed_by_user
assert "dateJoined" in viewed_by_user
assert "email" in viewed_by_user
assert "experiments" in viewed_by_user
assert "has2fa" in viewed_by_user
assert "hasPasswordAuth" in viewed_by_user
assert "id" in viewed_by_user
assert "isActive" in viewed_by_user
assert "isManaged" in viewed_by_user
assert "isStaff" in viewed_by_user
assert "isSuperuser" in viewed_by_user
assert "lastActive" in viewed_by_user
assert "lastLogin" in viewed_by_user
assert "name" in viewed_by_user
assert "type" in viewed_by_user
assert "username" in viewed_by_user
assert "avatar" in viewed_by_user
assert isinstance(viewed_by_user["avatar"], dict)
assert "avatarType" in viewed_by_user["avatar"]
assert "avatarUuid" in viewed_by_user["avatar"]
assert "avatarUrl" in viewed_by_user["avatar"]
assert "emails" in viewed_by_user
assert isinstance(viewed_by_user["emails"], list)
assert "id" in viewed_by_user["emails"][0]
assert "email" in viewed_by_user["emails"][0]
assert "is_verified" in viewed_by_user["emails"][0]
# Assert the returned user is the viewed-by user.
assert viewed_by_user["type"] == "user"
assert viewed_by_user["username"] == self.user.username
assert viewed_by_user["email"] == self.user.email
assert viewed_by_user["isActive"] == self.user.is_active
assert viewed_by_user["isManaged"] == self.user.is_managed
assert dateutil.parser.parse(viewed_by_user["dateJoined"]) == self.user.date_joined
assert dateutil.parser.parse(viewed_by_user["lastActive"]) == self.user.last_active
assert viewed_by_user["isSuperuser"] == self.user.is_superuser
assert viewed_by_user["isStaff"] == self.user.is_staff
def test_get_replay_viewed_by_nonexistent_user(self) -> None:
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=10)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, self.project.id, self.replay_id))
self.store_replays(mock_replay(seq2_timestamp, self.project.id, self.replay_id))
# Nonexistent users do not show up in response).
self.store_replays(
mock_replay_viewed(time.time(), self.project.id, self.replay_id, 2387562378)
)
with self.feature(REPLAYS_FEATURES):
response = self.client.get(self.url)
assert response.status_code == 200
assert len(response.data["data"]["viewed_by"]) == 0
def test_get_replay_viewed_by_no_viewers(self) -> None:
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=10)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, self.project.id, self.replay_id))
self.store_replays(mock_replay(seq2_timestamp, self.project.id, self.replay_id))
with self.feature(REPLAYS_FEATURES):
response = self.client.get(self.url)
assert response.status_code == 200
assert len(response.data["data"]["viewed_by"]) == 0
def test_get_replay_viewed_by_not_found(self) -> None:
with self.feature(REPLAYS_FEATURES):
response = self.client.get(self.url)
assert response.status_code == 404
def test_get_replay_viewed_by_feature_flag_disabled(self) -> None:
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=10)
self.store_replays(mock_replay(seq1_timestamp, self.project.id, self.replay_id))
response = self.client.get(self.url)
assert response.status_code == 404
@patch("sentry.replays.endpoints.project_replay_viewed_by.publish_replay_event")
def test_post_replay_viewed_by(self, publish_replay_event: MagicMock) -> None:
with self.feature(REPLAYS_FEATURES):
finished_at_dt = datetime.datetime.now() - datetime.timedelta(seconds=20)
self.store_replays(mock_replay(finished_at_dt, self.project.id, self.replay_id))
response = self.client.post(self.url, data="")
assert response.status_code == 204
assert publish_replay_event.called
replay_event = json.loads(publish_replay_event.call_args[0][0])
payload = replay_event["payload"]
assert payload["type"] == "replay_viewed"
assert payload["viewed_by_id"] == self.user.id
assert isinstance(payload["timestamp"], float)
# time should match the last replay segment with second-level precision
assert int(payload["timestamp"]) == int(finished_at_dt.timestamp())
def test_post_replay_viewed_by_not_exist(self) -> None:
with self.feature(REPLAYS_FEATURES):
response = self.client.post(self.url, data="")
assert response.status_code == 404
@patch("sentry.replays.endpoints.project_replay_viewed_by.publish_replay_event")
def test_post_replay_viewed_by_not_in_org(self, publish_replay_event: MagicMock) -> None:
with self.feature(REPLAYS_FEATURES):
finished_at_dt = datetime.datetime.now() - datetime.timedelta(seconds=20)
self.store_replays(mock_replay(finished_at_dt, self.project.id, self.replay_id))
self.login_as(user=self.create_user(is_superuser=True, is_staff=True), superuser=True)
response = self.client.post(self.url, data="")
assert response.status_code == 204
assert not publish_replay_event.called
def test_get_replay_viewed_by_user_in_other_org(self) -> None:
other_org_member = self.create_member(
organization=self.create_organization(), user=self.create_user()
)
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=10)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, self.project.id, self.replay_id))
self.store_replays(mock_replay(seq2_timestamp, self.project.id, self.replay_id))
self.store_replays(
mock_replay_viewed(
time.time(), self.project.id, self.replay_id, other_org_member.user_id
)
)
with self.feature(REPLAYS_FEATURES):
response = self.client.get(self.url)
assert response.status_code == 200
assert len(response.data["data"]["viewed_by"]) == 0
def test_get_replay_viewed_by_denylist(self) -> None:
seq1_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=10)
seq2_timestamp = datetime.datetime.now() - datetime.timedelta(seconds=5)
self.store_replays(mock_replay(seq1_timestamp, self.project.id, self.replay_id))
self.store_replays(mock_replay(seq2_timestamp, self.project.id, self.replay_id))
self.store_replays(
mock_replay_viewed(time.time(), self.project.id, self.replay_id, self.user.id)
)
with self.feature(REPLAYS_FEATURES):
with self.options({"replay.viewed-by.project-denylist": [self.project.id]}):
response = self.client.get(self.url)
assert response.status_code == 400
assert (
response.json()["detail"]["message"]
== "Viewed by search has been disabled for your project due to a data irregularity."
)
| ProjectReplayViewedByTest |
python | psf__black | tests/data/cases/dummy_implementations.py | {
"start": 1575,
"end": 1640
} | class ____:
def f(self):
...# Comment 2
| ClassF |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_resource_claim_consumer_reference.py | {
"start": 383,
"end": 6991
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_group': 'str',
'name': 'str',
'resource': 'str',
'uid': 'str'
}
attribute_map = {
'api_group': 'apiGroup',
'name': 'name',
'resource': 'resource',
'uid': 'uid'
}
def __init__(self, api_group=None, name=None, resource=None, uid=None, local_vars_configuration=None): # noqa: E501
"""V1beta2ResourceClaimConsumerReference - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_group = None
self._name = None
self._resource = None
self._uid = None
self.discriminator = None
if api_group is not None:
self.api_group = api_group
self.name = name
self.resource = resource
self.uid = uid
@property
def api_group(self):
"""Gets the api_group of this V1beta2ResourceClaimConsumerReference. # noqa: E501
APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources. # noqa: E501
:return: The api_group of this V1beta2ResourceClaimConsumerReference. # noqa: E501
:rtype: str
"""
return self._api_group
@api_group.setter
def api_group(self, api_group):
"""Sets the api_group of this V1beta2ResourceClaimConsumerReference.
APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources. # noqa: E501
:param api_group: The api_group of this V1beta2ResourceClaimConsumerReference. # noqa: E501
:type: str
"""
self._api_group = api_group
@property
def name(self):
"""Gets the name of this V1beta2ResourceClaimConsumerReference. # noqa: E501
Name is the name of resource being referenced. # noqa: E501
:return: The name of this V1beta2ResourceClaimConsumerReference. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1beta2ResourceClaimConsumerReference.
Name is the name of resource being referenced. # noqa: E501
:param name: The name of this V1beta2ResourceClaimConsumerReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def resource(self):
"""Gets the resource of this V1beta2ResourceClaimConsumerReference. # noqa: E501
Resource is the type of resource being referenced, for example \"pods\". # noqa: E501
:return: The resource of this V1beta2ResourceClaimConsumerReference. # noqa: E501
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this V1beta2ResourceClaimConsumerReference.
Resource is the type of resource being referenced, for example \"pods\". # noqa: E501
:param resource: The resource of this V1beta2ResourceClaimConsumerReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and resource is None: # noqa: E501
raise ValueError("Invalid value for `resource`, must not be `None`") # noqa: E501
self._resource = resource
@property
def uid(self):
"""Gets the uid of this V1beta2ResourceClaimConsumerReference. # noqa: E501
UID identifies exactly one incarnation of the resource. # noqa: E501
:return: The uid of this V1beta2ResourceClaimConsumerReference. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1beta2ResourceClaimConsumerReference.
UID identifies exactly one incarnation of the resource. # noqa: E501
:param uid: The uid of this V1beta2ResourceClaimConsumerReference. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and uid is None: # noqa: E501
raise ValueError("Invalid value for `uid`, must not be `None`") # noqa: E501
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2ResourceClaimConsumerReference):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2ResourceClaimConsumerReference):
return True
return self.to_dict() != other.to_dict()
| V1beta2ResourceClaimConsumerReference |
python | numba__numba | numba/core/typed_passes.py | {
"start": 20280,
"end": 20795
} | class ____(AnalysisPass):
"""NoPython Mode check: Validates the IR to ensure that features in use are
in a form that is supported"""
_name = "nopython_supported_feature_validation"
def __init__(self):
AnalysisPass.__init__(self)
def run_pass(self, state):
raise_on_unsupported_feature(state.func_ir, state.typemap)
warn_deprecated(state.func_ir, state.typemap)
return False
@register_pass(mutates_CFG=False, analysis_only=True)
| NoPythonSupportedFeatureValidation |
python | scipy__scipy | scipy/sparse/_base.py | {
"start": 2878,
"end": 57043
} | class ____(SparseABC):
""" This class provides a base class for all sparse arrays. It
cannot be instantiated. Most of the work is provided by subclasses.
"""
__array_priority__ = 10.1
_format = 'und' # undefined
_allow_nd = (2,)
@property
def ndim(self) -> int:
return len(self._shape)
@property
def _shape_as_2d(self):
s = self._shape
return (1, s[-1]) if len(s) == 1 else s
@property
def _bsr_container(self):
from ._bsr import bsr_array
return bsr_array
@property
def _coo_container(self):
from ._coo import coo_array
return coo_array
@property
def _csc_container(self):
from ._csc import csc_array
return csc_array
@property
def _csr_container(self):
from ._csr import csr_array
return csr_array
@property
def _dia_container(self):
from ._dia import dia_array
return dia_array
@property
def _dok_container(self):
from ._dok import dok_array
return dok_array
@property
def _lil_container(self):
from ._lil import lil_array
return lil_array
def __init__(self, arg1, *, maxprint=None):
self._shape = None
if self.__class__.__name__ == '_spbase':
raise ValueError("This class is not intended"
" to be instantiated directly.")
if isinstance(self, sparray) and np.isscalar(arg1):
raise ValueError(
"scipy sparse array classes do not support instantiation from a scalar"
)
self.maxprint = MAXPRINT if maxprint is None else maxprint
@property
def shape(self):
return self._shape
def reshape(self, *args, **kwargs):
"""reshape(self, shape, order='C', copy=False)
Gives a new shape to a sparse array/matrix without changing its data.
Parameters
----------
shape : tuple of ints
The new shape should be compatible with the original shape.
order : {'C', 'F'}, optional
Read the elements using this index order. 'C' means to read and
write the elements using C-like index order; e.g., read entire first
row, then second row, etc. 'F' means to read and write the elements
using Fortran-like index order; e.g., read entire first column, then
second column, etc.
copy : bool, optional
Indicates whether or not attributes of self should be copied
whenever possible. The degree to which attributes are copied varies
depending on the type of sparse array being used.
Returns
-------
reshaped : sparse array/matrix
A sparse array/matrix with the given `shape`, not necessarily of the same
format as the current object.
See Also
--------
numpy.reshape : NumPy's implementation of 'reshape' for ndarrays
"""
# If the shape already matches, don't bother doing an actual reshape
# Otherwise, the default is to convert to COO and use its reshape
# Don't restrict ndim on this first call. That happens in constructor
shape = check_shape(args, self.shape, allow_nd=range(1, 65))
order, copy = check_reshape_kwargs(kwargs)
if shape == self.shape:
if copy:
return self.copy()
else:
return self
return self.tocoo(copy=copy).reshape(shape, order=order, copy=False)
def resize(self, shape):
"""Resize the array/matrix in-place to dimensions given by ``shape``
Any elements that lie within the new shape will remain at the same
indices, while non-zero elements lying outside the new shape are
removed.
Parameters
----------
shape : (int, int)
number of rows and columns in the new array/matrix
Notes
-----
The semantics are not identical to `numpy.ndarray.resize` or
`numpy.resize`. Here, the same data will be maintained at each index
before and after reshape, if that index is within the new bounds. In
numpy, resizing maintains contiguity of the array, moving elements
around in the logical array but not within a flattened representation.
We give no guarantees about whether the underlying data attributes
(arrays, etc.) will be modified in place or replaced with new objects.
"""
# As an inplace operation, this requires implementation in each format.
raise NotImplementedError(
f'{type(self).__name__}.resize is not implemented')
def astype(self, dtype, casting='unsafe', copy=True):
"""Cast the array/matrix elements to a specified type.
Parameters
----------
dtype : string or numpy dtype
Typecode or data-type to which to cast the data.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
Defaults to 'unsafe' for backwards compatibility.
'no' means the data types should not be cast at all.
'equiv' means only byte-order changes are allowed.
'safe' means only casts which can preserve values are allowed.
'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
'unsafe' means any data conversions may be done.
copy : bool, optional
If `copy` is `False`, the result might share some memory with this
array/matrix. If `copy` is `True`, it is guaranteed that the result and
this array/matrix do not share any memory.
"""
dtype = getdtype(dtype)
if self.dtype != dtype:
return self.tocsr().astype(
dtype, casting=casting, copy=copy).asformat(self.format)
elif copy:
return self.copy()
else:
return self
@classmethod
def _ascontainer(cls, X, **kwargs):
if issubclass(cls, sparray):
return np.asarray(X, **kwargs)
else:
return asmatrix(X, **kwargs)
@classmethod
def _container(cls, X, **kwargs):
if issubclass(cls, sparray):
return np.array(X, **kwargs)
else:
return matrix(X, **kwargs)
def _asfptype(self):
"""Upcast array to a floating point format (if necessary)"""
fp_types = ['f', 'd', 'F', 'D']
if self.dtype.char in fp_types:
return self
else:
for fp_type in fp_types:
if self.dtype <= np.dtype(fp_type):
return self.astype(fp_type, copy=False)
raise TypeError(
f'cannot upcast [{self.dtype.name}] to a floating point format'
)
def __iter__(self):
for r in range(self.shape[0]):
yield self[r]
def _getmaxprint(self):
"""Maximum number of elements to display when printed."""
return self.maxprint
def count_nonzero(self, axis=None):
"""Number of non-zero entries, equivalent to
np.count_nonzero(a.toarray(), axis=axis)
Unlike the nnz property, which return the number of stored
entries (the length of the data attribute), this method counts the
actual number of non-zero entries in data.
Duplicate entries are summed before counting.
Parameters
----------
axis : {-2, -1, 0, 1, None} optional
Count nonzeros for the whole array, or along a specified axis.
.. versionadded:: 1.15.0
Returns
-------
numpy array
A reduced array (no axis `axis`) holding the number of nonzero values
for each of the indices of the nonaxis dimensions.
Notes
-----
If you want to count nonzero and explicit zero stored values (e.g. nnz)
along an axis, two fast idioms are provided by `numpy` functions for the
common CSR, CSC, COO formats.
For the major axis in CSR (rows) and CSC (cols) use `np.diff`:
>>> import numpy as np
>>> import scipy as sp
>>> A = sp.sparse.csr_array([[4, 5, 0], [7, 0, 0]])
>>> major_axis_stored_values = np.diff(A.indptr) # -> np.array([2, 1])
For the minor axis in CSR (cols) and CSC (rows) use `numpy.bincount` with
minlength ``A.shape[1]`` for CSR and ``A.shape[0]`` for CSC:
>>> csr_minor_stored_values = np.bincount(A.indices, minlength=A.shape[1])
For COO, use the minor axis approach for either `axis`:
>>> A = A.tocoo()
>>> coo_axis0_stored_values = np.bincount(A.coords[0], minlength=A.shape[1])
>>> coo_axis1_stored_values = np.bincount(A.coords[1], minlength=A.shape[0])
Examples
--------
>>> A = sp.sparse.csr_array([[4, 5, 0], [7, 0, 0]])
>>> A.count_nonzero(axis=0)
array([2, 1, 0])
"""
clsname = self.__class__.__name__
raise NotImplementedError(f"count_nonzero not implemented for {clsname}.")
def _getnnz(self, axis=None):
"""Number of stored values, including explicit zeros.
Parameters
----------
axis : {-2, -1, 0, 1, None} optional
Report stored values for the whole array, or along a specified axis.
See also
--------
count_nonzero : Number of non-zero entries
"""
clsname = self.__class__.__name__
raise NotImplementedError(f"getnnz not implemented for {clsname}.")
@property
def nnz(self) -> int:
"""Number of stored values, including explicit zeros.
See also
--------
count_nonzero : Number of non-zero entries
"""
return self._getnnz()
@property
def size(self) -> int:
"""Number of stored values.
See also
--------
count_nonzero : Number of non-zero values.
"""
return self._getnnz()
@property
def format(self) -> str:
"""Format string for matrix."""
return self._format
@property
def T(self):
"""Transpose."""
return self.transpose()
@property
def real(self):
return self._real()
@property
def imag(self):
return self._imag()
def __repr__(self):
_, format_name = _formats[self.format]
sparse_cls = 'array' if isinstance(self, sparray) else 'matrix'
return (
f"<{format_name} sparse {sparse_cls} of dtype '{self.dtype}'\n"
f"\twith {self.nnz} stored elements and shape {self.shape}>"
)
def __str__(self):
    # Render stored entries as "(coords)\tvalue" lines; when there are
    # more than maxprint entries, show the first and last halves with an
    # ellipsis row between them.
    maxprint = self._getmaxprint()
    A = self.tocoo()

    # helper function, outputs "(i,j) v"
    def tostr(coords, data):
        # zip(*) transposes the per-axis coordinate lists into per-entry
        # index tuples, paired with the corresponding data value.
        pairs = zip(zip(*(c.tolist() for c in coords)), data)
        return '\n'.join(f' {idx}\t{val}' for idx, val in pairs)

    out = repr(self)
    if self.nnz == 0:
        return out

    out += '\n Coords\tValues\n'
    if self.nnz > maxprint:
        half = maxprint // 2
        out += tostr(tuple(c[:half] for c in A.coords), A.data[:half])
        out += "\n :\t:\n"
        # remaining budget goes to the tail of the entries
        half = maxprint - half
        out += tostr(tuple(c[-half:] for c in A.coords), A.data[-half:])
    else:
        out += tostr(A.coords, A.data)

    return out
def __bool__(self):  # Simple -- other ideas?
    # Mirrors NumPy: truth value is defined only for a single-element
    # container, where it means "stores a nonzero value".
    if self.shape != (1, 1):
        raise ValueError("The truth value of an array with more than one "
                         "element is ambiguous. Use a.any() or a.all().")
    return self.nnz != 0
# len() is deliberately unsupported: for dense matrices it would be the
# number of rows, but for sparse use the stored-entry count is often what
# callers actually want — so any silent choice would surprise someone.
def __len__(self):
    raise TypeError("sparse array length is ambiguous; use getnnz()"
                    " or shape[0]")
def asformat(self, format, copy=False):
    """Return this array/matrix in the passed format.

    Parameters
    ----------
    format : {str, None}
        The desired sparse format ("csr", "csc", "lil", "dok", "array", ...)
        or None for no conversion.
    copy : bool, optional
        If True, the result is guaranteed to not share data with self.

    Returns
    -------
    A : This array/matrix in the passed format.
    """
    # Already in the requested format (or no format requested):
    # only honor the copy flag.
    if format in (None, self.format):
        return self.copy() if copy else self

    # Conversion methods follow the "to<format>" naming convention.
    try:
        converter = getattr(self, 'to' + format)
    except AttributeError as e:
        raise ValueError(f'Format {format} is unknown.') from e

    # Forward the copy kwarg only if the converter accepts it.
    try:
        return converter(copy=copy)
    except TypeError:
        return converter()
###################################################################
#  NOTE: All arithmetic operations use csr_matrix by default.
# Therefore a new sparse array format just needs to define a
# .tocsr() method to provide arithmetic support. Any of these
# methods can be overridden for efficiency.
####################################################################

def multiply(self, other):
    """Element-wise multiplication by another array/matrix."""
    if isscalarlike(other):
        return self._mul_scalar(other)
    if self.ndim < 3:
        try:
            return self._multiply_2d_with_broadcasting(other)
        except AttributeError:
            # Format lacks the 2-D helper; CSR always provides it.
            return self.tocsr()._multiply_2d_with_broadcasting(other)
    # From here on self is >2D (stored as COO).
    if not (issparse(other) or isdense(other)):
        # If it's a list or whatever, treat it like an array
        other_a = np.asanyarray(other)
        if other_a.ndim == 0 and other_a.dtype == np.object_:
            # numpy creates a 0d object array if all else fails.
            # Not interpretable as an array; return NotImplemented so
            # other's __rmul__ can kick in if that's implemented.
            return NotImplemented
        # Allow custom sparse class indicated by attr sparse gh-6520
        try:
            other.shape
        except AttributeError:
            other = other_a
    if self.shape != other.shape:
        raise ValueError("inconsistent shapes: >2D multiply() does not yet "
                         "support broadcasting")
    # self is >2D so must be COO
    if isdense(other):
        # Only values at self's stored coordinates can be nonzero in the
        # product, so gather those from the dense operand.
        data = np.multiply(self.data, other[self.coords])
        result = self.copy()
        result.data = data.view(np.ndarray).ravel()
        return result
    elif issparse(other):
        # Flatten both to 2-D CSR, multiply element-wise, reshape back.
        csr_self = self.reshape(1, -1).tocsr()
        csr_other = other.reshape(1, -1).tocsr()
        return csr_self._binopt(csr_other, '_elmul_').reshape(self.shape)
    else:
        # Not scalar, dense or sparse. Return NotImplemented so that
        # other's __rmul__ can kick in if that's implemented.
        return NotImplemented
def _maximum_minimum(self, other, np_op):
    # Shared implementation behind maximum()/minimum(); ``np_op`` is the
    # matching NumPy ufunc (np.maximum or np.minimum).
    if not (issparse(other) or isdense(other) or isscalarlike(other)):
        # If it's a list or whatever, treat it like an array
        other_a = np.asanyarray(other)
        if other_a.ndim == 0 and other_a.dtype == np.object_:
            # numpy creates a 0d object array if all else fails.
            # We don't know how to handle it either.
            raise NotImplementedError('maximum or minimum with an unrecognized '
                                      'array type is not supported')
        # Allow custom sparse class indicated by attr sparse gh-6520
        try:
            other.shape
        except AttributeError:
            other = other_a
    if isscalarlike(other):
        if np_op(0, other):
            # Every implicit zero maps to a nonzero value -> dense result.
            pos_neg = 'positive' if np_op == np.maximum else 'negative'
            warn(f"Taking {np_op.__name__} with a {pos_neg} number results in a"
                 " dense matrix.", SparseEfficiencyWarning, stacklevel=3)
            return self.__class__(np_op(self.toarray(), other))
        else:
            # Zeros stay zero, so only stored entries need evaluating.
            if self.ndim < 3:
                cs_self = self if self.format in ('csr', 'csc') else self.tocsr()
                return cs_self._scalar_binopt(other, np_op)
            # >2D: flatten to 2-D CSR, compute, reshape back.
            csr_self = self.reshape(1, -1).tocsr()
            result = csr_self._scalar_binopt(other, np_op)
            return result.tocoo().reshape(self.shape)
    elif isdense(other):
        return np_op(self.todense(), other)
    elif issparse(other):
        if self.shape != other.shape:
            raise ValueError(f"inconsistent shapes {self.shape=} {other.shape=}")
        if self.ndim < 3:  # shape is same so other.ndim < 3
            cs_self = self if self.format in ('csr', 'csc') else self.tocsr()
            return cs_self._binopt(other, f'_{np_op.__name__}_')
        # >2D: flatten both operands, compute, reshape back.
        csr_self = self.reshape(1, -1).tocsr()
        csr_other = other.reshape(1, -1).tocsr()
        result = csr_self._binopt(csr_other, f'_{np_op.__name__}_')
        return result.tocoo().reshape(self.shape)
    else:
        raise ValueError("Operands not compatible.")
def maximum(self, other):
    """Element-wise maximum between this and another array/matrix."""
    # All logic lives in _maximum_minimum; only the ufunc differs.
    return self._maximum_minimum(other, np.maximum)

def minimum(self, other):
    """Element-wise minimum between this and another array/matrix."""
    return self._maximum_minimum(other, np.minimum)
def dot(self, other):
    """Ordinary dot product

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csr_array
    >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
    >>> v = np.array([1, 0, -1])
    >>> A.dot(v)
    array([ 1, -3, -1], dtype=int64)
    """
    # Scalars multiply element-wise; everything else routes through
    # the matmul operator.
    if not np.isscalar(other):
        return self @ other
    return self * other
def power(self, n, dtype=None):
    """Element-wise power."""
    # Delegates to the CSR implementation.
    return self.tocsr().power(n, dtype=dtype)

def _broadcast_to(self, shape, copy=False):
    # Same-shape broadcast is the identity (modulo the copy flag);
    # otherwise CSR performs the actual broadcast.
    if self.shape == shape:
        return self.copy() if copy else self
    else:
        return self.tocsr()._broadcast_to(shape, copy)
def _comparison(self, other, op):
    # Shared engine for all rich comparisons (__eq__, __lt__, ...).
    # We convert to CSR format and use methods _binopt or _scalar_binopt
    # If ndim>2 we reshape to 2D, compare and then reshape back to nD
    if not (issparse(other) or isdense(other) or isscalarlike(other)):
        if is_pydata_spmatrix(other):
            # cannot compare with pydata other, but it might compare with us.
            return NotImplemented
        # If it's a list or whatever, treat it like an array
        other_a = np.asanyarray(other)
        if other_a.ndim == 0 and other_a.dtype == np.object_:
            # numpy creates a 0d object array if all else fails.
            # Not interpretable as an array; return NotImplemented so
            # other's dunder methods can kick in if implemented.
            return NotImplemented
        # Allow custom sparse class indicated by attr sparse gh-6520
        try:
            other.shape
        except AttributeError:
            other = other_a
    if isscalarlike(other):
        if not op(0, other):
            # Implicit zeros compare False, so the result stays sparse.
            if np.isnan(other):  # op is not `ne`, so results are all False.
                return self.__class__(self.shape, dtype=np.bool_)
            if self.ndim < 3:
                cs_self = self if self.format in ('csc', 'csr') else self.tocsr()
                return cs_self._scalar_binopt(other, op)
            csr_self = self.reshape(1, -1).tocsr()
            result = csr_self._scalar_binopt(other, op)
            return result.tocoo().reshape(self.shape)
        else:
            # Implicit zeros compare True -> effectively dense result.
            warn(f"Comparing a sparse matrix with {other} using {op_sym[op]} "
                 f"is inefficient. Try using {op_sym[op_neg[op]]} instead.",
                 SparseEfficiencyWarning, stacklevel=3)
            if np.isnan(other):
                # op is `ne` cuz op(0, other) and isnan(other). Return all True.
                return self.__class__(np.ones(self.shape, dtype=np.bool_))
            # op is eq, le, or ge. Use negated op and then negate.
            if self.ndim < 3:
                cs_self = self if self.format in ('csc', 'csr') else self.tocsr()
                inv = cs_self._scalar_binopt(other, op_neg[op])
                all_true = cs_self.__class__(np.ones(cs_self.shape, dtype=np.bool_))
                return all_true - inv
            csr_self = self.reshape(1, -1).tocsr()
            inv = csr_self._scalar_binopt(other, op_neg[op])
            all_true = csr_self.__class__(np.ones(csr_self.shape, dtype=np.bool_))
            result = all_true - inv
            return result.tocoo().reshape(self.shape)
    elif isdense(other):
        return op(self.todense(), other)
    elif issparse(other):
        # TODO sparse broadcasting
        if self.shape != other.shape:
            # eq and ne return True or False instead of an array when the shapes
            # don't match. Numpy doesn't do this. Is this what we want?
            if op in (operator.eq, operator.ne):
                return op is operator.ne
            raise ValueError("inconsistent shape")
        if self.ndim < 3:
            cs_self = self if self.format in ('csc', 'csr') else self.tocsr()
            cs_other = other
        else:
            # >2D: flatten both operands; reshape the result afterwards.
            cs_self = self.reshape(1, -1).tocsr()
            cs_other = other.reshape(1, -1).tocsr()
        if not op(0, 0):
            # Zero-vs-zero compares False -> sparse result.
            result = cs_self._binopt(cs_other, f'_{op.__name__}_')
            return result if self.ndim < 3 else result.tocoo().reshape(self.shape)
        else:
            # result will not be sparse. Use negated op and then negate.
            warn(f"Comparing two sparse matrices using {op_sym[op]} "
                 f"is inefficient. Try using {op_sym[op_neg[op]]} instead.",
                 SparseEfficiencyWarning, stacklevel=3)
            inv = cs_self._binopt(cs_other, f'_{op_neg[op].__name__}_')
            all_true = cs_self.__class__(np.ones(cs_self.shape, dtype=np.bool_))
            result = all_true - inv
            return result if self.ndim < 3 else result.tocoo().reshape(self.shape)
    else:
        # cannot compare with other, but it might compare with us.
        return NotImplemented
# Rich comparisons all funnel into _comparison with the matching
# operator; __abs__/__round__ delegate to the CSR implementation.
def __eq__(self, other):
    return self._comparison(other, operator.eq)

def __ne__(self, other):
    return self._comparison(other, operator.ne)

def __lt__(self, other):
    return self._comparison(other, operator.lt)

def __gt__(self, other):
    return self._comparison(other, operator.gt)

def __le__(self, other):
    return self._comparison(other, operator.le)

def __ge__(self, other):
    return self._comparison(other, operator.ge)

def __abs__(self):
    return abs(self.tocsr())

def __round__(self, ndigits=0):
    return round(self.tocsr(), ndigits=ndigits)
# Default addition/subtraction kernels: sparse operands go through CSR,
# dense operands through COO/dense (see the class-level NOTE on tocsr).
def _add_sparse(self, other):
    return self.tocsr()._add_sparse(other)

def _add_dense(self, other):
    return self.tocoo()._add_dense(other)

def _sub_sparse(self, other):
    return self.tocsr()._sub_sparse(other)

def _sub_dense(self, other):
    return self.todense() - other

def _rsub_dense(self, other):
    # note: this can't be replaced by other + (-self) for unsigned types
    return other - self.todense()
def __add__(self, other):  # self + other
    # Adding zero is a no-op (returns a copy); any other scalar would
    # densify the result and is therefore rejected.
    if isscalarlike(other):
        if other == 0:
            return self.copy()
        raise NotImplementedError('adding a nonzero scalar to a '
                                  'sparse array is not supported')
    if issparse(other):
        if other.shape != self.shape:
            raise ValueError("inconsistent shapes")
        return self._add_sparse(other)
    if isdense(other):
        broadcast_other = np.broadcast_to(other, self.shape)
        return self._add_dense(broadcast_other)
    return NotImplemented

def __radd__(self, other):  # other + self
    # Sparse addition commutes, so reuse the forward implementation.
    return self.__add__(other)
def __sub__(self, other):  # self - other
    # Subtracting zero is a no-op (copy); any other scalar would densify.
    if isscalarlike(other):
        if other == 0:
            return self.copy()
        raise NotImplementedError('subtracting a nonzero scalar from a '
                                  'sparse array is not supported')
    if issparse(other):
        if other.shape != self.shape:
            raise ValueError("inconsistent shapes")
        return self._sub_sparse(other)
    if isdense(other):
        return self._sub_dense(np.broadcast_to(other, self.shape))
    return NotImplemented

def __rsub__(self, other):  # other - self
    # 0 - self is plain negation; other nonzero scalars would densify.
    if isscalarlike(other):
        if other == 0:
            return -self.copy()
        raise NotImplementedError('subtracting a sparse array from a '
                                  'nonzero scalar is not supported')
    if isdense(other):
        return self._rsub_dense(np.broadcast_to(other, self.shape))
    return NotImplemented
def _matmul_dispatch(self, other):
    """np.array-like matmul & `np.matrix`-like mul, i.e. `dot` or `NotImplemented`

    interpret other and call one of the following
    self._mul_scalar()
    self._matmul_vector()
    self._matmul_multivector()
    self._matmul_sparse()
    """
    # This method has to be different from `__matmul__` because it is also
    # called by sparse matrix classes.

    # Currently matrix multiplication is only supported
    # for 2D arrays. Hence we unpacked and use only the
    # two last axes' lengths.
    M, N = self._shape_as_2d

    if other.__class__ is np.ndarray:
        # Fast path for the most common case
        if other.shape == (N,):
            return self._matmul_vector(other)
        elif other.shape == (N, 1):
            result = self._matmul_vector(other.ravel())
            if self.ndim == 1:
                return result.reshape(1)
            return result.reshape(M, 1)
        elif other.ndim == 2 and other.shape[0] == N:
            return self._matmul_multivector(other)

    if isscalarlike(other):
        # scalar value
        return self._mul_scalar(other)

    err_prefix = "matmul: dimension mismatch with signature"
    if issparse(other):
        if N != other.shape[0]:
            raise ValueError(
                f"{err_prefix} (n,k={N}),(k={other.shape[0]},m)->(n,m)"
            )
        return self._matmul_sparse(other)

    # If it's a list or whatever, treat it like an array
    other_a = np.asanyarray(other)

    if other_a.ndim == 0 and other_a.dtype == np.object_:
        # numpy creates a 0d object array if all else fails.
        # Not interpretable as an array; return NotImplemented so that
        # other's __rmatmul__ can kick in if that's implemented.
        return NotImplemented

    # Allow custom sparse class indicated by attr sparse gh-6520
    try:
        other.shape
    except AttributeError:
        other = other_a

    if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
        # dense row or column vector
        if other.shape[0] != N:
            raise ValueError(
                f"{err_prefix} (n,k={N}),(k={other.shape[0]},1?)->(n,1?)"
            )

        result = self._matmul_vector(np.ravel(other))

        if isinstance(other, np.matrix):
            # preserve np.matrix container semantics for the result
            result = self._ascontainer(result)

        if other.ndim == 2 and other.shape[1] == 1:
            # If 'other' was an (nx1) column vector, reshape the result
            if self.ndim == 1:
                result = result.reshape(1)
            else:
                result = result.reshape(-1, 1)

        return result

    elif other.ndim == 2:
        ##
        # dense 2D array or matrix ("multivector")

        if other.shape[0] != N:
            raise ValueError(
                f"{err_prefix} (n,k={N}),(k={other.shape[0]},m)->(n,m)"
            )

        result = self._matmul_multivector(np.asarray(other))

        if isinstance(other, np.matrix):
            result = self._ascontainer(result)

        return result
    else:
        raise ValueError('could not interpret dimensions')
def __mul__(self, other):
    # ``*`` is element-wise multiplication (NumPy array semantics),
    # not matrix multiplication — that is ``@``.
    return self.multiply(other)

def __rmul__(self, other):  # other * self
    # Element-wise multiplication commutes.
    return self.multiply(other)

# by default, use CSR for __mul__ handlers

def _mul_scalar(self, other):
    return self.tocsr()._mul_scalar(other)

def _matmul_vector(self, other):
    return self.tocsr()._matmul_vector(other)

def _matmul_multivector(self, other):
    return self.tocsr()._matmul_multivector(other)

def _matmul_sparse(self, other):
    return self.tocsr()._matmul_sparse(other)
def _rmatmul_dispatch(self, other):
    # other @ self is computed as (self.T @ other.T).T, reusing the
    # forward dispatch machinery.
    if isscalarlike(other):
        return self._mul_scalar(other)
    # Don't use asarray unless we have to
    try:
        other_t = other.transpose()
    except AttributeError:
        other_t = np.asarray(other).transpose()
    product = self.transpose()._matmul_dispatch(other_t)
    if product is NotImplemented:
        return NotImplemented
    return product.transpose()
#######################
# matmul (@) operator #
#######################

def __matmul__(self, other):
    # Reject scalars for ``@`` to match NumPy's matmul semantics.
    if isscalarlike(other):
        raise ValueError("Scalar operands are not allowed, "
                         "use '*' instead")
    return self._matmul_dispatch(other)

def __rmatmul__(self, other):
    if isscalarlike(other):
        raise ValueError("Scalar operands are not allowed, "
                         "use '*' instead")
    return self._rmatmul_dispatch(other)
####################
# Other Arithmetic #
####################

def _divide(self, other, *, rdivide=False):
    # Core of true division; rdivide=True computes other / self.
    if not (issparse(other) or isdense(other) or isscalarlike(other)):
        # If it's a list or whatever, treat it like an array
        other_a = np.asanyarray(other)
        if other_a.ndim == 0 and other_a.dtype == np.object_:
            # numpy creates a 0d object array if all else fails.
            # Not interpretable as an array; return NotImplemented so that
            # other's __rtruediv__ can kick in if that's implemented.
            return NotImplemented
        # Allow custom sparse class indicated by attr sparse gh-6520
        try:
            other.shape
        except AttributeError:
            other = other_a
    if isscalarlike(other):
        if rdivide:
            # scalar / sparse densifies (division by implicit zeros).
            return np.divide(other, self.todense())
        if np.can_cast(self.dtype, np.float64):
            return self.astype(np.float64, copy=False)._mul_scalar(1 / other)
        else:
            r = self._mul_scalar(1 / other)
            scalar_dtype = np.asarray(other).dtype
            if (np.issubdtype(self.dtype, np.integer) and
                    np.issubdtype(scalar_dtype, np.integer)):
                # int / int mimics NumPy's result dtype handling here.
                return r.astype(self.dtype, copy=False)
            else:
                return r
    elif isdense(other):
        if rdivide:
            return np.divide(other, self.todense())
        return self.multiply(np.divide(1, other))
    elif issparse(other):
        if rdivide:
            return other._divide(self, rdivide=False)
        # >2D operands are flattened to 2-D CSR and reshaped afterwards.
        csr_self = (self if self.ndim < 3 else self.reshape(1, -1)).tocsr()
        csr_other = (other if self.ndim < 3 else other.reshape(1, -1)).tocsr()
        if np.can_cast(self.dtype, np.float64):
            csr_self = csr_self.astype(np.float64, copy=False)
        result = csr_self._divide_sparse(csr_other)
        return result if self.ndim < 3 else result.reshape(self.shape)
    else:
        # not scalar, dense or sparse. Return NotImplemented so
        # other's __rtruediv__ can kick in if that's implemented.
        return NotImplemented
def __truediv__(self, other):
    return self._divide(other)

def __rtruediv__(self, other):
    # Implementing this as the inverse would be too magical -- bail out
    return NotImplemented

def __neg__(self):
    # Negation delegates to CSR.
    return -self.tocsr()

# In-place arithmetic is unsupported; returning NotImplemented makes
# Python fall back to the out-of-place operator and rebind the name.
def __iadd__(self, other):
    return NotImplemented

def __isub__(self, other):
    return NotImplemented

def __imul__(self, other):
    return NotImplemented

def __itruediv__(self, other):
    return NotImplemented

def __pow__(self, *args, **kwargs):
    # ``a ** n`` is element-wise power.
    return self.power(*args, **kwargs)
def transpose(self, axes=None, copy=False):
    """
    Reverses the dimensions of the sparse array/matrix.

    Parameters
    ----------
    axes : None, optional
        This argument is in the signature *solely* for NumPy
        compatibility reasons. Do not pass in anything except
        for the default value.
    copy : bool, optional
        Indicates whether or not attributes of `self` should be
        copied whenever possible. The degree to which attributes
        are copied varies depending on the type of sparse array/matrix
        being used.

    Returns
    -------
    p : `self` with the dimensions reversed.

    Notes
    -----
    If `self` is a `csr_array` or a `csc_array`, then this will return a
    `csc_array` or a `csr_array`, respectively.

    See Also
    --------
    numpy.transpose : NumPy's implementation of 'transpose' for ndarrays
    """
    # Generic fallback: convert to CSR (copying here if requested) and
    # let its transpose do the work without a second copy.
    return self.tocsr(copy=copy).transpose(axes=axes, copy=False)
def conjugate(self, copy=True):
    """Element-wise complex conjugation.

    If the array/matrix is of non-complex data type and `copy` is False,
    this method does nothing and the data is not copied.

    Parameters
    ----------
    copy : bool, optional
        If True, the result is guaranteed to not share data with self.

    Returns
    -------
    A : The element-wise complex conjugate.
    """
    # Conjugation only changes complex data; for any other dtype it is
    # the identity, honoring only the copy request.
    if not np.issubdtype(self.dtype, np.complexfloating):
        return self.copy() if copy else self
    return self.tocsr(copy=copy).conjugate(copy=False)
def conj(self, copy=True):
    # Alias for conjugate(), mirroring NumPy's short spelling.
    return self.conjugate(copy=copy)

conj.__doc__ = conjugate.__doc__

# Real/imaginary parts default to the CSR implementation.
def _real(self):
    return self.tocsr()._real()

def _imag(self):
    return self.tocsr()._imag()
def nonzero(self):
    """Nonzero indices of the array/matrix.

    Returns a tuple of arrays (row,col) containing the indices
    of the non-zero elements of the array.

    Examples
    --------
    >>> from scipy.sparse import csr_array
    >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
    >>> A.nonzero()
    (array([0, 0, 1, 2, 2], dtype=int32), array([0, 1, 2, 0, 2], dtype=int32))
    """
    # COO exposes parallel per-axis coordinate arrays; mask out explicitly
    # stored zeros so only truly nonzero entries are reported.
    coo = self.tocoo()
    keep = coo.data != 0
    return tuple(axis_idx[keep] for axis_idx in coo.coords)
def _getcol(self, j):
    """Returns a copy of column j of the array, as an (m x 1) sparse
    array (column vector).
    """
    if self.ndim == 1:
        raise ValueError("getcol not provided for 1d arrays. Use indexing A[j]")
    # Generic fallback: right-multiply by the j-th standard basis vector,
    # built as an (n x 1) sparse column. Subclasses should override this
    # method for efficiency.
    n = self.shape[-1]
    if j < 0:
        j += n
    if not (0 <= j < n):
        raise IndexError("index out of bounds")
    basis = self._csc_container(([1], [[j], [0]]),
                                shape=(n, 1), dtype=self.dtype)
    return self @ basis
def _getrow(self, i):
    """Returns a copy of row i of the array, as a (1 x n) sparse
    array (row vector).
    """
    if self.ndim == 1:
        raise ValueError("getrow not meaningful for a 1d array")
    # Generic fallback: left-multiply by the i-th standard basis vector,
    # built as a (1 x m) sparse row. Subclasses should override this
    # method for efficiency.
    m = self.shape[0]
    if i < 0:
        i += m
    if not (0 <= i < m):
        raise IndexError("index out of bounds")
    basis = self._csr_container(([1], [[0], [i]]),
                                shape=(1, m), dtype=self.dtype)
    return basis @ self
# The following dunder methods cannot be implemented.
#
# def __array__(self):
#     # Sparse matrices rely on NumPy wrapping them in object arrays under
#     # the hood to make unary ufuncs work on them. So we cannot raise
#     # TypeError here - which would be handy to not give users object
#     # arrays they probably don't want (they're looking for `.toarray()`).
#     #
#     # Conversion with `toarray()` would also break things because of the
#     # behavior discussed above, plus we want to avoid densification by
#     # accident because that can too easily blow up memory.
#
# def __array_ufunc__(self):
#     # We cannot implement __array_ufunc__ due to mismatching semantics.
#     # See gh-7707 and gh-7349 for details.
#
# def __array_function__(self):
#     # We cannot implement __array_function__ due to mismatching semantics.
#     # See gh-10362 for details.

def todense(self, order=None, out=None):
    """
    Return a dense representation of this sparse array.

    Parameters
    ----------
    order : {'C', 'F'}, optional
        Whether to store multi-dimensional data in C (row-major)
        or Fortran (column-major) order in memory. The default
        is 'None', which provides no ordering guarantees.
        Cannot be specified in conjunction with the `out`
        argument.
    out : ndarray, 2-D, optional
        If specified, uses this array as the output buffer
        instead of allocating a new array to return. The
        provided array must have the same shape and dtype as
        the sparse array on which you are calling the method.

    Returns
    -------
    arr : ndarray, 2-D
        An array with the same shape and containing the same
        data represented by the sparse array, with the requested
        memory order. If `out` was passed, the same object is
        returned after being modified in-place to contain the
        appropriate values.
    """
    # toarray() produces the ndarray; _ascontainer wraps it in the
    # matching container type (np.matrix for spmatrix subclasses).
    return self._ascontainer(self.toarray(order=order, out=out))
def toarray(self, order=None, out=None):
    """
    Return a dense ndarray representation of this sparse array/matrix.

    Parameters
    ----------
    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C (row-major)
        or Fortran (column-major) order in memory. The default
        is 'None', which provides no ordering guarantees.
        Cannot be specified in conjunction with the `out`
        argument.
    out : ndarray, 2-D, optional
        If specified, uses this array as the output buffer
        instead of allocating a new array to return. The provided
        array must have the same shape and dtype as the sparse
        array/matrix on which you are calling the method. For most
        sparse types, `out` is required to be memory contiguous
        (either C or Fortran ordered).

    Returns
    -------
    arr : ndarray, 2-D
        An array with the same shape and containing the same
        data represented by the sparse array/matrix, with the requested
        memory order. If `out` was passed, the same object is
        returned after being modified in-place to contain the
        appropriate values.
    """
    # Generic fallback: route through COO (no copy) and use its toarray.
    return self.tocoo(copy=False).toarray(order=order, out=out)
# Any sparse array format deriving from _spbase must define one of
# tocsr or tocoo. The other conversion methods may be implemented for
# efficiency, but are not required.
def tocsr(self, copy=False):
    """Convert this array/matrix to Compressed Sparse Row format.

    With copy=False, the data/indices may be shared between this array/matrix and
    the resultant csr_array/matrix.
    """
    # Honor `copy` in the first conversion only; the second would
    # otherwise duplicate data needlessly.
    return self.tocoo(copy=copy).tocsr(copy=False)

def todok(self, copy=False):
    """Convert this array/matrix to Dictionary Of Keys format.

    With copy=False, the data/indices may be shared between this array/matrix and
    the resultant dok_array/matrix.
    """
    return self.tocoo(copy=copy).todok(copy=False)

def tocoo(self, copy=False):
    """Convert this array/matrix to COOrdinate format.

    With copy=False, the data/indices may be shared between this array/matrix and
    the resultant coo_array/matrix.
    """
    return self.tocsr(copy=False).tocoo(copy=copy)

def tolil(self, copy=False):
    """Convert this array/matrix to List of Lists format.

    With copy=False, the data/indices may be shared between this array/matrix and
    the resultant lil_array/matrix.
    """
    return self.tocsr(copy=False).tolil(copy=copy)

def todia(self, copy=False):
    """Convert this array/matrix to sparse DIAgonal format.

    With copy=False, the data/indices may be shared between this array/matrix and
    the resultant dia_array/matrix.
    """
    return self.tocoo(copy=copy).todia(copy=False)

def tobsr(self, blocksize=None, copy=False):
    """Convert this array/matrix to Block Sparse Row format.

    With copy=False, the data/indices may be shared between this array/matrix and
    the resultant bsr_array/matrix.

    When blocksize=(R, C) is provided, it will be used for construction of
    the bsr_array/matrix.
    """
    return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)

def tocsc(self, copy=False):
    """Convert this array/matrix to Compressed Sparse Column format.

    With copy=False, the data/indices may be shared between this array/matrix and
    the resultant csc_array/matrix.
    """
    return self.tocsr(copy=copy).tocsc(copy=False)

def copy(self):
    """Returns a copy of this array/matrix.

    No data/indices will be shared between the returned value and current
    array/matrix.
    """
    # Round-trip through the constructor with copy=True deep-copies data.
    return self.__class__(self, copy=True)
def sum(self, axis=None, dtype=None, out=None):
    """
    Sum the array/matrix elements over a given axis.

    Parameters
    ----------
    axis : {-2, -1, 0, 1, None} optional
        Axis along which the sum is computed. The default is to
        compute the sum of all the array/matrix elements, returning a scalar
        (i.e., `axis` = `None`).
    dtype : dtype, optional
        The type of the returned array/matrix and of the accumulator in which
        the elements are summed. The dtype of `a` is used by default
        unless `a` has an integer dtype of less precision than the default
        platform integer. In that case, if `a` is signed then the platform
        integer is used while if `a` is unsigned then an unsigned integer
        of the same precision as the platform integer is used.

        .. versionadded:: 0.18.0

    out : np.matrix, optional
        Alternative output matrix in which to place the result. It must
        have the same shape as the expected output, but the type of the
        output values will be cast if necessary.

        .. versionadded:: 0.18.0

    Returns
    -------
    sum_along_axis : np.matrix
        A matrix with the same shape as `self`, with the specified
        axis removed.

    See Also
    --------
    numpy.matrix.sum : NumPy's implementation of 'sum' for matrices
    """
    axis = validateaxis(axis, ndim=self.ndim)

    # Mimic numpy's casting.
    res_dtype = get_sum_dtype(self.dtype)

    # Note: all valid 1D axis values are canonically `None`.
    if axis is None:
        # Full reduction: summing the stored values is enough, since
        # implicit entries are zero.
        if self.nnz == 0:
            return np.sum(self._ascontainer([0]), dtype=dtype or res_dtype, out=out)
        return np.sum(self._ascontainer(_todata(self)), dtype=dtype, out=out)
    elif isspmatrix(self):
        # Ensure spmatrix sums stay 2D
        new_shape = (1, self.shape[1]) if axis == (0,) else (self.shape[0], 1)
    else:
        new_shape = tuple(self.shape[i] for i in range(self.ndim) if i not in axis)

    if out is None:
        # create out array with desired dtype
        out = self._ascontainer(np.zeros(new_shape, dtype=dtype or res_dtype))
    else:
        if out.shape != new_shape:
            raise ValueError("out dimensions do not match shape")

    if self.ndim > 2:
        return self._sum_nd(axis, res_dtype, out)

    # We use multiplication by a matrix of ones to sum.
    # For some sparse array formats more efficient methods are
    # possible -- these should override this function.
    if axis == (0,):
        ones = self._ascontainer(np.ones((1, self.shape[0]), dtype=res_dtype))
        # sets dtype while loading into out
        out[...] = (ones @ self).reshape(new_shape)
    else:  # axis == (1,)
        ones = self._ascontainer(np.ones((self.shape[1], 1), dtype=res_dtype))
        # sets dtype while loading into out
        out[...] = (self @ ones).reshape(new_shape)
    return out
def mean(self, axis=None, dtype=None, out=None):
    """
    Compute the arithmetic mean along the specified axis.

    Returns the average of the array/matrix elements. The average is taken
    over all elements in the array/matrix by default, otherwise over the
    specified axis. `float64` intermediate and return values are used
    for integer inputs.

    Parameters
    ----------
    axis : {-2, -1, 0, 1, None} optional
        Axis along which the mean is computed. The default is to compute
        the mean of all elements in the array/matrix (i.e., `axis` = `None`).
    dtype : data-type, optional
        Type to use in computing the mean. For integer inputs, the default
        is `float64`; for floating point inputs, it is the same as the
        input dtype.

        .. versionadded:: 0.18.0

    out : np.matrix, optional
        Alternative output matrix in which to place the result. It must
        have the same shape as the expected output, but the type of the
        output values will be cast if necessary.

        .. versionadded:: 0.18.0

    Returns
    -------
    m : np.matrix

    See Also
    --------
    numpy.matrix.mean : NumPy's implementation of 'mean' for matrices
    """
    axis = validateaxis(axis, ndim=self.ndim)
    integral = (np.issubdtype(self.dtype, np.integer) or
                np.issubdtype(self.dtype, np.bool_))

    # intermediate dtype for summation
    inter_dtype = np.float64 if integral else self.dtype
    inter_self = self.astype(inter_dtype, copy=False)

    # Divisor is the number of elements reduced over.
    if axis is None:
        denom = math.prod(self.shape)
    else:
        denom = math.prod(self.shape[ax] for ax in axis)

    # Scale first, then sum: mean = sum(x / n).
    res = (inter_self * (1.0 / denom)).sum(axis=axis, dtype=inter_dtype, out=out)
    if dtype is not None and out is None:
        return res.astype(dtype, copy=False)
    return res
def diagonal(self, k=0):
    """Returns the kth diagonal of the array/matrix.

    Parameters
    ----------
    k : int, optional
        Which diagonal to get, corresponding to elements a[i, i+k].
        Default: 0 (the main diagonal).

        .. versionadded:: 1.0

    See also
    --------
    numpy.diagonal : Equivalent numpy function.

    Examples
    --------
    >>> from scipy.sparse import csr_array
    >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
    >>> A.diagonal()
    array([1, 0, 5])
    >>> A.diagonal(k=1)
    array([2, 3])
    """
    # Delegates to the CSR implementation.
    return self.tocsr().diagonal(k=k)
def trace(self, offset=0):
    """Returns the sum along diagonals of the sparse array/matrix.

    Parameters
    ----------
    offset : int, optional
        Which diagonal to get, corresponding to elements a[i, i+offset].
        Default: 0 (the main diagonal).
    """
    # Extract the requested diagonal as a dense 1-D array and reduce it.
    diag = self.diagonal(k=offset)
    return diag.sum()
def setdiag(self, values, k=0):
    """
    Set diagonal or off-diagonal elements of the array/matrix.

    Parameters
    ----------
    values : array_like
        New values of the diagonal elements.

        Values may have any length. If the diagonal is longer than values,
        then the remaining diagonal entries will not be set. If values are
        longer than the diagonal, then the remaining values are ignored.

        If a scalar value is given, all of the diagonal is set to it.
    k : int, optional
        Which off-diagonal to set, corresponding to elements a[i,i+k].
        Default: 0 (the main diagonal).
    """
    nrows, ncols = self.shape
    # A diagonal lying entirely outside the array is an error.
    if (k > 0 and k >= ncols) or (k < 0 and -k >= nrows):
        raise ValueError("k exceeds array dimensions")
    # Format-specific _setdiag does the actual element assignment.
    self._setdiag(np.asarray(values), k)
def _setdiag(self, values, k):
    """This part of the implementation gets overridden by the
    different formats.

    Generic fallback: assign elements one at a time via __setitem__.
    ``values`` is already an ndarray (0-d means broadcast a scalar along
    the whole diagonal); ``k`` has been validated by setdiag().
    """
    M, N = self.shape
    if k < 0:
        # Sub-diagonal: entries are a[i - k, i].
        if values.ndim == 0:
            # broadcast
            max_index = min(M+k, N)
            for i in range(max_index):
                self[i - k, i] = values
        else:
            # Stop at whichever runs out first: diagonal or values.
            max_index = min(M+k, N, len(values))
            if max_index <= 0:
                return
            for i, v in enumerate(values[:max_index]):
                self[i - k, i] = v
    else:
        # Main or super-diagonal: entries are a[i, i + k].
        if values.ndim == 0:
            # broadcast
            max_index = min(M, N-k)
            for i in range(max_index):
                self[i, i + k] = values
        else:
            max_index = min(M, N-k, len(values))
            if max_index <= 0:
                return
            for i, v in enumerate(values[:max_index]):
                self[i, i + k] = v
def _process_toarray_args(self, order, out):
    # Validate and prepare the destination buffer for toarray()-style
    # methods: either zero out a caller-supplied buffer or allocate one.
    if out is None:
        return np.zeros(self.shape, dtype=self.dtype, order=order)
    if order is not None:
        raise ValueError('order cannot be specified if out '
                         'is not None')
    if out.shape != self.shape or out.dtype != self.dtype:
        raise ValueError('out array must be same dtype and shape as '
                         'sparse array')
    out[...] = 0.
    return out
def _get_index_dtype(self, arrays=(), maxval=None, check_contents=False):
    """
    Determine index dtype for array.

    This wraps _sputils.get_index_dtype, providing compatibility for both
    array and matrix API sparse matrices. Matrix API sparse matrices would
    attempt to downcast the indices - which can be computationally
    expensive and undesirable for users. The array API changes this
    behaviour.

    See discussion: https://github.com/scipy/scipy/issues/16774

    The get_index_dtype import is due to implementation details of the test
    suite. It allows the decorator ``with_64bit_maxval_limit`` to mock a
    lower int32 max value for checks on the matrix API's downcasting
    behaviour.
    """
    # Deferred import so the test suite can monkeypatch get_index_dtype
    # (see docstring above); a module-level import would bind too early.
    from ._sputils import get_index_dtype

    # Don't check contents for array API
    return get_index_dtype(arrays,
                           maxval,
                           (check_contents and not isinstance(self, sparray)))
| _spbase |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_validators.py | {
"start": 645,
"end": 1638
class ____(UptimeTestCase):
    # Exercises compute_http_request_size; the expected values are the
    # request sizes in bytes for method + URL + headers (+ optional body).
    def test(self) -> None:
        # Multi-byte UTF-8 header values must be counted by encoded byte
        # length, not by character count -- hence the larger total below.
        assert (
            compute_http_request_size(
                "GET",
                "https://sentry.io",
                [("auth", "1234"), ("utf_text", "我喜欢哨兵正常运行时间监视器")],
                "some body stuff",
            )
            == 111
        )
        assert (
            compute_http_request_size(
                "GET",
                "https://sentry.io",
                # Test same number of characters but ascii instead
                [("auth", "1234"), ("non_utf_text", "abcdefghijklmn")],
                "some body stuff",
            )
            == 87
        )
        # A missing body contributes nothing to the computed size.
        assert (
            compute_http_request_size(
                "GET",
                "https://sentry.io",
                # Test same number of characters but ascii instead
                [("auth", "1234"), ("non_utf_text", "abcdefghijklmn")],
                None,
            )
            == 70
        )
| ComputeHttpRequestSizeTest |
python | mahmoud__glom | glom/streaming.py | {
"start": 885,
"end": 12926
class ____:
    """``Iter()`` is glom's counterpart to Python's built-in :func:`iter()`
    function. Given an iterable target, ``Iter()`` yields the result
    of applying the passed spec to each element of the target, similar
    to the built-in ``[]`` spec, but streaming.

    The following turns a list of strings into integers using Iter(),
    before deduplicating and converting it to a tuple:

    >>> glom(['1', '2', '1', '3'], (Iter(int), set, tuple))
    (1, 2, 3)

    ``Iter()`` also has many useful methods which can be chained to
    compose a stream processing pipeline. The above can also be
    written as:

    >>> glom(['1', '2', '1', '3'], (Iter().map(int).unique(), tuple))
    (1, 2, 3)

    ``Iter()`` also respects glom's :data:`~glom.SKIP` and
    :data:`~glom.STOP` singletons for filtering and breaking
    iteration.

    Args:

       subspec: A subspec to be applied on each element from the iterable.
       sentinel: Keyword-only argument, which, when found in the
         iterable stream, causes the iteration to stop. Same as with the
         built-in :func:`iter`.
    """
    def __init__(self, subspec=T, **kwargs):
        self.subspec = subspec
        # _iter_stack holds (method_name, args, callback) tuples, newest
        # first; glomit() applies them oldest-first to build the pipeline.
        self._iter_stack = kwargs.pop('_iter_stack', [])

        self.sentinel = kwargs.pop('sentinel', STOP)
        if kwargs:
            raise TypeError('unexpected keyword arguments: %r' % sorted(kwargs))
        return

    def __repr__(self):
        # Render as the original chained-call expression, e.g.
        # Iter().map(int).unique(), by replaying the op stack.
        base_args = ()
        if self.subspec != T:
            base_args = (self.subspec,)
        base = format_invocation(self.__class__.__name__, base_args, repr=bbrepr)
        chunks = [base]
        for fname, args, _ in reversed(self._iter_stack):
            meth = getattr(self, fname)
            fb = FunctionBuilder.from_func(meth)
            fb.args = fb.args[1:]  # drop self
            arg_names = fb.get_arg_names()
            # TODO: something fancier with defaults:
            kwargs = []
            if len(args) > 1 and arg_names:
                args, kwargs = (), zip(arg_names, args)
            chunks.append('.' + format_invocation(fname, args, kwargs, repr=bbrepr))
        return ''.join(chunks)

    def glomit(self, target, scope):
        # Build the base element stream, then wrap it with each queued
        # operation (map/filter/chunked/...), oldest first.
        iterator = self._iterate(target, scope)

        for _, _, callback in reversed(self._iter_stack):
            iterator = callback(iterator, scope)

        return iter(iterator)

    def _iterate(self, target, scope):
        # Yield each element of *target* with self.subspec applied,
        # honoring SKIP (drop element) and sentinel/STOP (end stream).
        iterate = scope[TargetRegistry].get_handler('iterate', target, path=scope[Path])
        try:
            iterator = iterate(target)
        except Exception as e:
            raise TypeError('failed to iterate on instance of type %r at %r (got %r)'
                            % (target.__class__.__name__, Path(*scope[Path]), e))

        base_path = scope[Path]
        for i, t in enumerate(iterator):
            scope[Path] = base_path + [i]
            yld = (t if self.subspec is T else scope[glom](t, self.subspec, scope))
            if yld is SKIP:
                continue
            elif yld is self.sentinel or yld is STOP:
                # NB: sentinel defaults to STOP so I was torn whether
                # to also check for STOP, and landed on the side of
                # never letting STOP through.
                return
            yield yld
        return

    def _add_op(self, opname, args, callback):
        # Immutable chaining: each op returns a new Iter with the op
        # prepended to the stack, leaving self untouched.
        return type(self)(subspec=self.subspec, _iter_stack=[(opname, args, callback)] + self._iter_stack)

    def map(self, subspec):
        """Return a new :class:`Iter()` spec which will apply the provided
        *subspec* to each element of the iterable.

        >>> glom(range(5), Iter().map(lambda x: x * 2).all())
        [0, 2, 4, 6, 8]

        Because a spec can be a callable, :meth:`Iter.map()` does
        everything the built-in :func:`map` does, but with the full
        power of glom specs.

        >>> glom(['a', 'B', 'C'], Iter().map(T.islower()).all())
        [True, False, False]
        """
        # whatever validation you want goes here
        # TODO: DRY the self._add_op with a decorator?
        return self._add_op(
            'map',
            (subspec,),
            lambda iterable, scope: imap(
                lambda t: scope[glom](t, subspec, scope), iterable))

    def filter(self, key=T):
        """Return a new :class:`Iter()` spec which will include only elements matching the
        given *key*.

        >>> glom(range(6), Iter().filter(lambda x: x % 2).all())
        [1, 3, 5]

        Because a spec can be a callable, :meth:`Iter.filter()` does
        everything the built-in :func:`filter` does, but with the full
        power of glom specs. For even more power, combine,
        :meth:`Iter.filter()` with :class:`Check()`.

        >>> # PROTIP: Python's ints know how many binary digits they require, using the bit_length method
        >>> glom(range(9), Iter().filter(Check(T.bit_length(), one_of=(2, 4), default=SKIP)).all())
        [2, 3, 8]
        """
        # NB: Check's validate function defaults to bool, and
        # *default* is returned on access errors as well validation
        # errors, so the lambda passed to ifilter below works fine.
        check_spec = key if isinstance(key, Check) else Check(key, default=SKIP)
        return self._add_op(
            'filter',
            (key,),
            lambda iterable, scope: ifilter(
                lambda t: scope[glom](t, check_spec, scope) is not SKIP, iterable))

    def chunked(self, size, fill=_MISSING):
        """Return a new :class:`Iter()` spec which groups elements in the iterable
        into lists of length *size*.

        If the optional *fill* argument is provided, iterables not
        evenly divisible by *size* will be padded out by the *fill*
        constant. Otherwise, the final chunk will be shorter than *size*.

        >>> list(glom(range(10), Iter().chunked(3)))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
        >>> list(glom(range(10), Iter().chunked(3, fill=None)))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
        """
        kw = {'size': size}
        args = size,
        if fill is not _MISSING:
            kw['fill'] = fill
            args += (fill,)
        return self._add_op(
            'chunked', args, lambda it, scope: chunked_iter(it, **kw))

    def windowed(self, size):
        """Return a new :class:`Iter()` spec which will yield a sliding window of
        adjacent elements in the iterable. Each tuple yielded will be
        of length *size*.

        Useful for getting adjacent pairs and triples.

        >>> list(glom(range(4), Iter().windowed(2)))
        [(0, 1), (1, 2), (2, 3)]
        """
        return self._add_op(
            'windowed', (size,), lambda it, scope: windowed_iter(it, size))

    def split(self, sep=None, maxsplit=None):
        """Return a new :class:`Iter()` spec which will lazily split an iterable based
        on a separator (or list of separators), *sep*. Like
        :meth:`str.split()`, but for all iterables.

        ``split_iter()`` yields lists of non-separator values. A separator will
        never appear in the output.

        >>> target = [1, 2, None, None, 3, None, 4, None]
        >>> list(glom(target, Iter().split()))
        [[1, 2], [3], [4]]

        Note that ``split_iter`` is based on :func:`str.split`, so if
        *sep* is ``None``, ``split()`` **groups** separators. If empty lists
        are desired between two contiguous ``None`` values, simply use
        ``sep=[None]``:

        >>> list(glom(target, Iter().split(sep=[None])))
        [[1, 2], [], [3], [4], []]

        A max number of splits may also be set:

        >>> list(glom(target, Iter().split(maxsplit=2)))
        [[1, 2], [3], [4, None]]
        """
        return self._add_op(
            'split',
            (sep, maxsplit),
            lambda it, scope: split_iter(it, sep=sep, maxsplit=maxsplit))

    def flatten(self):
        """Returns a new :class:`Iter()` instance which combines iterables into a
        single iterable.

        >>> target = [[1, 2], [3, 4], [5]]
        >>> list(glom(target, Iter().flatten()))
        [1, 2, 3, 4, 5]
        """
        return self._add_op(
            'flatten',
            (),
            lambda it, scope: chain.from_iterable(it))

    def unique(self, key=T):
        """Return a new :class:`Iter()` spec which lazily filters out duplicate
        values, i.e., only the first appearance of a value in a stream will
        be yielded.

        >>> target = list('gloMolIcious')
        >>> out = list(glom(target, Iter().unique(T.lower())))
        >>> print(''.join(out))
        gloMIcus
        """
        return self._add_op(
            'unique',
            (key,),
            lambda it, scope: unique_iter(it, key=lambda t: scope[glom](t, key, scope)))

    def slice(self, *args):
        """Returns a new :class:`Iter()` spec which trims iterables in the
        same manner as :func:`itertools.islice`.

        >>> target = [0, 1, 2, 3, 4, 5]
        >>> glom(target, Iter().slice(3).all())
        [0, 1, 2]
        >>> glom(target, Iter().slice(2, 4).all())
        [2, 3]

        This method accepts only positional arguments.
        """
        # TODO: make a kwarg-compatible version of this (islice takes no kwargs)
        # TODO: also support slice syntax Iter()[::]
        try:
            # Validate the arguments eagerly, at spec-build time, rather
            # than letting islice fail later mid-stream.
            islice([], *args)
        except TypeError:
            raise TypeError(f'invalid slice arguments: {args!r}')
        return self._add_op('slice', args, lambda it, scope: islice(it, *args))

    def limit(self, count):
        """A convenient alias for :meth:`~Iter.slice`, which takes a single
        argument, *count*, the max number of items to yield.
        """
        return self._add_op('limit', (count,), lambda it, scope: islice(it, count))

    def takewhile(self, key=T):
        """Returns a new :class:`Iter()` spec which stops the stream once
        *key* becomes falsy.

        >>> glom([3, 2, 0, 1], Iter().takewhile().all())
        [3, 2]

        :func:`itertools.takewhile` for more details.
        """
        return self._add_op(
            'takewhile',
            (key,),
            lambda it, scope: takewhile(
                lambda t: scope[glom](t, key, scope), it))

    def dropwhile(self, key=T):
        """Returns a new :class:`Iter()` spec which drops stream items until
        *key* becomes falsy.

        >>> glom([0, 0, 3, 2, 0], Iter().dropwhile(lambda t: t < 1).all())
        [3, 2, 0]

        Note that while similar to :meth:`Iter.filter()`, the filter
        only applies to the beginning of the stream. In a way,
        :meth:`Iter.dropwhile` can be thought of as
        :meth:`~str.lstrip()` for streams. See
        :func:`itertools.dropwhile` for more details.
        """
        return self._add_op(
            'dropwhile',
            (key,),
            lambda it, scope: dropwhile(
                lambda t: scope[glom](t, key, scope), it))

    # Terminal methods follow

    def all(self):
        """A convenience method which returns a new spec which turns an
        iterable into a list.

        >>> glom(range(5), Iter(lambda t: t * 2).all())
        [0, 2, 4, 6, 8]

        Note that this spec will always consume the whole iterable, and as
        such, the spec returned is *not* an :class:`Iter()` instance.
        """
        return Pipe(self, list)

    def first(self, key=T, default=None):
        """A convenience method for lazily yielding a single truthy item from
        an iterable.

        >>> target = [False, 1, 2, 3]
        >>> glom(target, Iter().first())
        1

        This method takes a condition, *key*, which can also be a
        glomspec, as well as a *default*, in case nothing matches the
        condition.

        As this spec yields at most one item, and not an iterable, the
        spec returned from this method is not an :class:`Iter()` instance.
        """
        return (self, First(key=key, default=default))
| Iter |
python | pytorch__pytorch | test/functorch/common_utils.py | {
"start": 19353,
"end": 20829
class ____:
    # Context manager that disables the vmap fallback for the duration of
    # the with-block and restores the previous setting on exit (even if the
    # body raised), via torch's private functorch C bindings.
    def __enter__(self):
        self.prev_state = torch._C._functorch._is_vmap_fallback_enabled()
        torch._C._functorch._set_vmap_fallback_enabled(False)

    def __exit__(self, *ignored):
        torch._C._functorch._set_vmap_fallback_enabled(self.prev_state)
def check_vmap_fallback(test_case, thunk, opinfo, dry_run=False):
    """Run *thunk* with the vmap fallback disabled.

    With ``dry_run=False`` any failure propagates to the caller.  With
    ``dry_run=True`` the failure is swallowed and a ready-to-paste
    ``xfail(...)`` line for *opinfo* is printed instead, to help maintain
    skip lists.  ``test_case`` is currently unused.
    """
    try:
        with DisableVmapFallback():
            thunk()
    except Exception:
        if not dry_run:
            raise

        if opinfo.variant_test_name:
            print(f"xfail('{opinfo.name}', '{opinfo.variant_test_name}'),")
        else:
            print(f"xfail('{opinfo.name}'),")
def saved_tensors_hooks_to_gm(
    pack_fn, unpack_fn, pack_cache_hash, unpack_cache_hash, symbolic_tracing=True
):
    """Trace pack/unpack saved-tensors hooks into fx GraphModules and stamp
    each resulting graph's first node with the given user cache hash.

    Returns the (pack_gm, unpack_gm) pair.
    """
    if symbolic_tracing:
        pack_gm = torch.fx.symbolic_trace(pack_fn)
        unpack_gm = torch.fx.symbolic_trace(unpack_fn)
    else:
        # NOTE(review): ``torch.functorch`` does not look like an importable
        # module (make_fx normally lives under torch.fx.experimental.
        # proxy_tensor) -- confirm this branch is exercised and the import
        # path is correct.
        from torch.functorch import make_fx

        # Trace with a sample input whose dims are marked dynamic so the
        # resulting graphs are not specialized to (2, 3).
        inp = torch.randn(2, 3)
        torch._dynamo.mark_dynamic(inp, 0)
        torch._dynamo.mark_dynamic(inp, 1)
        pack_out = pack_fn(inp)
        pack_gm = make_fx(pack_fn)(inp)
        unpack_gm = make_fx(unpack_fn)(pack_out)

    def set_manual_hash(g, manual_hash):
        # Tag the graph's first node; consumers read "user_cache_hash"
        # from node.meta.
        node = next(iter(g.nodes))
        node.meta["user_cache_hash"] = manual_hash

    set_manual_hash(pack_gm.graph, pack_cache_hash)
    set_manual_hash(unpack_gm.graph, unpack_cache_hash)
    return pack_gm, unpack_gm
| DisableVmapFallback |
python | django__django | tests/model_meta/models.py | {
"start": 5169,
"end": 5290
class ____(models.Model):
    # Test model that can be swapped out for another model via the
    # MODEL_META_TESTS_SWAPPED setting (exercises Meta.swappable handling).
    class Meta:
        swappable = "MODEL_META_TESTS_SWAPPED"


# ParentListTests models
python | getsentry__sentry | tests/sentry/runner/commands/test_backup.py | {
"start": 14190,
"end": 15325
class ____(TestCase):
    """
    Test success cases of the `sentry backup sanitize` CLI command.
    """

    def test_sanitize(self) -> None:
        # Sanitizing a known-good export into a temp file should exit 0.
        with TemporaryDirectory() as tmp_dir:
            tmp_sanitized_path = Path(tmp_dir).joinpath("sanitized.json")
            rv = CliRunner().invoke(
                backup,
                [
                    "sanitize",
                    str(tmp_sanitized_path),
                    "--src",
                    GOOD_FILE_PATH,
                ],
            )
            assert rv.exit_code == 0, rv.output

    def test_sanitize_with_datetime_offset(self) -> None:
        # Same as above, but also shifting all datetimes by a (negative)
        # day offset via --days-offset.
        with TemporaryDirectory() as tmp_dir:
            tmp_sanitized_path = Path(tmp_dir).joinpath("sanitized.json")
            rv = CliRunner().invoke(
                backup,
                [
                    "sanitize",
                    str(tmp_sanitized_path),
                    "--src",
                    GOOD_FILE_PATH,
                    "--days-offset",
                    "-1234",
                ],
            )
            assert rv.exit_code == 0, rv.output
| GoodSanitizeCommandTests |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/quality-testing/unit-testing-assets-and-ops/op-config.py | {
"start": 36,
"end": 412
class ____(dg.Config):
    # Path of the file the op should read.
    path: str


@dg.op
def load_file(config: FilepathConfig) -> str:
    """Read and return the entire contents of the file at ``config.path``."""
    with open(config.path) as file:
        return file.read()


# end_file


# start_test
def test_load_file() -> None:
    # The op can be invoked directly as a function with a config instance;
    # expects fixture files path1.txt / path2.txt to exist.
    assert load_file(FilepathConfig(path="path1.txt")) == "contents1"
    assert load_file(FilepathConfig(path="path2.txt")) == "contents2"


# end_test
| FilepathConfig |
python | huggingface__transformers | src/transformers/models/cohere2_vision/processing_cohere2_vision.py | {
"start": 1168,
"end": 9447
class ____(ProcessorMixin):
    r"""
    Constructs a Cohere2Vision processor which wraps a [`AutoImageProcessor`] and
    [`PretrainedTokenizerFast`] tokenizer into a single processor that inherits both the image processor and
    tokenizer functionalities. See the [`~Cohere2VisionProcessor.__call__`] and [`~Cohere2VisionProcessor.decode`] for more information.
    Args:
        image_processor ([`AutoImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`], *optional*):
            The tokenizer is a required input.
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
    """

    def __init__(
        self,
        image_processor=None,
        tokenizer=None,
        chat_template=None,
        **kwargs,
    ):
        super().__init__(image_processor, tokenizer, chat_template=chat_template)
        # Cache the tokenizer's special image tokens/ids so __call__ does
        # not need repeated attribute lookups.
        self.patch_size = self.image_processor.patch_size
        self.boi_token = tokenizer.boi_token
        self.eoi_token = tokenizer.eoi_token
        self.image_token = tokenizer.image_token
        self.img_line_break_token = tokenizer.img_line_break_token
        self.image_token_id = tokenizer.image_token_id
        # All ids that count as "image" tokens for mm_token_type_ids.
        self.image_ids = tokenizer.convert_tokens_to_ids(
            [
                self.image_token,
                self.boi_token,
                self.eoi_token,
                self.img_line_break_token,
            ]
        )

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
        **kwargs: Unpack[Cohere2VisionProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] to encode the text.
        To prepare the vision inputs, this method forwards the `images` and `kwargs` arguments to
        GotOcr2ImageProcessor's [`~GotOcr2ImageProcessor.__call__`] if `images` is not `None`.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:
            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        """
        if text is None:
            raise ValueError("You have to specify text.")
        elif not isinstance(text, (list, tuple)):
            text = [text]

        output_kwargs = self._merge_kwargs(
            Cohere2VisionProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        # Process images
        image_inputs = {}
        if images is not None:
            image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
            batch_num_patches = iter(image_inputs.pop("num_patches"))
            processed_text = []
            for sample in text:
                # Expand each image placeholder into BOI + one run of
                # patch tokens (with a line-break token) per patch + EOI.
                # "<placeholder>" is used as a temporary marker so that the
                # `while self.image_token in sample` loop does not re-match
                # tokens it has just inserted.
                while self.image_token in sample:
                    num_patches = next(batch_num_patches)
                    img_patches_per_tile = int(self.patch_size**2)
                    img_string = f"{self.boi_token}"
                    for idx in range(1, num_patches):
                        img_string += "<placeholder>" * img_patches_per_tile + self.img_line_break_token
                    img_string += "<placeholder>" * img_patches_per_tile + self.img_line_break_token
                    img_string += f"{self.eoi_token}"
                    sample = sample.replace(self.image_token, img_string, 1)
                processed_text.append(sample)
            # Swap the temporary markers back to the real image token.
            text = [sample.replace("<placeholder>", self.image_token) for sample in processed_text]

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
        return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
        text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"], return_tensors=None)

        if return_mm_token_type_ids:
            # Mark every image-related token id with type 1, text with 0.
            array_ids = np.array(text_inputs["input_ids"])
            mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
            mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1
            text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()

        return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)

    def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
        """
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.

        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.

        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
            input modalities, along with other useful data.
        """
        vision_data = {}
        if image_sizes is not None:
            images_kwargs = Cohere2VisionProcessorKwargs._defaults.get("images_kwargs", {})
            images_kwargs.update(kwargs)

            num_image_patches = [
                self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
                for image_size in image_sizes
            ]

            token_per_patch = int(self.patch_size**2)
            num_image_tokens = [
                2 + sum(token_per_patch + 1 for _ in range(num_patches)) for num_patches in num_image_patches
            ]  # Add +2 and +1 for BOI/EOI and image break tokens
            vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
        return MultiModalData(**vision_data)

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of the tokenizer's and image processor's expected inputs.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(tokenizer_input_names) + list(image_processor_input_names)


__all__ = ["Cohere2VisionProcessor"]
| Cohere2VisionProcessor |
python | ray-project__ray | rllib/core/learner/tests/test_learner_group.py | {
"start": 4266,
"end": 9375
class ____(unittest.TestCase):
    # Integration tests for LearnerGroup: construction from a config,
    # multi-GPU updates, and adding/removing modules mid-training.
    @classmethod
    def setUpClass(cls) -> None:
        ray.init()

    @classmethod
    def tearDownClass(cls) -> None:
        ray.shutdown()

    def test_learner_group_build_from_algorithm_config(self):
        """Tests whether we can build a learner_groupobject from algorithm_config."""
        env = gym.make("CartPole-v1")

        # Config that has its own learner class and RLModule spec.
        config = BaseTestingAlgorithmConfig()
        learner_group = config.build_learner_group(env=env)
        print(learner_group)
        learner_group.shutdown()

        # Config for which user defines custom learner class and RLModule spec.
        config = (
            BaseTestingAlgorithmConfig()
            .training(learner_class=BCTorchLearner)
            .rl_module(
                rl_module_spec=RLModuleSpec(
                    module_class=DiscreteBCTorchModule,
                    model_config={"fcnet_hiddens": [32]},
                )
            )
        )
        learner_group = config.build_learner_group(env=env)
        print(learner_group)
        learner_group.shutdown()

    def test_update_multi_gpu(self):
        # NOTE: this test is disabled -- the bare `return` below skips the
        # entire body that follows.
        return
        scaling_modes = ["multi-gpu-ddp", "remote-gpu"]

        for scaling_mode in scaling_modes:
            print(f"Testing scaling mode: {scaling_mode}.")
            env = gym.make("CartPole-v1")

            config_overrides = REMOTE_CONFIGS[scaling_mode]
            config = BaseTestingAlgorithmConfig().update_from_dict(config_overrides)
            learner_group = config.build_learner_group(env=env)

            min_loss = float("inf")
            for iter_i in range(1000):
                results = learner_group.update(episodes=FAKE_EPISODES)

                loss = np.mean(
                    [res[ALL_MODULES][Learner.TOTAL_LOSS_KEY] for res in results]
                )
                min_loss = min(loss, min_loss)
                print(f"[iter = {iter_i}] Loss: {loss:.3f}, Min Loss: {min_loss:.3f}")
                # The loss is initially around 0.69 (ln2). When it gets to around
                # 0.57 the return of the policy gets to around 100.
                if min_loss < 0.57:
                    break

                # All workers should have synchronized weights after an update.
                for res1, res2 in zip(results, results[1:]):
                    self.assertEqual(
                        res1[DEFAULT_MODULE_ID]["mean_weight"],
                        res2[DEFAULT_MODULE_ID]["mean_weight"],
                    )
            self.assertLess(min_loss, 0.57)

            # Make sure the learner_group resources are freed up so that we don't
            # autoscale.
            learner_group.shutdown()
            del learner_group

    def test_add_module_and_remove_module(self):
        scaling_modes = ["local-cpu", "multi-cpu-ddp"]

        for scaling_mode in scaling_modes:
            print(f"Testing scaling mode: {scaling_mode}.")
            ma_env = MultiAgentCartPole({"num_agents": 2})

            config_overrides = REMOTE_CONFIGS.get(scaling_mode) or LOCAL_CONFIGS.get(
                scaling_mode
            )
            config = (
                BCConfig()
                .update_from_dict(config_overrides)
                .multi_agent(
                    policies={"p0"},
                    policy_mapping_fn=lambda aid, *ar, **kw: f"p{aid}",
                )
                .rl_module(
                    rl_module_spec=MultiRLModuleSpec(
                        rl_module_specs={"p0": RLModuleSpec()},
                    )
                )
            )
            learner_group = config.build_learner_group(env=ma_env)

            # Update once with the default policy.
            learner_group.update(episodes=FAKE_MA_EPISODES_WO_P1)

            # Add a test_module.
            learner_group.add_module(
                module_id="p1",
                module_spec=config.get_multi_rl_module_spec(env=ma_env).module_specs[
                    "p0"
                ],
            )

            # Do training that includes the test_module.
            results = learner_group.update(episodes=FAKE_MA_EPISODES)

            # check that module ids are updated to include the new module
            module_ids_after_add = {"p0", "p1"}
            # Compare module IDs in results with expected ones.
            self.assertEqual(
                set(results[0].keys()) - {ALL_MODULES}, module_ids_after_add
            )

            # Remove the test_module.
            learner_group.remove_module(module_id="p1")

            # Run training without the test_module.
            results = learner_group.update(episodes=FAKE_MA_EPISODES_WO_P1)

            # check that module ids are updated after remove operation to not
            # include the new module
            # remove the total_loss key since its not a module key
            self.assertEqual(set(results[0].keys()) - {ALL_MODULES}, {"p0"})

            # make sure the learner_group resources are freed up so that we don't
            # autoscale
            learner_group.shutdown()
            del learner_group
python | pytorch__pytorch | torch/distributed/tensor/_dtensor_spec.py | {
"start": 1834,
"end": 2131
class ____(NamedTuple):
    # simple named tuple to represent tensor metadata
    # intentionally to stay simple only for sharding
    # propagation purposes.
    shape: torch.Size
    stride: tuple[int, ...]
    dtype: torch.dtype
@dataclass
| TensorMeta |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/tools/_beta_functions.py | {
"start": 6046,
"end": 6644
class ____(BaseFunctionTool[FunctionT]):
    def call(self, input: object) -> BetaFunctionToolResultType:
        # Synchronous tool invocation: the wrapped function must not be a
        # coroutine, and *input* must be a dict of keyword arguments that
        # validate against the function's signature.
        if iscoroutinefunction(self.func):
            raise RuntimeError("Cannot call a coroutine function synchronously. Use `@async_tool` instead.")

        if not is_dict(input):
            raise TypeError(f"Input must be a dictionary, got {type(input).__name__}")

        try:
            return self._func_with_validate(**cast(Any, input))
        except pydantic.ValidationError as e:
            # Surface schema mismatches as ValueError, chaining the
            # original pydantic error as the cause.
            raise ValueError(f"Invalid arguments for function {self.name}") from e
python | pyca__cryptography | tests/hazmat/primitives/test_sm4.py | {
"start": 1686,
"end": 2184
class ____:
    # OFB-mode SM4 encryption checked against the draft-ribose-cfrg-sm4-10
    # test vectors (key and IV arrive hex-encoded in the vector files).
    test_ofb = generate_encrypt_test(
        load_nist_vectors,
        os.path.join("ciphers", "SM4"),
        ["draft-ribose-cfrg-sm4-10-ofb.txt"],
        lambda key, **kwargs: algorithms.SM4(binascii.unhexlify(key)),
        lambda iv, **kwargs: OFB(binascii.unhexlify(iv)),
    )
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.SM4(b"\x00" * 16), CFB(b"\x00" * 16)
),
skip_message="Does not support SM4 CFB",
)
| TestSM4ModeOFB |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 62776,
"end": 63838
class ____(TestCase):
    """Tests for repeat_each()"""

    def test_default(self):
        # Default repeat count is 2.
        actual = list(mi.repeat_each('ABC'))
        expected = ['A', 'A', 'B', 'B', 'C', 'C']
        self.assertEqual(actual, expected)

    def test_basic(self):
        actual = list(mi.repeat_each('ABC', 3))
        expected = ['A', 'A', 'A', 'B', 'B', 'B', 'C', 'C', 'C']
        self.assertEqual(actual, expected)

    def test_empty(self):
        actual = list(mi.repeat_each(''))
        expected = []
        self.assertEqual(actual, expected)

    def test_no_repeat(self):
        # A count of zero yields nothing.
        actual = list(mi.repeat_each('ABC', 0))
        expected = []
        self.assertEqual(actual, expected)

    def test_negative_repeat(self):
        # Negative counts behave like zero.
        actual = list(mi.repeat_each('ABC', -1))
        expected = []
        self.assertEqual(actual, expected)

    def test_infinite_input(self):
        # repeat_each must stay lazy for infinite iterables.
        repeater = mi.repeat_each(cycle('AB'))
        actual = mi.take(6, repeater)
        expected = ['A', 'A', 'B', 'B', 'A', 'A']
        self.assertEqual(actual, expected)
python | neetcode-gh__leetcode | python/1642-furthest-building-you-can-reach.py | {
"start": 0,
"end": 540
} | class ____:
def furthestBuilding(self, heights: List[int], bricks: int, ladders: int) -> int:
    """Return the index of the furthest building reachable.

    Greedy strategy: pay every upward climb with bricks while remembering
    all climbs in a max-heap (stored negated).  Whenever bricks run out,
    retroactively cover the single largest climb seen so far with a ladder,
    refunding its bricks.  If no ladder remains at that point, the current
    building is as far as we get.
    """
    climbs = []  # negated upward climbs; climbs[0] is -(largest climb)
    for idx in range(len(heights) - 1):
        climb = heights[idx + 1] - heights[idx]
        if climb <= 0:
            continue  # downhill or flat moves cost nothing
        bricks -= climb
        heapq.heappush(climbs, -climb)
        if bricks < 0:
            if not ladders:
                return idx
            ladders -= 1
            bricks -= heapq.heappop(climbs)  # refund the biggest climb
    return len(heights) - 1
| Solution |
python | pytorch__pytorch | test/quantization/core/test_workflow_ops.py | {
"start": 53276,
"end": 63882
} | class ____(TestCase):
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
       sampled_dtype=st.sampled_from(['bf16', 'fp16', 'fp32']),
       symmetric_quant=st.booleans(), use_bool=st.booleans())
@settings(deadline=None)
def test_fused_obs_fake_quant_moving_avg(self, device, sampled_dtype, symmetric_quant, use_bool) -> None:
    """
    Tests the case where we call the fused_obs_fake_quant op multiple times
    and update the running_min and max of the activation tensors.
    """
    # Reduced-precision dtypes are only exercised on CUDA.
    if device == "cpu":
        sampled_dtype = "fp32"
    dtype = {'bf16': torch.bfloat16, 'fp16': torch.half, 'fp32': torch.float32}[sampled_dtype]
    # Reference ("_ref") state is maintained by the Python helpers below,
    # op ("_op") state is mutated in place by the fused op; both start at
    # +/- inf so the first observed batch defines the range.
    in_running_min_ref = out_running_min_ref = torch.tensor(float("inf"), dtype=dtype)
    in_running_min_op = torch.tensor(float("inf"), dtype=dtype, device=device)
    in_running_max_ref = out_running_max_ref = torch.tensor(float("-inf"), dtype=dtype)
    in_running_max_op = torch.tensor(float("-inf"), dtype=dtype, device=device)
    avg_const = 0.01

    scale = torch.tensor([1.0], device=device)
    zero_point = torch.tensor([0], dtype=torch.int, device=device)
    # The op accepts the on/off flags as bool or int tensors; cover both.
    observer_on = fake_quant_on = False if use_bool else 0

    pt_op = torch.fused_moving_avg_obs_fake_quant
    # enable observer after 2 iterations and fake_quant after 4 iterations
    for i in range(10):
        if i > 2:
            observer_on = True if use_bool else 1
        if i > 4:
            fake_quant_on = True if use_bool else 1

        x = torch.randn(5, 5, dtype=dtype, device=device)
        out = pt_op(
            x,
            torch.tensor(observer_on, device=device),
            torch.tensor(fake_quant_on, device=device),
            in_running_min_op,
            in_running_max_op,
            scale,
            zero_point,
            avg_const,
            0,
            255,
            0,
            False,
            symmetric_quant,
        )
        if observer_on:
            # Mirror the op's moving-average min/max update in Python.
            (
                in_running_min_ref,
                in_running_max_ref,
            ) = _get_tensor_min_max(
                x,
                running_min=in_running_min_ref,
                running_max=in_running_max_ref,
                averaging_const=0.01,
                dtype=dtype,
            )

        if fake_quant_on:
            x_scale, x_zero_point = _get_scale_zp(
                in_running_min_ref,
                in_running_max_ref,
                torch.quint8,
                preserve_sparsity=symmetric_quant,
            )
            x_in = _fake_quantize_per_tensor_affine_reference(
                x, x_scale, x_zero_point, 0, 255
            )
            self.assertEqual(scale, x_scale)
            self.assertEqual(zero_point, x_zero_point)
        else:
            # With fake-quant off the op should pass the input through.
            x_in = x

        self.assertEqual(in_running_min_ref, in_running_min_op)
        self.assertEqual(in_running_max_ref, in_running_max_op)
        torch.testing.assert_close(out, x_in)

    # Test empty input works
    x = torch.empty(0, 5, dtype=dtype, device=device)
    out = pt_op(
        x,
        torch.tensor(1, device=device),
        torch.tensor(1, device=device),
        in_running_min_op,
        in_running_max_op,
        scale,
        zero_point,
        avg_const,
        0,
        255,
        0,
        False,
        symmetric_quant,
    )
    output_shape = (0, 5)
    self.assertEqual(out.shape, output_shape)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),
symmetric_quant=st.booleans(), use_bool=st.booleans())
@settings(deadline=None)
def test_fused_obs_fake_quant_moving_avg_per_channel(self, device, symmetric_quant, use_bool) -> None:
"""
Tests the case where we call the fused_obs_fake_quant op multiple times
and update the running_min and max of the activation tensors.
"""
m = 5
sizes = [[5, 5], [5, 4, 3]]
for size in sizes:
in_running_min_ref = torch.empty(m, device=device).fill_(float("inf"))
in_running_min_op = torch.empty(m, device=device).fill_(float("inf"))
in_running_max_ref = torch.empty(m, device=device).fill_(float("-inf"))
in_running_max_op = torch.empty(m, device=device).fill_(float("-inf"))
avg_const = 0.01
scale = torch.empty(m, device=device).fill_(0.1)
zero_point = torch.empty(m, dtype=torch.int, device=device).fill_(0)
observer_on = fake_quant_on = False if use_bool else 0
pt_op = torch.fused_moving_avg_obs_fake_quant
# enable observer after 2 iterations and fake_quant after 4 iterations
for i in range(10):
if i > 2:
observer_on = True if use_bool else 1
if i > 4:
fake_quant_on = True if use_bool else 1
x = torch.randn(size, device=device)
out = pt_op(
x,
torch.tensor(observer_on, device=device),
torch.tensor(fake_quant_on, device=device),
in_running_min_op,
in_running_max_op,
scale,
zero_point,
avg_const,
0,
255,
0,
True, # per_channel_enabled
symmetric_quant,
)
if observer_on:
(
in_running_min_ref,
in_running_max_ref,
) = _get_per_row_min_max(x, in_running_min_ref, in_running_max_ref)
if fake_quant_on:
x_scale = torch.empty(m, device=device)
x_zero_point = torch.empty(m, dtype=torch.int, device=device)
for i in range(x_scale.numel()):
x_scale[i], x_zero_point[i] = _get_scale_zp(
in_running_min_ref[i].item(),
in_running_max_ref[i].item(),
torch.quint8,
preserve_sparsity=symmetric_quant,
)
x_in = _fake_quantize_per_channel_affine_reference(
x, x_scale, x_zero_point, 0, 0, 255
)
self.assertEqual(scale, x_scale)
self.assertEqual(zero_point, x_zero_point)
else:
x_in = x
self.assertEqual(in_running_min_ref, in_running_min_op)
self.assertEqual(in_running_max_ref, in_running_max_op)
torch.testing.assert_close(out, x_in)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),)
@settings(deadline=None)
def test_fused_obs_fake_quant_backward_op(self, device) -> None:
n = m = k = 10
input_shape = (m, n)
output_shape = (m, n)
x = torch.randn(input_shape, device=device, requires_grad=True)
avg_const = 0.01
scale = torch.tensor([1.0], device=device)
zero_point = torch.tensor([0], dtype=torch.int, device=device)
x_min, x_max = _get_tensor_min_max(x)
x_scale, x_zero_point = _get_scale_zp(
x_min, x_max, torch.quint8
)
x_scale = torch.tensor(x_scale, device=device)
x_zero_point = torch.tensor(x_zero_point, dtype=torch.int, device=device)
x_fake_quant = torch.fake_quantize_per_tensor_affine(
x, x_scale, x_zero_point, 0, 255
)
pt_op = torch.fused_moving_avg_obs_fake_quant
out = pt_op(
x,
torch.tensor(1, device=device),
torch.tensor(1, device=device),
torch.tensor(x_min, device=device),
torch.tensor(x_max, device=device),
scale,
zero_point,
avg_const,
0,
255,
0,
False,
)
# verify the output matches
torch.testing.assert_close(out, x_fake_quant)
# verify the gradient matches expectation of fake_quant op
dout = torch.rand_like(x, dtype=torch.float).to(device)
out.backward(dout)
dX = _fake_quantize_per_tensor_affine_grad_reference(
dout, x, x_scale, x_zero_point, 0, 255)
self.assertEqual(dX, x.grad)
self.assertTrue(x.grad.dtype == torch.float32)
@given(device=st.sampled_from(['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']),)
@settings(deadline=None)
def test_fused_backward_op_fake_quant_off(self, device) -> None:
n = m = 4
input_shape = (m, n)
output_shape = (m, n)
x = torch.randn(input_shape, device=device, requires_grad=True)
avg_const = 0.01
scale = torch.tensor([1.0], device=device)
zero_point = torch.tensor([0], dtype=torch.int, device=device)
x_min, x_max = _get_tensor_min_max(x)
x_scale, x_zero_point = _get_scale_zp(
x_min, x_max, torch.quint8
)
pt_op = torch.fused_moving_avg_obs_fake_quant
out = pt_op(
x,
torch.tensor(0, device=device),
torch.tensor(0, device=device),
torch.tensor(x_min, device=device),
torch.tensor(x_max, device=device),
scale,
zero_point,
avg_const,
0,
255,
0,
False,
)
# verify the output matches
torch.testing.assert_close(out, x)
# verify the gradient matches expectation of fake_quant op
dout = torch.rand_like(x, dtype=torch.float).to(device)
out.backward(dout)
dX = _fake_quantize_per_tensor_affine_grad_reference(
dout, x, x_scale, x_zero_point, 0, 255)
self.assertEqual(dX, x.grad)
self.assertTrue(x.grad.dtype == torch.float32)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_quantization.py TESTNAME\n\n"
"instead.")
| TestFusedObsFakeQuant |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 366009,
"end": 366448
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "project", "repository")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
project = sgqlc.types.Field("Project", graphql_name="project")
repository = sgqlc.types.Field("Repository", graphql_name="repository")
| LinkRepositoryToProjectPayload |
python | pandas-dev__pandas | asv_bench/benchmarks/join_merge.py | {
"start": 10360,
"end": 10963
} | class ____:
params = [4_000_000, 1_000_000]
param_names = ["unique_elements"]
def setup(self, unique_elements):
N = 1_000_000
self.left = DataFrame({"a": np.random.randint(1, unique_elements, (N,))})
self.right = DataFrame({"a": np.random.randint(1, unique_elements, (N,))})
uniques = self.right.a.drop_duplicates()
self.right["a"] = concat(
[uniques, Series(np.arange(0, -(N - len(uniques)), -1))], ignore_index=True
)
def time_unique_merge(self, unique_elements):
merge(self.left, self.right, how="inner")
| UniqueMerge |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 187077,
"end": 187160
} | class ____(_Int8RangeTests, _RangeTypeCompilation):
pass
| Int8RangeCompilationTest |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_clsregistry.py | {
"start": 947,
"end": 12153
} | class ____(fixtures.TestBase):
__requires__ = ("predictable_gc",)
def test_same_module_same_name(self):
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
f2 = MockClass(base, "foo.bar.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
gc_collect()
with expect_warnings(
"This declarative base already contains a class with the "
"same class name and module name as foo.bar.Foo, and "
"will be replaced in the string-lookup table."
):
clsregistry._add_class(
"Foo",
f2,
base._class_registry,
)
def test_resolve(self):
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
f2 = MockClass(base, "foo.alt.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
clsregistry._add_class("Foo", f2, base._class_registry)
name_resolver, resolver = clsregistry._resolver(f1, MockProp())
gc_collect()
is_(resolver("foo.bar.Foo")(), f1)
is_(resolver("foo.alt.Foo")(), f2)
is_(name_resolver("foo.bar.Foo")(), f1)
is_(name_resolver("foo.alt.Foo")(), f2)
def test_fragment_resolve(self):
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
f2 = MockClass(base, "foo.alt.Foo")
f3 = MockClass(base, "bat.alt.Hoho")
clsregistry._add_class("Foo", f1, base._class_registry)
clsregistry._add_class("Foo", f2, base._class_registry)
clsregistry._add_class("HoHo", f3, base._class_registry)
name_resolver, resolver = clsregistry._resolver(f1, MockProp())
gc_collect()
is_(resolver("bar.Foo")(), f1)
is_(resolver("alt.Foo")(), f2)
is_(name_resolver("bar.Foo")(), f1)
is_(name_resolver("alt.Foo")(), f2)
def test_fragment_ambiguous(self):
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
f2 = MockClass(base, "foo.alt.Foo")
f3 = MockClass(base, "bat.alt.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
clsregistry._add_class("Foo", f2, base._class_registry)
clsregistry._add_class("Foo", f3, base._class_registry)
name_resolver, resolver = clsregistry._resolver(f1, MockProp())
gc_collect()
assert_raises_message(
exc.InvalidRequestError,
'Multiple classes found for path "alt.Foo" in the registry '
"of this declarative base. Please use a fully "
"module-qualified path.",
resolver("alt.Foo"),
)
assert_raises_message(
exc.InvalidRequestError,
'Multiple classes found for path "alt.Foo" in the registry '
"of this declarative base. Please use a fully "
"module-qualified path.",
name_resolver("alt.Foo"),
)
@testing.combinations(
("NonExistentFoo",),
("nonexistent.Foo",),
("existent.nonexistent.Foo",),
("existent.NonExistentFoo",),
("nonexistent.NonExistentFoo",),
("existent.existent.NonExistentFoo",),
argnames="name",
)
def test_name_resolution_failures(self, name, registry):
Base = registry.generate_base()
f1 = MockClass(registry, "existent.Foo")
f2 = MockClass(registry, "existent.existent.Foo")
clsregistry._add_class("Foo", f1, registry._class_registry)
clsregistry._add_class("Foo", f2, registry._class_registry)
class MyClass(Base):
__tablename__ = "my_table"
id = Column(Integer, primary_key=True)
foo = relationship(name)
with expect_raises_message(
exc.InvalidRequestError,
r"When initializing mapper .*MyClass.*, expression '%s' "
r"failed to locate a name" % (name,),
):
registry.configure()
def test_no_fns_in_name_resolve(self):
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
f2 = MockClass(base, "foo.alt.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
clsregistry._add_class("Foo", f2, base._class_registry)
name_resolver, resolver = clsregistry._resolver(f1, MockProp())
gc_collect()
import sqlalchemy
is_(
resolver("__import__('sqlalchemy.util').util.EMPTY_SET")(),
sqlalchemy.util.EMPTY_SET,
)
assert_raises_message(
exc.InvalidRequestError,
r"When initializing mapper some_parent, expression "
r"\"__import__\('sqlalchemy.util'\).util.EMPTY_SET\" "
"failed to locate a name",
name_resolver("__import__('sqlalchemy.util').util.EMPTY_SET"),
)
def test_resolve_dupe_by_name(self):
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
f2 = MockClass(base, "foo.alt.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
clsregistry._add_class("Foo", f2, base._class_registry)
gc_collect()
name_resolver, resolver = clsregistry._resolver(f1, MockProp())
resolver = resolver("Foo")
assert_raises_message(
exc.InvalidRequestError,
'Multiple classes found for path "Foo" in the '
"registry of this declarative base. Please use a "
"fully module-qualified path.",
resolver,
)
resolver = name_resolver("Foo")
assert_raises_message(
exc.InvalidRequestError,
'Multiple classes found for path "Foo" in the '
"registry of this declarative base. Please use a "
"fully module-qualified path.",
resolver,
)
def test_dupe_classes_back_to_one(self):
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
f2 = MockClass(base, "foo.alt.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
clsregistry._add_class("Foo", f2, base._class_registry)
del f2
gc_collect()
# registry restores itself to just the one class
name_resolver, resolver = clsregistry._resolver(f1, MockProp())
f_resolver = resolver("Foo")
is_(f_resolver(), f1)
f_resolver = name_resolver("Foo")
is_(f_resolver(), f1)
def test_dupe_classes_cleanout(self):
# force this to maintain isolation between tests
clsregistry._registries.clear()
base = registry()
for i in range(3):
f1 = MockClass(base, "foo.bar.Foo")
f2 = MockClass(base, "foo.alt.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
clsregistry._add_class("Foo", f2, base._class_registry)
eq_(len(clsregistry._registries), 11)
del f1
del f2
gc_collect()
eq_(len(clsregistry._registries), 0)
def test_dupe_classes_name_race(self):
"""test the race condition that the class was garbage "
"collected while being resolved from a dupe class."""
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
f2 = MockClass(base, "foo.alt.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
clsregistry._add_class("Foo", f2, base._class_registry)
dupe_reg = base._class_registry["Foo"]
dupe_reg.contents = [lambda: None]
name_resolver, resolver = clsregistry._resolver(f1, MockProp())
f_resolver = resolver("Foo")
assert_raises_message(
exc.InvalidRequestError,
r"When initializing mapper some_parent, expression "
r"'Foo' failed to locate a name \('Foo'\).",
f_resolver,
)
f_resolver = name_resolver("Foo")
assert_raises_message(
exc.InvalidRequestError,
r"When initializing mapper some_parent, expression "
r"'Foo' failed to locate a name \('Foo'\).",
f_resolver,
)
def test_module_reg_cleanout_race(self):
"""test the race condition that a class was gc'ed as we tried
to look it up by module name."""
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
reg = base._class_registry["_sa_module_registry"]
mod_entry = reg["foo"]["bar"]
name_resolver, resolver = clsregistry._resolver(f1, MockProp())
f_resolver = resolver("foo")
del mod_entry.contents["Foo"]
assert_raises_message(
NameError,
"Module 'bar' has no mapped classes registered "
"under the name 'Foo'",
lambda: f_resolver().bar.Foo,
)
f_resolver = name_resolver("foo")
assert_raises_message(
NameError,
"Module 'bar' has no mapped classes registered "
"under the name 'Foo'",
lambda: f_resolver().bar.Foo,
)
def test_module_reg_no_class(self):
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
reg = base._class_registry["_sa_module_registry"]
mod_entry = reg["foo"]["bar"] # noqa
name_resolver, resolver = clsregistry._resolver(f1, MockProp())
f_resolver = resolver("foo")
assert_raises_message(
NameError,
"Module 'bar' has no mapped classes registered "
"under the name 'Bat'",
lambda: f_resolver().bar.Bat,
)
f_resolver = name_resolver("foo")
assert_raises_message(
NameError,
"Module 'bar' has no mapped classes registered "
"under the name 'Bat'",
lambda: f_resolver().bar.Bat,
)
def test_module_reg_cleanout_two_sub(self):
base = registry()
f1 = MockClass(base, "foo.bar.Foo")
clsregistry._add_class("Foo", f1, base._class_registry)
reg = base._class_registry["_sa_module_registry"]
f2 = MockClass(base, "foo.alt.Bar")
clsregistry._add_class("Bar", f2, base._class_registry)
assert reg["foo"]["bar"]
del f1
gc_collect()
assert "bar" not in reg["foo"]
assert "alt" in reg["foo"]
del f2
gc_collect()
assert "foo" not in reg.contents
def test_module_reg_cleanout_sub_to_base(self):
base = registry()
f3 = MockClass(base, "bat.bar.Hoho")
clsregistry._add_class("Hoho", f3, base._class_registry)
reg = base._class_registry["_sa_module_registry"]
assert reg["bat"]["bar"]
del f3
gc_collect()
assert "bat" not in reg
def test_module_reg_cleanout_cls_to_base(self):
base = registry()
f4 = MockClass(base, "single.Blat")
clsregistry._add_class("Blat", f4, base._class_registry)
reg = base._class_registry["_sa_module_registry"]
assert reg["single"]
del f4
gc_collect()
assert "single" not in reg
| ClsRegistryTest |
python | scikit-image__scikit-image | src/skimage/io/collection.py | {
"start": 14001,
"end": 15956
} | class ____(ImageCollection):
"""A class containing all frames from multi-frame TIFF images.
Parameters
----------
load_pattern : str or list of str
Pattern glob or filenames to load. The path can be absolute or
relative.
conserve_memory : bool, optional
Whether to conserve memory by only caching the frames of a single
image. Default is True.
Notes
-----
`MultiImage` returns a list of image-data arrays. In this
regard, it is very similar to `ImageCollection`, but the two differ in
their treatment of multi-frame images.
For a TIFF image containing N frames of size WxH, `MultiImage` stores
all frames of that image as a single element of shape `(N, W, H)` in the
list. `ImageCollection` instead creates N elements of shape `(W, H)`.
For an animated GIF image, `MultiImage` reads only the first frame, while
`ImageCollection` reads all frames by default.
Examples
--------
# Where your images are located
>>> data_dir = os.path.join(os.path.dirname(__file__), '../data')
>>> multipage_tiff = data_dir + '/multipage.tif'
>>> multi_img = MultiImage(multipage_tiff)
>>> len(multi_img) # multi_img contains one element
1
>>> multi_img[0].shape # this element is a two-frame image of shape:
(2, 15, 10)
>>> image_col = ImageCollection(multipage_tiff)
>>> len(image_col) # image_col contains two elements
2
>>> for frame in image_col:
... print(frame.shape) # each element is a frame of shape (15, 10)
...
(15, 10)
(15, 10)
"""
def __init__(self, filename, conserve_memory=True, dtype=None, **imread_kwargs):
"""Load a multi-img."""
from ._io import imread
self._filename = filename
super().__init__(filename, conserve_memory, load_func=imread, **imread_kwargs)
@property
def filename(self):
return self._filename
| MultiImage |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 15656,
"end": 16364
} | class ____(ProviderError):
"""
Raised when unable to find a block.
"""
def __init__(self, block_id: "BlockID", reason: Optional[str] = None):
if isinstance(block_id, bytes):
block_id_str = block_id.hex()
if not block_id_str.startswith("0x"):
block_id_str = f"0x{block_id_str}"
else:
block_id_str: HexStr = f"{block_id}" # type: ignore
message = (
"Missing latest block."
if block_id == "latest"
else f"Block with ID '{block_id_str}' not found."
)
if reason:
message = f"{message} Reason: {reason}"
super().__init__(message)
| BlockNotFoundError |
python | pytorch__pytorch | test/test_sympy_utils.py | {
"start": 21238,
"end": 29847
} | class ____(TestCase):
def _create_integer_symbols(self) -> list[sympy.Symbol]:
return sympy.symbols("a b c", integer=True)
def test_give_up(self):
from sympy import Eq, Ne
a, b, c = self._create_integer_symbols()
cases = [
# Not a relational operation.
a + b,
# 'a' appears on both sides.
Eq(a, a + 1),
# 'a' doesn't appear on neither side.
Eq(b, c + 1),
# Result is a 'sympy.And'.
Eq(FloorDiv(a, b), c),
# Result is a 'sympy.Or'.
Ne(FloorDiv(a, b), c),
]
for case in cases:
e = try_solve(case, a)
self.assertEqual(e, None)
@parametrize_relational_types()
def test_noop(self, op):
a, b, _ = self._create_integer_symbols()
lhs, rhs = a, 42 * b
expr = op(lhs, rhs)
r = try_solve(expr, a)
self.assertNotEqual(r, None)
r_expr, r_rhs = r
self.assertEqual(r_expr, expr)
self.assertEqual(r_rhs, rhs)
@parametrize_relational_types()
def test_noop_rhs(self, op):
a, b, _ = self._create_integer_symbols()
lhs, rhs = 42 * b, a
mirror = mirror_rel_op(op)
self.assertNotEqual(mirror, None)
expr = op(lhs, rhs)
r = try_solve(expr, a)
self.assertNotEqual(r, None)
r_expr, r_rhs = r
self.assertEqual(r_expr, mirror(rhs, lhs))
self.assertEqual(r_rhs, lhs)
def _test_cases(
self,
cases: list[tuple[sympy.Basic, sympy.Basic]],
thing: sympy.Basic,
op: type[sympy.Rel],
**kwargs,
):
for source, expected in cases:
r = try_solve(source, thing, **kwargs)
self.assertTrue(
(r is None and expected is None)
or (r is not None and expected is not None)
)
if r is not None:
r_expr, r_rhs = r
self.assertEqual(r_rhs, expected)
self.assertEqual(r_expr, op(thing, expected))
def test_addition(self):
from sympy import Eq
a, b, c = self._create_integer_symbols()
cases = [
(Eq(a + b, 0), -b),
(Eq(a + 5, b - 5), b - 10),
(Eq(a + c * b, 1), 1 - c * b),
]
self._test_cases(cases, a, Eq)
@parametrize_relational_types(sympy.Eq, sympy.Ne)
def test_multiplication_division(self, op):
a, b, c = self._create_integer_symbols()
cases = [
(op(a * b, 1), 1 / b),
(op(a * 5, b - 5), (b - 5) / 5),
(op(a * b, c), c / b),
]
self._test_cases(cases, a, op)
@parametrize_relational_types(*INEQUALITY_TYPES)
def test_multiplication_division_inequality(self, op):
a, b, _ = self._create_integer_symbols()
intneg = sympy.Symbol("neg", integer=True, negative=True)
intpos = sympy.Symbol("pos", integer=True, positive=True)
cases = [
# Divide/multiply both sides by positive number.
(op(a * intpos, 1), 1 / intpos),
(op(a / (5 * intpos), 1), 5 * intpos),
(op(a * 5, b - 5), (b - 5) / 5),
# 'b' is not strictly positive nor negative, so we can't
# divide/multiply both sides by 'b'.
(op(a * b, 1), None),
(op(a / b, 1), None),
(op(a * b * intpos, 1), None),
]
mirror_cases = [
# Divide/multiply both sides by negative number.
(op(a * intneg, 1), 1 / intneg),
(op(a / (5 * intneg), 1), 5 * intneg),
(op(a * -5, b - 5), -(b - 5) / 5),
]
mirror_op = mirror_rel_op(op)
assert mirror_op is not None
self._test_cases(cases, a, op)
self._test_cases(mirror_cases, a, mirror_op)
@parametrize_relational_types()
def test_floordiv(self, op):
from sympy import Eq, Ge, Gt, Le, Lt, Ne
a, b, c = sympy.symbols("a b c")
pos = sympy.Symbol("pos", positive=True)
integer = sympy.Symbol("integer", integer=True)
# (Eq(FloorDiv(a, pos), integer), And(Ge(a, integer * pos), Lt(a, (integer + 1) * pos))),
# (Eq(FloorDiv(a + 5, pos), integer), And(Ge(a, integer * pos), Lt(a, (integer + 1) * pos))),
# (Ne(FloorDiv(a, pos), integer), Or(Lt(a, integer * pos), Ge(a, (integer + 1) * pos))),
special_case = {
# 'FloorDiv' turns into 'And', which can't be simplified any further.
Eq: (Eq(FloorDiv(a, pos), integer), None),
# 'FloorDiv' turns into 'Or', which can't be simplified any further.
Ne: (Ne(FloorDiv(a, pos), integer), None),
Gt: (Gt(FloorDiv(a, pos), integer), (integer + 1) * pos),
Ge: (Ge(FloorDiv(a, pos), integer), integer * pos),
Lt: (Lt(FloorDiv(a, pos), integer), integer * pos),
Le: (Le(FloorDiv(a, pos), integer), (integer + 1) * pos),
}[op]
cases: list[tuple[sympy.Basic, sympy.Basic]] = [
# 'b' is not strictly positive
(op(FloorDiv(a, b), integer), None),
# 'c' is not strictly positive
(op(FloorDiv(a, pos), c), None),
]
# The result might change after 'FloorDiv' transformation.
# Specifically:
# - [Ge, Gt] => Ge
# - [Le, Lt] => Lt
if op in (sympy.Gt, sympy.Ge):
r_op = sympy.Ge
elif op in (sympy.Lt, sympy.Le):
r_op = sympy.Lt
else:
r_op = op
self._test_cases([special_case, *cases], a, r_op)
self._test_cases(
[(special_case[0], None), *cases], a, r_op, floordiv_inequality=False
)
def test_floordiv_eq_simplify(self):
from sympy import Eq, Le, Lt
a = sympy.Symbol("a", positive=True, integer=True)
def check(expr, expected):
r = try_solve(expr, a)
self.assertNotEqual(r, None)
r_expr, _ = r
self.assertEqual(r_expr, expected)
# (a + 10) // 3 == 3
# =====================================
# 3 * 3 <= a + 10 (always true)
# a + 10 < 4 * 3 (not sure)
check(Eq(FloorDiv(a + 10, 3), 3), Lt(a, (3 + 1) * 3 - 10))
# (a + 10) // 2 == 4
# =====================================
# 4 * 2 <= 10 - a (not sure)
# 10 - a < 5 * 2 (always true)
check(Eq(FloorDiv(10 - a, 2), 4), Le(a, -(4 * 2 - 10)))
@skipIf(not TEST_Z3, "Z3 not installed")
def test_z3_proof_floordiv_eq_simplify(self):
import z3
from sympy import Eq, Lt
a = sympy.Symbol("a", positive=True, integer=True)
a_ = z3.Int("a")
# (a + 10) // 3 == 3
# =====================================
# 3 * 3 <= a + 10 (always true)
# a + 10 < 4 * 3 (not sure)
solver = z3.SolverFor("QF_NRA")
# Add assertions for 'a_'.
solver.add(a_ > 0)
expr = Eq(FloorDiv(a + 10, 3), 3)
r_expr, _ = try_solve(expr, a)
# Check 'try_solve' really returns the 'expected' below.
expected = Lt(a, (3 + 1) * 3 - 10)
self.assertEqual(r_expr, expected)
# Check whether there is an integer 'a_' such that the
# equation below is satisfied.
solver.add(
# expr
(z3.ToInt((a_ + 10) / 3.0) == 3)
!=
# expected
(a_ < (3 + 1) * 3 - 10)
)
# Assert that there's no such an integer.
# i.e. the transformation is sound.
r = solver.check()
self.assertEqual(r, z3.unsat)
def test_simple_floordiv_gcd(self):
x, y, z = sympy.symbols("x y z")
# positive tests
self.assertEqual(simple_floordiv_gcd(x, x), x)
self.assertEqual(simple_floordiv_gcd(128 * x, 2304), 128)
self.assertEqual(simple_floordiv_gcd(128 * x + 128 * y, 2304), 128)
self.assertEqual(simple_floordiv_gcd(128 * x + 128 * y + 8192 * z, 9216), 128)
self.assertEqual(simple_floordiv_gcd(49152 * x, 96 * x), 96 * x)
self.assertEqual(simple_floordiv_gcd(96 * x, 96 * x), 96 * x)
self.assertEqual(simple_floordiv_gcd(x * y, x), x)
self.assertEqual(simple_floordiv_gcd(384 * x * y, x * y), x * y)
self.assertEqual(simple_floordiv_gcd(256 * x * y, 8 * x), 8 * x)
# negative tests
self.assertEqual(simple_floordiv_gcd(x * y + x + y + 1, x + 1), 1)
| TestSympySolve |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table14.py | {
"start": 315,
"end": 1634
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table14.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({"num_format": "0.00;[Red]0.00", "dxf_index": 2})
format2 = workbook.add_format(
{"num_format": r"0.00_ ;\-0.00\ ", "dxf_index": 1}
)
format3 = workbook.add_format(
{"num_format": r"0.00_ ;[Red]\-0.00\ ", "dxf_index": 0}
)
data = [
["Foo", 1234, 2000, 4321],
["Bar", 1256, 4000, 4320],
["Baz", 2234, 3000, 4332],
["Bop", 1324, 1000, 4333],
]
worksheet.set_column("C:F", 10.288)
worksheet.add_table(
"C2:F6",
{
"data": data,
"columns": [
{},
{"format": format1},
{"format": format2},
{"format": format3},
],
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 13942,
"end": 14275
} | class ____(Menu):
""" Toolbar represented in a menu or context menu form.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
toolbar = Required(Instance(Toolbar), help="""
Reference to a toolbar.
""")
| ToolMenu |
python | donnemartin__system-design-primer | solutions/object_oriented_design/deck_of_cards/deck_of_cards.py | {
"start": 1473,
"end": 2094
} | class ____(Hand):
BLACKJACK = 21
def __init__(self, cards):
super(BlackJackHand, self).__init__(cards)
def score(self):
min_over = sys.MAXSIZE
max_under = -sys.MAXSIZE
for score in self.possible_scores():
if self.BLACKJACK < score < min_over:
min_over = score
elif max_under < score <= self.BLACKJACK:
max_under = score
return max_under if max_under != -sys.MAXSIZE else min_over
def possible_scores(self):
"""Return a list of possible scores, taking Aces into account."""
pass
| BlackJackHand |
python | PyCQA__flake8 | src/flake8/plugins/finder.py | {
"start": 681,
"end": 826
} | class ____(NamedTuple):
"""A plugin before loading."""
package: str
version: str
entry_point: importlib.metadata.EntryPoint
| Plugin |
python | astropy__astropy | astropy/io/ascii/core.py | {
"start": 6239,
"end": 6488
} | class ____(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
| InconsistentTableError |
python | Netflix__metaflow | metaflow/plugins/argo/argo_workflows.py | {
"start": 2714,
"end": 192369
} | class ____(object):
def __init__(
self,
name,
graph: FlowGraph,
flow,
code_package_metadata,
code_package_sha,
code_package_url,
production_token,
metadata,
flow_datastore,
environment,
event_logger,
monitor,
tags=None,
namespace=None,
username=None,
max_workers=None,
workflow_timeout=None,
workflow_priority=None,
auto_emit_argo_events=False,
notify_on_error=False,
notify_on_success=False,
notify_slack_webhook_url=None,
notify_pager_duty_integration_key=None,
notify_incident_io_api_key=None,
incident_io_alert_source_config_id=None,
incident_io_metadata: List[str] = None,
enable_heartbeat_daemon=True,
enable_error_msg_capture=False,
workflow_title=None,
workflow_description=None,
):
# Some high-level notes -
#
# Fail-fast behavior for Argo Workflows - Argo stops
# scheduling new steps as soon as it detects that one of the DAG nodes
# has failed. After waiting for all the scheduled DAG nodes to run till
# completion, Argo with fail the DAG. This implies that after a node
# has failed, it may be awhile before the entire DAG is marked as
# failed. There is nothing Metaflow can do here for failing even
# faster (as of Argo 3.2).
#
# argo stop` vs `argo terminate` - since we don't currently
# rely on any exit handlers, it's safe to either stop or terminate any running
# argo workflow deployed through Metaflow. This may not hold true, once we
# integrate with Argo Events.
#
# Currently, an Argo Workflow can only execute entirely within a single
# Kubernetes namespace. Multi-cluster / Multi-namespace execution is on the
# deck for v3.4 release for Argo Workflows; beyond which point, we will be
# able to support them natively.
#
# Since this implementation generates numerous templates on the fly, please
# ensure that your Argo Workflows controller doesn't restrict
# templateReferencing.
self.name = name
self.graph = graph
self._parse_conditional_branches()
self.flow = flow
self.code_package_metadata = code_package_metadata
self.code_package_sha = code_package_sha
self.code_package_url = code_package_url
self.production_token = production_token
self.metadata = metadata
self.flow_datastore = flow_datastore
self.environment = environment
self.event_logger = event_logger
self.monitor = monitor
self.tags = tags
self.namespace = namespace
self.username = username
self.max_workers = max_workers
self.workflow_timeout = workflow_timeout
self.workflow_priority = workflow_priority
self.auto_emit_argo_events = auto_emit_argo_events
self.notify_on_error = notify_on_error
self.notify_on_success = notify_on_success
self.notify_slack_webhook_url = notify_slack_webhook_url
self.notify_pager_duty_integration_key = notify_pager_duty_integration_key
self.notify_incident_io_api_key = notify_incident_io_api_key
self.incident_io_alert_source_config_id = incident_io_alert_source_config_id
self.incident_io_metadata = self.parse_incident_io_metadata(
incident_io_metadata
)
self.enable_heartbeat_daemon = enable_heartbeat_daemon
self.enable_error_msg_capture = enable_error_msg_capture
self.workflow_title = workflow_title
self.workflow_description = workflow_description
self.parameters = self._process_parameters()
self.config_parameters = self._process_config_parameters()
self.triggers, self.trigger_options = self._process_triggers()
self._schedule, self._timezone = self._get_schedule()
self._base_labels = self._base_kubernetes_labels()
self._base_annotations = self._base_kubernetes_annotations()
self._workflow_template = self._compile_workflow_template()
self._sensor = self._compile_sensor()
def __str__(self):
return str(self._workflow_template)
def deploy(self):
self.cleanup_previous_sensors()
try:
# Register workflow template.
ArgoClient(namespace=KUBERNETES_NAMESPACE).register_workflow_template(
self.name, self._workflow_template.to_json()
)
except Exception as e:
raise ArgoWorkflowsException(str(e))
def cleanup_previous_sensors(self):
try:
client = ArgoClient(namespace=KUBERNETES_NAMESPACE)
# Check for existing deployment and do cleanup
old_template = client.get_workflow_template(self.name)
if not old_template:
return None
# Clean up old sensors
old_sensor_namespace = old_template["metadata"]["annotations"].get(
"metaflow/sensor_namespace"
)
if old_sensor_namespace is None:
# This workflow was created before sensor annotations
# and may have a sensor in the default namespace
# we will delete it and it'll get recreated if need be
old_sensor_name = ArgoWorkflows._sensor_name(self.name)
client.delete_sensor(old_sensor_name, client._namespace)
else:
# delete old sensor only if it was somewhere else, otherwise it'll get replaced
old_sensor_name = old_template["metadata"]["annotations"][
"metaflow/sensor_name"
]
if (
not self._sensor
or old_sensor_namespace != ARGO_EVENTS_SENSOR_NAMESPACE
):
client.delete_sensor(old_sensor_name, old_sensor_namespace)
except Exception as e:
raise ArgoWorkflowsSensorCleanupException(str(e))
@staticmethod
def _sanitize(name):
# Metaflow allows underscores in node names, which are disallowed in Argo
# Workflow template names - so we swap them with hyphens which are not
# allowed by Metaflow - guaranteeing uniqueness.
return name.replace("_", "-")
@staticmethod
def _sensor_name(name):
# Unfortunately, Argo Events Sensor names don't allow for
# dots (sensors run into an error) which rules out self.name :(
return name.replace(".", "-")
@staticmethod
def list_templates(flow_name, all=False, page_size=100):
    """Yield names of deployed workflow templates.

    With all=True every template is yielded; otherwise only templates whose
    'metaflow/flow_name' annotation matches *flow_name*.
    """
    client = ArgoClient(namespace=KUBERNETES_NAMESPACE)
    for template in client.get_workflow_templates(page_size=page_size):
        metadata = template["metadata"]
        annotated_flow = metadata.get("annotations", {}).get(
            "metaflow/flow_name", None
        )
        if all or annotated_flow == flow_name:
            yield metadata["name"]
@staticmethod
def delete(name):
    """Delete a workflow template and its associated schedule and sensor.

    Schedule and sensor deletion failures are tolerated (those resources may
    simply not exist); a missing workflow template is treated as an error.

    Returns:
        Tuple (schedule_deleted, sensor_deleted, workflow_deleted).

    Raises:
        ArgoWorkflowsException: if the workflow template does not exist.
    """
    client = ArgoClient(namespace=KUBERNETES_NAMESPACE)
    # the workflow template might not exist, but we still want to try clean up associated sensors and schedules.
    workflow_template = client.get_workflow_template(name) or {}
    workflow_annotations = workflow_template.get("metadata", {}).get(
        "annotations", {}
    )
    sensor_name = ArgoWorkflows._sensor_name(
        workflow_annotations.get("metaflow/sensor_name", name)
    )
    # if below is missing then it was deployed before custom sensor namespaces
    sensor_namespace = workflow_annotations.get(
        "metaflow/sensor_namespace", KUBERNETES_NAMESPACE
    )
    # Always try to delete the schedule. Failure in deleting the schedule should not
    # be treated as an error, due to any of the following reasons
    # - there might not have been a schedule, or it was deleted by some other means
    # - retaining these resources should have no consequences as long as the workflow deletion succeeds.
    # - regarding cost and compute, the significant resources are part of the workflow teardown, not the schedule.
    schedule_deleted = client.delete_cronworkflow(name)
    # The workflow might have sensors attached to it, which consume actual resources.
    # Try to delete these as well.
    sensor_deleted = client.delete_sensor(sensor_name, sensor_namespace)
    # After cleaning up related resources, delete the workflow in question.
    # Failure in deleting is treated as critical and will be made visible to the user
    # for further action.
    workflow_deleted = client.delete_workflow_template(name)
    if workflow_deleted is None:
        raise ArgoWorkflowsException(
            "The workflow *%s* doesn't exist on Argo Workflows." % name
        )
    return schedule_deleted, sensor_deleted, workflow_deleted
@classmethod
def terminate(cls, flow_name, name):
    """Terminate a running workflow; raise if no such execution exists."""
    response = ArgoClient(namespace=KUBERNETES_NAMESPACE).terminate_workflow(name)
    if response is None:
        raise ArgoWorkflowsException(
            "No execution found for {flow_name}/{run_id} in Argo Workflows.".format(
                flow_name=flow_name, run_id=name
            )
        )
    return True
@staticmethod
def get_workflow_status(flow_name, name):
    """Return the current phase of an execution (e.g. Running, Succeeded)."""
    # TODO: Only look for workflows for the specified flow
    workflow = ArgoClient(namespace=KUBERNETES_NAMESPACE).get_workflow(name)
    if not workflow:
        raise ArgoWorkflowsException(
            "No execution found for {flow_name}/{run_id} in Argo Workflows.".format(
                flow_name=flow_name, run_id=name
            )
        )
    # return workflow phase for now
    return workflow.get("status", {}).get("phase")
@staticmethod
def suspend(name):
    """Suspend a running workflow on Argo Workflows."""
    ArgoClient(namespace=KUBERNETES_NAMESPACE).suspend_workflow(name)
    return True
@staticmethod
def unsuspend(name):
    """Resume a previously suspended workflow on Argo Workflows."""
    ArgoClient(namespace=KUBERNETES_NAMESPACE).unsuspend_workflow(name)
    return True
@staticmethod
def parse_incident_io_metadata(metadata: List[str] = None):
"parse key value pairs into a dict for incident.io metadata if given"
parsed_metadata = None
if metadata is not None:
parsed_metadata = {}
for kv in metadata:
key, value = kv.split("=", 1)
if key in parsed_metadata:
raise MetaflowException(
"Incident.io Metadata *%s* provided multiple times" % key
)
parsed_metadata[key] = value
return parsed_metadata
@classmethod
def trigger(cls, name, parameters=None):
    """Trigger an execution of a deployed workflow template.

    Args:
        name: name of the workflow template on Argo Workflows.
        parameters: optional dict of parameter values for this run.

    Returns:
        The client response from triggering the workflow template.

    Raises:
        ArgoWorkflowsException: if the template does not exist, was not
            deployed through Metaflow, or triggering fails.
    """
    if parameters is None:
        parameters = {}
    try:
        workflow_template = ArgoClient(
            namespace=KUBERNETES_NAMESPACE
        ).get_workflow_template(name)
    except Exception as e:
        raise ArgoWorkflowsException(str(e))
    if workflow_template is None:
        raise ArgoWorkflowsException(
            "The workflow *%s* doesn't exist on Argo Workflows in namespace *%s*. "
            "Please deploy your flow first." % (name, KUBERNETES_NAMESPACE)
        )
    else:
        try:
            # Check that the workflow was deployed through Metaflow
            workflow_template["metadata"]["annotations"]["metaflow/owner"]
        except KeyError:
            raise ArgoWorkflowsException(
                "An existing non-metaflow workflow with the same name as "
                "*%s* already exists in Argo Workflows. \nPlease modify the "
                "name of this flow or delete your existing workflow on Argo "
                "Workflows before proceeding." % name
            )
    try:
        # Identity resolves to "usertype:username"; fall back to "unknown"
        # for any missing piece.
        id_parts = resolve_identity().split(":")
        parts_size = len(id_parts)
        usertype = id_parts[0] if parts_size > 0 else "unknown"
        username = id_parts[1] if parts_size > 1 else "unknown"
        return ArgoClient(namespace=KUBERNETES_NAMESPACE).trigger_workflow_template(
            name,
            usertype,
            username,
            parameters,
        )
    except Exception as e:
        raise ArgoWorkflowsException(str(e))
def _base_kubernetes_labels(self):
"""
Get shared Kubernetes labels for Argo resources.
"""
# TODO: Add configuration through an environment variable or Metaflow config in the future if required.
labels = {"app.kubernetes.io/part-of": "metaflow"}
return labels
def _base_kubernetes_annotations(self):
    """
    Get shared Kubernetes annotations for Argo resources.

    Includes ownership/provenance metadata, optional @project info, and
    Argo UI title/description annotations.
    """
    from datetime import datetime, timezone

    # TODO: Add configuration through an environment variable or Metaflow config in the future if required.
    # base annotations
    deployed_at = datetime.now(timezone.utc).isoformat()
    annotations = {
        "metaflow/production_token": self.production_token,
        "metaflow/owner": self.username,
        "metaflow/user": "argo-workflows",
        "metaflow/flow_name": self.flow.name,
        "metaflow/deployment_timestamp": str(deployed_at),
    }
    if current.get("project_name"):
        annotations["metaflow/project_name"] = current.project_name
        annotations["metaflow/branch_name"] = current.branch_name
        annotations["metaflow/project_flow_name"] = current.project_flow_name
    # Add Argo Workflows title and description annotations
    # https://argo-workflows.readthedocs.io/en/latest/title-and-description/
    # Use CLI-provided values or auto-populate from metadata
    title = None
    if self.workflow_title:
        title = self.workflow_title.strip()
    title = title or current.get("project_flow_name") or self.flow.name
    description = None
    if self.workflow_description:
        description = self.workflow_description.strip()
    if not description and self.flow.__doc__:
        description = self.flow.__doc__.strip()
    if title:
        annotations["workflows.argoproj.io/title"] = title
    if description:
        annotations["workflows.argoproj.io/description"] = description
    return annotations
def _get_schedule(self):
schedule = self.flow._flow_decorators.get("schedule")
if schedule:
# Remove the field "Year" if it exists
schedule = schedule[0]
return " ".join(schedule.schedule.split()[:5]), schedule.timezone
return None, None
def schedule(self):
    """Register the cron schedule and (re)register the sensor for this workflow.

    Raises:
        ArgoWorkflowsSchedulingException: if scheduling or sensor
            registration fails.
    """
    try:
        argo_client = ArgoClient(namespace=KUBERNETES_NAMESPACE)
        argo_client.schedule_workflow_template(
            self.name, self._schedule, self._timezone
        )
        # Register sensor.
        # Metaflow will overwrite any existing sensor.
        sensor_name = ArgoWorkflows._sensor_name(self.name)
        if self._sensor:
            # The new sensor will go into the sensor namespace specified
            ArgoClient(namespace=ARGO_EVENTS_SENSOR_NAMESPACE).register_sensor(
                sensor_name, self._sensor.to_json(), ARGO_EVENTS_SENSOR_NAMESPACE
            )
    except Exception as e:
        raise ArgoWorkflowsSchedulingException(str(e))
def trigger_explanation(self):
# Trigger explanation for cron workflows
if self.flow._flow_decorators.get("schedule"):
return (
"This workflow triggers automatically via the CronWorkflow *%s*."
% self.name
)
# Trigger explanation for @trigger
elif self.flow._flow_decorators.get("trigger"):
return (
"This workflow triggers automatically when the upstream %s "
"is/are published."
% self.list_to_prose(
[event["name"] for event in self.triggers], "event"
)
)
# Trigger explanation for @trigger_on_finish
elif self.flow._flow_decorators.get("trigger_on_finish"):
return (
"This workflow triggers automatically when the upstream %s succeed(s)"
% self.list_to_prose(
[
# Truncate prefix `metaflow.` and suffix `.end` from event name
event["name"][len("metaflow.") : -len(".end")]
for event in self.triggers
],
"flow",
)
)
else:
return "No triggers defined. You need to launch this workflow manually."
@classmethod
def get_existing_deployment(cls, name):
    """Return (owner, production_token) for an existing deployment, or None.

    Raises ArgoWorkflowsException when a same-named template exists but was
    not deployed through Metaflow.
    """
    workflow_template = ArgoClient(
        namespace=KUBERNETES_NAMESPACE
    ).get_workflow_template(name)
    if workflow_template is None:
        return None
    try:
        # Both lookups stay inside the try so a missing metadata/annotations
        # key is reported as a non-metaflow workflow.
        metadata = workflow_template["metadata"]
        return (
            metadata["annotations"]["metaflow/owner"],
            metadata["annotations"]["metaflow/production_token"],
        )
    except KeyError:
        raise ArgoWorkflowsException(
            "An existing non-metaflow workflow with the same name as "
            "*%s* already exists in Argo Workflows. \nPlease modify the "
            "name of this flow or delete your existing workflow on Argo "
            "Workflows before proceeding." % name
        )
@classmethod
def get_execution(cls, name):
    """Return (owner, token, flow_name, branch_name, project_name) or None.

    Raises ArgoWorkflowsException when a same-named workflow exists but was
    not created by Metaflow.
    """
    workflow = ArgoClient(namespace=KUBERNETES_NAMESPACE).get_workflow(name)
    if workflow is None:
        return None
    try:
        annotations = workflow["metadata"]["annotations"]
        return (
            annotations["metaflow/owner"],
            annotations["metaflow/production_token"],
            annotations["metaflow/flow_name"],
            annotations.get("metaflow/branch_name", None),
            annotations.get("metaflow/project_name", None),
        )
    except KeyError:
        raise ArgoWorkflowsException(
            "A non-metaflow workflow *%s* already exists in Argo Workflows."
            % name
        )
def _process_parameters(self):
    """Collect non-config flow parameters for the workflow template.

    Returns:
        Dict mapping parameter name to a descriptor dict (python variable
        name, JSON-serialized default value, type name, description,
        is_required, plus FilePath-specific attributes when applicable).

    Raises:
        MetaflowException: on duplicate (case-insensitive) parameter names,
            or when a required parameter without a default is combined with
            an @schedule decorator.
    """
    parameters = {}
    has_schedule = self.flow._flow_decorators.get("schedule") is not None
    seen = set()
    for var, param in self.flow._get_parameters():
        # Throw an exception if the parameter is specified twice.
        norm = param.name.lower()
        if norm in seen:
            raise MetaflowException(
                "Parameter *%s* is specified twice. "
                "Note that parameter names are "
                "case-insensitive." % param.name
            )
        seen.add(norm)
        # NOTE: We skip config parameters as these do not have dynamic values,
        # and need to be treated differently.
        if param.IS_CONFIG_PARAMETER:
            continue
        extra_attrs = {}
        if param.kwargs.get("type") == JSONType:
            param_type = str(param.kwargs.get("type").name)
        elif isinstance(param.kwargs.get("type"), FilePathClass):
            param_type = str(param.kwargs.get("type").name)
            # Carry the file-specific flags through so the parameter can be
            # reconstructed faithfully at run time.
            extra_attrs["is_text"] = getattr(
                param.kwargs.get("type"), "_is_text", True
            )
            extra_attrs["encoding"] = getattr(
                param.kwargs.get("type"), "_encoding", "utf-8"
            )
        else:
            param_type = str(param.kwargs.get("type").__name__)
        is_required = param.kwargs.get("required", False)
        # Throw an exception if a schedule is set for a flow with required
        # parameters with no defaults. We currently don't have any notion
        # of data triggers in Argo Workflows.
        if "default" not in param.kwargs and is_required and has_schedule:
            raise MetaflowException(
                "The parameter *%s* does not have a default and is required. "
                "Scheduling such parameters via Argo CronWorkflows is not "
                "currently supported." % param.name
            )
        default_value = deploy_time_eval(param.kwargs.get("default"))
        # If the value is not required and the value is None, we set the value to
        # the JSON equivalent of None to please argo-workflows. Unfortunately it
        # has the side effect of casting the parameter value to string null during
        # execution - which needs to be fixed imminently.
        if default_value is None:
            default_value = json.dumps(None)
        elif param_type == "JSON":
            # NOTE: JSON parameters are deliberately double-encoded: the
            # value is serialized once, then dumped again to add outer quotes.
            if not isinstance(default_value, str):
                # once to serialize the default value if needed.
                default_value = json.dumps(default_value)
            # adds outer quotes to param
            default_value = json.dumps(default_value)
        else:
            # Make argo sensors happy
            default_value = json.dumps(default_value)
        parameters[param.name] = dict(
            python_var_name=var,
            name=param.name,
            value=default_value,
            type=param_type,
            description=param.kwargs.get("help"),
            is_required=is_required,
            **extra_attrs,
        )
    return parameters
def _process_config_parameters(self):
parameters = []
seen = set()
for var, param in self.flow._get_parameters():
if not param.IS_CONFIG_PARAMETER:
continue
# Throw an exception if the parameter is specified twice.
norm = param.name.lower()
if norm in seen:
raise MetaflowException(
"Parameter *%s* is specified twice. "
"Note that parameter names are "
"case-insensitive." % param.name
)
seen.add(norm)
parameters.append(
dict(name=param.name, kv_name=ConfigInput.make_key_name(param.name))
)
return parameters
def _process_triggers(self):
    """Impute Argo Events triggers from @trigger / @trigger_on_finish.

    Returns:
        Tuple (triggers, options): *triggers* is a list of event dicts,
        each augmented with a 'sanitized_name' safe for Argo Events
        sensors; *options* holds the decorator's trigger options (or None).

    Raises:
        ArgoWorkflowsException: if both decorators are used together, on
            invalid event names or payload keys, on unknown/duplicate
            parameter mappings, or on incomplete @project info.
    """
    # Impute triggers for Argo Workflow Template specified through @trigger and
    # @trigger_on_finish decorators
    # Disallow usage of @trigger and @trigger_on_finish together for now.
    if self.flow._flow_decorators.get("trigger") and self.flow._flow_decorators.get(
        "trigger_on_finish"
    ):
        raise ArgoWorkflowsException(
            "Argo Workflows doesn't support both *@trigger* and "
            "*@trigger_on_finish* decorators concurrently yet. Use one or the "
            "other for now."
        )
    triggers = []
    options = None
    # @trigger decorator
    if self.flow._flow_decorators.get("trigger"):
        # Parameters are not duplicated, and exist in the flow. Additionally,
        # convert them to lower case since Metaflow parameters are case
        # insensitive.
        seen = set()
        # NOTE: We skip config parameters as their values can not be set through event payloads
        params = set(
            [
                param.name.lower()
                for var, param in self.flow._get_parameters()
                if not param.IS_CONFIG_PARAMETER
            ]
        )
        trigger_deco = self.flow._flow_decorators.get("trigger")[0]
        trigger_deco.format_deploytime_value()
        for event in trigger_deco.triggers:
            parameters = {}
            # TODO: Add a check to guard against names starting with numerals(?)
            if not re.match(r"^[A-Za-z0-9_.-]+$", event["name"]):
                raise ArgoWorkflowsException(
                    "Invalid event name *%s* in *@trigger* decorator. Only "
                    "alphanumeric characters, underscores(_), dashes(-) and "
                    "dots(.) are allowed." % event["name"]
                )
            for key, value in event.get("parameters", {}).items():
                if not re.match(r"^[A-Za-z0-9_]+$", value):
                    raise ArgoWorkflowsException(
                        "Invalid event payload key *%s* for event *%s* in "
                        "*@trigger* decorator. Only alphanumeric characters and "
                        "underscores(_) are allowed." % (value, event["name"])
                    )
                if key.lower() not in params:
                    raise ArgoWorkflowsException(
                        "Parameter *%s* defined in the event mappings for "
                        "*@trigger* decorator not found in the flow." % key
                    )
                if key.lower() in seen:
                    raise ArgoWorkflowsException(
                        "Duplicate entries for parameter *%s* defined in the "
                        "event mappings for *@trigger* decorator." % key.lower()
                    )
                seen.add(key.lower())
                parameters[key.lower()] = value
            # NOTE: the event dicts on the decorator are mutated in place here.
            event["parameters"] = parameters
            event["type"] = "event"
        triggers.extend(self.flow._flow_decorators.get("trigger")[0].triggers)
        # Set automatic parameter mapping iff only a single event dependency is
        # specified with no explicit parameter mapping.
        if len(triggers) == 1 and not triggers[0].get("parameters"):
            triggers[0]["parameters"] = dict(zip(params, params))
        options = self.flow._flow_decorators.get("trigger")[0].options
    # @trigger_on_finish decorator
    if self.flow._flow_decorators.get("trigger_on_finish"):
        trigger_on_finish_deco = self.flow._flow_decorators.get(
            "trigger_on_finish"
        )[0]
        trigger_on_finish_deco.format_deploytime_value()
        for event in trigger_on_finish_deco.triggers:
            # Actual filters are deduced here since we don't have access to
            # the current object in the @trigger_on_finish decorator.
            project_name = event.get("project") or current.get("project_name")
            branch_name = event.get("branch") or current.get("branch_name")
            # validate that we have complete project info for an event name
            if project_name or branch_name:
                if not (project_name and branch_name):
                    # if one of the two is missing, we would end up listening to an event that will never be broadcast.
                    raise ArgoWorkflowsException(
                        "Incomplete project info. Please specify both 'project' and 'project_branch' or use the @project decorator"
                    )
            triggers.append(
                {
                    # Make sure this remains consistent with the event name format
                    # in ArgoWorkflowsInternalDecorator.
                    "name": "metaflow.%s.end"
                    % ".".join(
                        v
                        for v in [
                            project_name,
                            branch_name,
                            event["flow"],
                        ]
                        if v
                    ),
                    "filters": {
                        "auto-generated-by-metaflow": True,
                        "project_name": project_name,
                        "branch_name": branch_name,
                        # TODO: Add a time filters to guard against cached events
                    },
                    "type": "run",
                    "flow": event["flow"],
                }
            )
        options = self.flow._flow_decorators.get("trigger_on_finish")[0].options
    for event in triggers:
        # Assign a sanitized name since we need this at many places to please
        # Argo Events sensors. There is a slight possibility of name collision
        # but quite unlikely for us to worry about at this point.
        event["sanitized_name"] = "%s_%s" % (
            event["name"]
            .replace(".", "")
            .replace("-", "")
            .replace("@", "")
            .replace("+", ""),
            to_unicode(base64.b32encode(sha1(to_bytes(event["name"])).digest()))[
                :4
            ].lower(),
        )
    return triggers, options
def _compile_workflow_template(self):
    """Compile this Metaflow FlowSpec into an Argo WorkflowTemplate object.

    Assembles annotations (cron, parameters, tags, triggers, notification
    flags, DAG structure) and builds the WorkflowTemplate via the fluent
    builder objects (WorkflowTemplate/WorkflowSpec/ObjectMeta/...).
    """
    # This method compiles a Metaflow FlowSpec into Argo WorkflowTemplate
    #
    # WorkflowTemplate
    # |
    #    -- WorkflowSpec
    #         |
    #            -- Array<Template>
    #                     |
    #                        -- DAGTemplate, ContainerTemplate
    #                               |                  |
    #                                  -- Array<DAGTask>       |
    #                                         |                |
    #                                            -- Template
    #
    # Steps in FlowSpec are represented as DAGTasks.
    # A DAGTask can reference to -
    #     a ContainerTemplate (for linear steps..) or
    #     another DAGTemplate (for nested `foreach`s).
    #
    # While we could have very well inlined container templates inside a DAGTask,
    # unfortunately Argo variable substitution ({{pod.name}}) doesn't work as
    # expected within DAGTasks
    # (https://github.com/argoproj/argo-workflows/issues/7432) and we are forced to
    # generate container templates at the top level (in WorkflowSpec) and maintain
    # references to them within the DAGTask.
    annotations = {}
    if self._schedule is not None:
        # timezone is an optional field and json dumps on None will result in null
        # hence configuring it to an empty string
        if self._timezone is None:
            self._timezone = ""
        cron_info = {"schedule": self._schedule, "tz": self._timezone}
        annotations.update({"metaflow/cron": json.dumps(cron_info)})
    if self.parameters:
        annotations.update({"metaflow/parameters": json.dumps(self.parameters)})
    # Some more annotations to populate the Argo UI nicely
    if self.tags:
        annotations.update({"metaflow/tags": json.dumps(self.tags)})
    if self.triggers:
        annotations.update(
            {
                "metaflow/triggers": json.dumps(
                    [
                        {key: trigger.get(key) for key in ["name", "type"]}
                        for trigger in self.triggers
                    ]
                ),
                "metaflow/sensor_name": ArgoWorkflows._sensor_name(self.name),
                "metaflow/sensor_namespace": ARGO_EVENTS_SENSOR_NAMESPACE,
            }
        )
    if self.notify_on_error:
        annotations.update(
            {
                "metaflow/notify_on_error": json.dumps(
                    {
                        "slack": bool(self.notify_slack_webhook_url),
                        "pager_duty": bool(self.notify_pager_duty_integration_key),
                        "incident_io": bool(self.notify_incident_io_api_key),
                    }
                )
            }
        )
    if self.notify_on_success:
        annotations.update(
            {
                "metaflow/notify_on_success": json.dumps(
                    {
                        "slack": bool(self.notify_slack_webhook_url),
                        "pager_duty": bool(self.notify_pager_duty_integration_key),
                        "incident_io": bool(self.notify_incident_io_api_key),
                    }
                )
            }
        )
    try:
        # Build the DAG based on the DAGNodes given by the FlowGraph for the found FlowSpec class.
        _steps_info, graph_structure = self.graph.output_steps()
        graph_info = {
            # for the time being, we only need the graph_structure. Being mindful of annotation size limits we do not include anything extra.
            "graph_structure": graph_structure
        }
    except Exception:
        # Best-effort: a missing DAG annotation is preferable to failing
        # the whole compilation.
        graph_info = None
    dag_annotation = {"metaflow/dag": json.dumps(graph_info)}
    lifecycle_hooks = self._lifecycle_hooks()
    return (
        WorkflowTemplate()
        .metadata(
            # Workflow Template metadata.
            ObjectMeta()
            .name(self.name)
            # Argo currently only supports Workflow-level namespaces. When v3.4.0
            # is released, we should be able to support multi-namespace /
            # multi-cluster scheduling.
            .namespace(KUBERNETES_NAMESPACE)
            .annotations(annotations)
            .annotations(self._base_annotations)
            .labels(self._base_labels)
            .label("app.kubernetes.io/name", "metaflow-flow")
            .annotations(dag_annotation)
        )
        .spec(
            WorkflowSpec()
            # Set overall workflow timeout.
            .active_deadline_seconds(self.workflow_timeout)
            # TODO: Allow Argo to optionally archive all workflow execution logs
            #       It's disabled for now since it requires all Argo installations
            #       to enable an artifactory repository. If log archival is
            #       enabled in workflow controller, the logs for this workflow will
            #       automatically get archived.
            # .archive_logs()
            # Don't automount service tokens for now - https://github.com/kubernetes/kubernetes/issues/16779#issuecomment-159656641
            # TODO: Service account names are currently set in the templates. We
            #       can specify the default service account name here to reduce
            #       the size of the generated YAML by a tiny bit.
            # .automount_service_account_token()
            # TODO: Support ImagePullSecrets for Argo & Kubernetes
            #       Not strictly needed since a very valid workaround exists
            #       https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
            # .image_pull_secrets(...)
            # Limit workflow parallelism
            .parallelism(self.max_workers)
            # TODO: Support Prometheus metrics for Argo
            # .metrics(...)
            # TODO: Support PodGC and DisruptionBudgets
            .priority(self.workflow_priority)
            # Set workflow metadata
            .workflow_metadata(
                Metadata()
                .labels(self._base_labels)
                .label("app.kubernetes.io/name", "metaflow-run")
                .annotations(
                    {
                        **annotations,
                        **{
                            k: v
                            for k, v in self._base_annotations.items()
                            if k
                            # Skip custom title/description for workflows as this makes it harder to find specific runs.
                            not in [
                                "workflows.argoproj.io/title",
                                "workflows.argoproj.io/description",
                            ]
                        },
                        **{"metaflow/run_id": "argo-{{workflow.name}}"},
                    }
                )
                # TODO: Set dynamic labels using labels_from. Ideally, we would
                #       want to expose run_id as a label. It's easy to add labels,
                #       but very difficult to remove them - let's err on the
                #       conservative side and only add labels when we come across
                #       use-cases for them.
            )
            # Handle parameters
            .arguments(
                Arguments().parameters(
                    [
                        Parameter(parameter["name"])
                        .value(parameter["value"])
                        .description(parameter.get("description"))
                        # TODO: Better handle IncludeFile in Argo Workflows UI.
                        for parameter in self.parameters.values()
                    ]
                    + [
                        # Introduce non-required parameters for argo events so
                        # that the entire event payload can be accessed within the
                        # run. The parameter name is hashed to ensure that
                        # there won't be any collisions with Metaflow parameters.
                        Parameter(event["sanitized_name"])
                        .value(json.dumps(None))  # None in Argo Workflows world.
                        .description("auto-set by metaflow. safe to ignore.")
                        for event in self.triggers
                    ]
                )
            )
            # Set common pod metadata.
            .pod_metadata(
                Metadata()
                .labels(self._base_labels)
                .label("app.kubernetes.io/name", "metaflow-task")
                .annotations(
                    {
                        **annotations,
                        **self._base_annotations,
                        **{
                            "metaflow/run_id": "argo-{{workflow.name}}"
                        },  # we want pods of the workflow to have the run_id as an annotation as well
                    }
                )
            )
            # Set the entrypoint to flow name
            .entrypoint(self.flow.name)
            # OnExit hooks
            .onExit(
                "capture-error-hook-fn-preflight"
                if self.enable_error_msg_capture
                else None
            )
            # Set lifecycle hooks if notifications are enabled
            .hooks(
                {
                    lifecycle.name: lifecycle
                    for hook in lifecycle_hooks
                    for lifecycle in hook.lifecycle_hooks
                }
            )
            # Top-level DAG template(s)
            .templates(self._dag_templates())
            # Container templates
            .templates(self._container_templates())
            # Lifecycle hook template(s)
            .templates([hook.template for hook in lifecycle_hooks])
            # Exit hook template(s)
            .templates(self._exit_hook_templates())
            # Sidecar templates (Daemon Containers)
            .templates(self._daemon_templates())
        )
    )
# Visit every node and record information on conditional step structure
def _parse_conditional_branches(self):
    """Analyze the flow graph's split-switch (conditional) structure.

    Populates:
        self.conditional_nodes: names of steps on a conditional branch.
        self.conditional_join_nodes: names of steps that close one or more
            conditional branches.
        self.matching_conditional_join_dict: split-switch name -> name of
            the join step that closes it.
        self.recursive_nodes: split-switch steps that can loop back to
            themselves.
    """
    self.conditional_nodes = set()
    self.conditional_join_nodes = set()
    self.matching_conditional_join_dict = {}
    self.recursive_nodes = set()
    node_conditional_parents = {}
    node_conditional_branches = {}

    # Recursively walk from each split-switch, recording for every reached
    # node the chain of conditional steps leading to it ("branch") and the
    # stack of enclosing split-switches ("parents").
    def _visit(node, conditional_branch, conditional_parents=None):
        if not node.type == "split-switch" and not (
            conditional_branch and conditional_parents
        ):
            # skip regular non-conditional nodes entirely
            return
        if node.type == "split-switch":
            conditional_branch = conditional_branch + [node.name]
            c_br = node_conditional_branches.get(node.name, [])
            node_conditional_branches[node.name] = c_br + [
                b for b in conditional_branch if b not in c_br
            ]
            conditional_parents = (
                [node.name]
                if not conditional_parents
                else conditional_parents + [node.name]
            )
            node_conditional_parents[node.name] = conditional_parents
            # check for recursion. this split is recursive if any of its out functions are itself.
            if any(
                out_func for out_func in node.out_funcs if out_func == node.name
            ):
                self.recursive_nodes.add(node.name)
        if conditional_parents and not node.type == "split-switch":
            node_conditional_parents[node.name] = conditional_parents
            conditional_branch = conditional_branch + [node.name]
            c_br = node_conditional_branches.get(node.name, [])
            node_conditional_branches[node.name] = c_br + [
                b for b in conditional_branch if b not in c_br
            ]
            self.conditional_nodes.add(node.name)
        if conditional_branch and conditional_parents:
            for n in node.out_funcs:
                child = self.graph[n]
                if child.name == node.name:
                    # skip self-loops (recursive switches) to avoid
                    # infinite recursion
                    continue
                _visit(child, conditional_branch, conditional_parents)

    # First we visit all nodes to determine conditional parents and branches
    for n in self.graph:
        _visit(n, [])

    # helper to clean up conditional info for all children of a node, until a new split-switch is encountered.
    def _cleanup_conditional_status(node_name, seen):
        if self.graph[node_name].type == "split-switch":
            # stop recursive cleanup if we hit a new split-switch
            return
        if node_name in self.conditional_nodes:
            self.conditional_nodes.remove(node_name)
        node_conditional_parents[node_name] = []
        node_conditional_branches[node_name] = []
        for p in self.graph[node_name].out_funcs:
            if p not in seen:
                _cleanup_conditional_status(p, seen + [p])

    # Then we traverse again in order to determine conditional join nodes, and matching conditional join info
    for node in self.graph:
        if node_conditional_parents.get(node.name, False):
            # do the required postprocessing for anything requiring node.in_funcs
            # check that in previous parsing we have not closed all conditional in_funcs.
            # If so, this step can not be conditional either
            is_conditional = any(
                in_func in self.conditional_nodes
                or self.graph[in_func].type == "split-switch"
                for in_func in node.in_funcs
            )
            if is_conditional:
                self.conditional_nodes.add(node.name)
            else:
                if node.name in self.conditional_nodes:
                    self.conditional_nodes.remove(node.name)
            # does this node close the latest conditional parent branches?
            conditional_in_funcs = [
                in_func
                for in_func in node.in_funcs
                if node_conditional_branches.get(in_func, False)
            ]
            closed_conditional_parents = []
            for last_split_switch in node_conditional_parents.get(node.name, [])[
                ::-1
            ]:
                last_conditional_split_nodes = self.graph[
                    last_split_switch
                ].out_funcs
                # NOTE: How do we define a conditional join step?
                # The idea here is that we check if the conditional branches(e.g. chains of conditional steps leading to) of all the in_funcs
                # manage to tick off every step name that follows a split-switch
                # For example, consider the following structure
                # switch_step -> A, B, C
                # A -> A2 -> A3 -> A4 -> B2
                # B -> B2 -> B3 -> C3
                # C -> C2 -> C3 -> end
                #
                # if we look at the in_funcs for C3, they are (C2, B3)
                # B3 closes off branches started by A and B
                # C3 closes off branches started by C
                # therefore C3 is a conditional join step for the 'switch_step'
                # NOTE: Then what about a skip step?
                # some switch cases might not introduce any distinct steps of their own, opting to instead skip ahead to a later common step.
                # Example:
                # switch_step -> A, B, C
                # A -> A1 -> B2 -> C
                # B -> B1 -> B2 -> C
                #
                # In this case, C is a skip step as it does not add any conditional branching of its own.
                # C is also a conditional join, as it closes all branches started by 'switch_step'
                closes_branches = all(
                    (
                        # branch_root_node_name needs to be in at least one conditional_branch for it to be closed.
                        any(
                            branch_root_node_name
                            in node_conditional_branches.get(in_func, [])
                            for in_func in conditional_in_funcs
                        )
                        # need to account for a switch case skipping completely, not having a conditional-branch of its own.
                        if branch_root_node_name != node.name
                        else True
                    )
                    for branch_root_node_name in last_conditional_split_nodes
                )
                if closes_branches:
                    closed_conditional_parents.append(last_split_switch)
                    self.conditional_join_nodes.add(node.name)
                    self.matching_conditional_join_dict[last_split_switch] = (
                        node.name
                    )
            # Did we close all conditionals? Then this branch and all its children are not conditional anymore (unless a new conditional branch is encountered).
            if not [
                p
                for p in node_conditional_parents.get(node.name, [])
                if p not in closed_conditional_parents
            ]:
                _cleanup_conditional_status(node.name, [])
def _is_conditional_node(self, node):
return node.name in self.conditional_nodes
def _is_conditional_skip_node(self, node):
    """True if *node* is a switch target that other conditional paths also
    skip into (a 'skip step': directly after a split-switch, with more than
    one conditional incoming edge)."""
    if not self._is_conditional_node(node):
        return False
    follows_switch = any(
        self.graph[in_func].type == "split-switch" for in_func in node.in_funcs
    )
    if not follows_switch:
        return False
    conditional_in_funcs = [
        in_func
        for in_func in node.in_funcs
        if self._is_conditional_node(self.graph[in_func])
        or self.graph[in_func].type == "split-switch"
    ]
    return len(conditional_in_funcs) > 1
def _is_conditional_join_node(self, node):
return node.name in self.conditional_join_nodes
def _many_in_funcs_all_conditional(self, node):
    """True if *node* has multiple in_funcs and every one is conditional."""
    total = len(node.in_funcs)
    conditional_count = sum(
        1
        for in_func in node.in_funcs
        if self._is_conditional_node(self.graph[in_func])
    )
    return conditional_count > 1 and conditional_count == total
def _is_recursive_node(self, node):
return node.name in self.recursive_nodes
def _matching_conditional_join(self, node):
# If no earlier conditional join step is found during parsing, then 'end' is always one.
return self.matching_conditional_join_dict.get(node.name, "end")
# Visit every node and yield the uber DAGTemplate(s).
def _dag_templates(self):
def _visit(
node,
exit_node=None,
templates=None,
dag_tasks=None,
parent_foreach=None,
seen=None,
): # Returns Tuple[List[Template], List[DAGTask]]
""" """
# Every for-each node results in a separate subDAG and an equivalent
# DAGTemplate rooted at the child of the for-each node. Each DAGTemplate
# has a unique name - the top-level DAGTemplate is named as the name of
# the flow and the subDAG DAGTemplates are named after the (only) descendant
# of the for-each node.
# Emit if we have reached the end of the sub workflow
if seen is None:
seen = []
if dag_tasks is None:
dag_tasks = []
if templates is None:
templates = []
if exit_node is not None and exit_node is node.name:
return templates, dag_tasks
if node.name in seen:
return templates, dag_tasks
seen.append(node.name)
# helper variable for recursive conditional inputs
has_foreach_inputs = False
if node.name == "start":
# Start node has no dependencies.
dag_task = DAGTask(self._sanitize(node.name)).template(
self._sanitize(node.name)
)
if (
node.is_inside_foreach
and self.graph[node.in_funcs[0]].type == "foreach"
and not self.graph[node.in_funcs[0]].parallel_foreach
# We need to distinguish what is a "regular" foreach (i.e something that doesn't care about to gang semantics)
# vs what is a "num_parallel" based foreach (i.e. something that follows gang semantics.)
# A `regular` foreach is basically any arbitrary kind of foreach.
):
# helper variable for recursive conditional inputs
has_foreach_inputs = True
# Child of a foreach node needs input-paths as well as split-index
# This child is the first node of the sub workflow and has no dependency
parameters = [
Parameter("input-paths").value("{{inputs.parameters.input-paths}}"),
Parameter("split-index").value("{{inputs.parameters.split-index}}"),
]
dag_task = (
DAGTask(self._sanitize(node.name))
.template(self._sanitize(node.name))
.arguments(Arguments().parameters(parameters))
)
elif node.parallel_step:
# This is the step where the @parallel decorator is defined.
# Since this DAGTask will call the for the `resource` [based templates]
# (https://argo-workflows.readthedocs.io/en/stable/walk-through/kubernetes-resources/)
# we have certain constraints on the way we can pass information inside the Jobset manifest
# [All templates will have access](https://argo-workflows.readthedocs.io/en/stable/variables/#all-templates)
# to the `inputs.parameters` so we will pass down ANY/ALL information using the
# input parameters.
# We define the usual parameters like input-paths/split-index etc. but we will also
# define the following:
# - `workerCount`: parameter which will be used to determine the number of
# parallel worker jobs
# - `jobset-name`: parameter which will be used to determine the name of the jobset.
# This parameter needs to be dynamic so that when we have retries we don't
# end up using the name of the jobset again (if we do, it will crash since k8s wont allow duplicated job names)
# - `retryCount`: parameter which will be used to determine the number of retries
# This parameter will *only* be available within the container templates like we
# have it for all other DAGTasks and NOT for custom kubernetes resource templates.
# So as a work-around, we will set it as the `retryCount` parameter instead of
# setting it as a {{ retries }} in the CLI code. Once set as a input parameter,
# we can use it in the Jobset Manifest templates as `{{inputs.parameters.retryCount}}`
# - `task-id-entropy`: This is a parameter which will help derive task-ids and jobset names. This parameter
# contains the relevant amount of entropy to ensure that task-ids and jobset names
# are uniquish. We will also use this in the join task to construct the task-ids of
# all parallel tasks since the task-ids for parallel task are minted formulaically.
parameters = [
Parameter("input-paths").value("{{inputs.parameters.input-paths}}"),
Parameter("num-parallel").value(
"{{inputs.parameters.num-parallel}}"
),
Parameter("split-index").value("{{inputs.parameters.split-index}}"),
Parameter("task-id-entropy").value(
"{{inputs.parameters.task-id-entropy}}"
),
# we cant just use hyphens with sprig.
# https://github.com/argoproj/argo-workflows/issues/10567#issuecomment-1452410948
Parameter("workerCount").value(
"{{=sprig.int(sprig.sub(sprig.int(inputs.parameters['num-parallel']),1))}}"
),
]
# Resolve retry strategy to determine if we should add retry-related parameters.
# {{retries}} is only available if retryStrategy is specified in the template.
max_user_code_retries = 0
max_error_retries = 0
for decorator in node.decorators:
user_code_retries, error_retries = decorator.step_task_retry_count()
max_user_code_retries = max(
max_user_code_retries, user_code_retries
)
max_error_retries = max(max_error_retries, error_retries)
total_retries = max_user_code_retries + max_error_retries
if total_retries > 0:
parameters.extend(
[
Parameter("retryCount").value("{{retries}}"),
# The job-setname needs to be unique for each retry
# and we cannot use the `generateName` field in the
# Jobset Manifest since we need to construct the subdomain
# and control pod domain name pre-hand. So we will use
# the retry count to ensure that the jobset name is unique
Parameter("jobset-name").value(
"js-{{inputs.parameters.task-id-entropy}}{{retries}}",
),
]
)
else:
parameters.extend(
[
Parameter("jobset-name").value(
"js-{{inputs.parameters.task-id-entropy}}",
)
]
)
dag_task = (
DAGTask(self._sanitize(node.name))
.template(self._sanitize(node.name))
.arguments(Arguments().parameters(parameters))
)
else:
# Every other node needs only input-paths
parameters = [
Parameter("input-paths").value(
compress_list(
[
"argo-{{workflow.name}}/%s/{{tasks.%s.outputs.parameters.task-id}}"
% (n, self._sanitize(n))
for n in node.in_funcs
],
# NOTE: We set zlibmin to infinite because zlib compression for the Argo input-paths breaks template value substitution.
zlibmin=inf,
)
)
]
# NOTE: Due to limitations with Argo Workflows Parameter size we
# can not pass arbitrarily large lists of task id's to join tasks.
# Instead we ensure that task id's for foreach tasks can be
# deduced deterministically and pass the relevant information to
# the join task.
#
# We need to add the split-index and root-input-path for the last
# step in any foreach scope and use these to generate the task id,
# as the join step uses the root and the cardinality of the
# foreach scope to generate the required id's.
if (
node.is_inside_foreach
and self.graph[node.out_funcs[0]].type == "join"
):
if any(
self.graph[parent].matching_join
== self.graph[node.out_funcs[0]].name
and self.graph[parent].type == "foreach"
for parent in self.graph[node.out_funcs[0]].split_parents
):
parameters.extend(
[
Parameter("split-index").value(
"{{inputs.parameters.split-index}}"
),
Parameter("root-input-path").value(
"{{inputs.parameters.input-paths}}"
),
]
)
conditional_deps = [
"%s.Succeeded" % self._sanitize(in_func)
for in_func in node.in_funcs
if self._is_conditional_node(self.graph[in_func])
or self.graph[in_func].type == "split-switch"
]
required_deps = [
"%s.Succeeded" % self._sanitize(in_func)
for in_func in node.in_funcs
if not self._is_conditional_node(self.graph[in_func])
and self.graph[in_func].type != "split-switch"
]
if self._is_conditional_skip_node(
node
) or self._many_in_funcs_all_conditional(node):
# skip nodes need unique condition handling
conditional_deps = [
"%s.Succeeded" % self._sanitize(in_func)
for in_func in node.in_funcs
]
required_deps = []
both_conditions = required_deps and conditional_deps
depends_str = "{required}{_and}{conditional}".format(
required=("(%s)" if both_conditions else "%s")
% " && ".join(required_deps),
_and=" && " if both_conditions else "",
conditional=("(%s)" if both_conditions else "%s")
% " || ".join(conditional_deps),
)
dag_task = (
DAGTask(self._sanitize(node.name))
.depends(depends_str)
.template(self._sanitize(node.name))
.arguments(Arguments().parameters(parameters))
)
# Add conditional if this is the first step in a conditional branch
switch_in_funcs = [
in_func
for in_func in node.in_funcs
if self.graph[in_func].type == "split-switch"
]
if (
self._is_conditional_node(node)
or self._is_conditional_skip_node(node)
or self._is_conditional_join_node(node)
) and switch_in_funcs:
conditional_when = "||".join(
[
"{{tasks.%s.outputs.parameters.switch-step}}==%s"
% (self._sanitize(switch_in_func), node.name)
for switch_in_func in switch_in_funcs
]
)
non_switch_in_funcs = [
in_func
for in_func in node.in_funcs
if in_func not in switch_in_funcs
]
status_when = ""
if non_switch_in_funcs:
status_when = "||".join(
[
"{{tasks.%s.status}}==Succeeded"
% self._sanitize(in_func)
for in_func in non_switch_in_funcs
]
)
total_when = (
f"({status_when}) || ({conditional_when})"
if status_when
else conditional_when
)
dag_task.when(total_when)
dag_tasks.append(dag_task)
# End the workflow if we have reached the end of the flow
if node.type == "end":
return templates, dag_tasks
# For split nodes traverse all the children
if node.type == "split":
for n in node.out_funcs:
_visit(
self.graph[n],
node.matching_join,
templates,
dag_tasks,
parent_foreach,
seen,
)
return _visit(
self.graph[node.matching_join],
exit_node,
templates,
dag_tasks,
parent_foreach,
seen,
)
elif node.type == "split-switch":
if self._is_recursive_node(node):
# we need an additional recursive template if the step is recursive
# NOTE: in the recursive case, the original step is renamed in the container templates to 'recursive-<step_name>'
# so that we do not have to touch the step references in the DAG.
#
# NOTE: The way that recursion in Argo Workflows is achieved is with the following structure:
# - the usual 'example-step' template which would match example_step in flow code is renamed to 'recursive-example-step'
# - templates has another template with the original task name: 'example-step'
# - the template 'example-step' in turn has steps
# - 'example-step-internal' which uses the metaflow step executing template 'recursive-example-step'
# - 'example-step-recursion' which calls the parent template 'example-step' if switch-step output from 'example-step-internal' matches the condition.
sanitized_name = self._sanitize(node.name)
templates.append(
Template(sanitized_name)
.steps(
[
WorkflowStep()
.name("%s-internal" % sanitized_name)
.template("recursive-%s" % sanitized_name)
.arguments(
Arguments().parameters(
[
Parameter("input-paths").value(
"{{inputs.parameters.input-paths}}"
)
]
# Add the additional inputs required by specific node types.
# We do not need to cover joins or @parallel, as a split-switch step can not be either one of these.
+ (
[
Parameter("split-index").value(
"{{inputs.parameters.split-index}}"
)
]
if has_foreach_inputs
else []
)
)
)
]
)
.steps(
[
WorkflowStep()
.name("%s-recursion" % sanitized_name)
.template(sanitized_name)
.when(
"{{steps.%s-internal.outputs.parameters.switch-step}}==%s"
% (sanitized_name, node.name)
)
.arguments(
Arguments().parameters(
[
Parameter("input-paths").value(
"argo-{{workflow.name}}/%s/{{steps.%s-internal.outputs.parameters.task-id}}"
% (node.name, sanitized_name)
)
]
+ (
[
Parameter("split-index").value(
"{{inputs.parameters.split-index}}"
)
]
if has_foreach_inputs
else []
)
)
),
]
)
.inputs(Inputs().parameters(parameters))
.outputs(
# NOTE: We try to read the output parameters from the recursive template call first (<step>-recursion), and the internal step second (<step>-internal).
# This guarantees that we always get the output parameters of the last recursive step that executed.
Outputs().parameters(
[
Parameter("task-id").valueFrom(
{
"expression": "(steps['%s-recursion']?.outputs ?? steps['%s-internal']?.outputs).parameters['task-id']"
% (sanitized_name, sanitized_name)
}
),
Parameter("switch-step").valueFrom(
{
"expression": "(steps['%s-recursion']?.outputs ?? steps['%s-internal']?.outputs).parameters['switch-step']"
% (sanitized_name, sanitized_name)
}
),
]
)
)
)
for n in node.out_funcs:
_visit(
self.graph[n],
self._matching_conditional_join(node),
templates,
dag_tasks,
parent_foreach,
seen,
)
return _visit(
self.graph[self._matching_conditional_join(node)],
exit_node,
templates,
dag_tasks,
parent_foreach,
seen,
)
# For foreach nodes generate a new sub DAGTemplate
# We do this for "regular" foreaches (ie. `self.next(self.a, foreach=)`)
elif node.type == "foreach":
foreach_template_name = self._sanitize(
"%s-foreach-%s"
% (
node.name,
"parallel" if node.parallel_foreach else node.foreach_param,
# Since foreach's are derived based on `self.next(self.a, foreach="<varname>")`
# vs @parallel foreach are done based on `self.next(self.a, num_parallel="<some-number>")`,
# we need to ensure that `foreach_template_name` suffix is appropriately set based on the kind
# of foreach.
)
)
# There are two separate "DAGTask"s created for the foreach node.
# - The first one is a "jump-off" DAGTask where we propagate the
# input-paths and split-index. This thing doesn't create
# any actual containers and it responsible for only propagating
# the parameters.
# - The DAGTask that follows first DAGTask is the one
# that uses the ContainerTemplate. This DAGTask is named the same
# thing as the foreach node. We will leverage a similar pattern for the
# @parallel tasks.
#
foreach_task = (
DAGTask(foreach_template_name)
.depends(f"{self._sanitize(node.name)}.Succeeded")
.template(foreach_template_name)
.arguments(
Arguments().parameters(
[
Parameter("input-paths").value(
"argo-{{workflow.name}}/%s/{{tasks.%s.outputs.parameters.task-id}}"
% (node.name, self._sanitize(node.name))
),
Parameter("split-index").value("{{item}}"),
]
+ (
[
Parameter("root-input-path").value(
"argo-{{workflow.name}}/%s/{{tasks.%s.outputs.parameters.task-id}}"
% (node.name, self._sanitize(node.name))
),
]
if parent_foreach
else []
)
+ (
# Disabiguate parameters for a regular `foreach` vs a `@parallel` foreach
[
Parameter("num-parallel").value(
"{{tasks.%s.outputs.parameters.num-parallel}}"
% self._sanitize(node.name)
),
Parameter("task-id-entropy").value(
"{{tasks.%s.outputs.parameters.task-id-entropy}}"
% self._sanitize(node.name)
),
]
if node.parallel_foreach
else []
)
)
)
.with_param(
# For @parallel workloads `num-splits` will be explicitly set to one so that
# we can piggyback on the current mechanism with which we leverage argo.
"{{tasks.%s.outputs.parameters.num-splits}}"
% self._sanitize(node.name)
)
)
# Add conditional if this is the first step in a conditional branch
if self._is_conditional_node(node) and not any(
self._is_conditional_node(self.graph[in_func])
for in_func in node.in_funcs
):
in_func = node.in_funcs[0]
foreach_task.when(
"{{tasks.%s.outputs.parameters.switch-step}}==%s"
% (self._sanitize(in_func), node.name)
)
dag_tasks.append(foreach_task)
templates, dag_tasks_1 = _visit(
self.graph[node.out_funcs[0]],
node.matching_join,
templates,
[],
node.name,
seen,
)
# How do foreach's work on Argo:
# Lets say you have the following dag: (start[sets `foreach="x"`]) --> (task-a [actual foreach]) --> (join) --> (end)
# With argo we will :
# (start [sets num-splits]) --> (task-a-foreach-(0,0) [dummy task]) --> (task-a) --> (join) --> (end)
# The (task-a-foreach-(0,0) [dummy task]) propagates the values of the `split-index` and the input paths.
# to the actual foreach task.
templates.append(
Template(foreach_template_name)
.inputs(
Inputs().parameters(
[Parameter("input-paths"), Parameter("split-index")]
+ ([Parameter("root-input-path")] if parent_foreach else [])
+ (
[
Parameter("num-parallel"),
Parameter("task-id-entropy"),
# Parameter("workerCount")
]
if node.parallel_foreach
else []
)
)
)
.outputs(
Outputs().parameters(
[
# non @parallel tasks set task-ids as outputs
Parameter("task-id").valueFrom(
{
"parameter": "{{tasks.%s.outputs.parameters.task-id}}"
% self._sanitize(
self.graph[node.matching_join].in_funcs[0]
)
}
if not self._is_conditional_join_node(
self.graph[node.matching_join]
)
else
# Note: If the nodes leading to the join are conditional, then we need to use an expression to pick the outputs from the task that executed.
# ref for operators: https://github.com/expr-lang/expr/blob/master/docs/language-definition.md
{
"expression": "get((%s)?.parameters, 'task-id')"
% " ?? ".join(
f"tasks['{self._sanitize(func)}']?.outputs"
for func in self.graph[
node.matching_join
].in_funcs
)
}
),
]
if not node.parallel_foreach
else [
# @parallel tasks set `task-id-entropy` and `num-parallel`
# as outputs so task-ids can be derived in the join step.
# Both of these values should be propagated from the
# jobset labels.
Parameter("num-parallel").valueFrom(
{
"parameter": "{{tasks.%s.outputs.parameters.num-parallel}}"
% self._sanitize(
self.graph[node.matching_join].in_funcs[0]
)
}
),
Parameter("task-id-entropy").valueFrom(
{
"parameter": "{{tasks.%s.outputs.parameters.task-id-entropy}}"
% self._sanitize(
self.graph[node.matching_join].in_funcs[0]
)
}
),
]
)
)
.dag(DAGTemplate().fail_fast().tasks(dag_tasks_1))
)
join_foreach_task = (
DAGTask(self._sanitize(self.graph[node.matching_join].name))
.template(self._sanitize(self.graph[node.matching_join].name))
.depends(f"{foreach_template_name}.Succeeded")
.arguments(
Arguments().parameters(
(
[
Parameter("input-paths").value(
"argo-{{workflow.name}}/%s/{{tasks.%s.outputs.parameters.task-id}}"
% (node.name, self._sanitize(node.name))
),
Parameter("split-cardinality").value(
"{{tasks.%s.outputs.parameters.split-cardinality}}"
% self._sanitize(node.name)
),
]
if not node.parallel_foreach
else [
Parameter("num-parallel").value(
"{{tasks.%s.outputs.parameters.num-parallel}}"
% self._sanitize(node.name)
),
Parameter("task-id-entropy").value(
"{{tasks.%s.outputs.parameters.task-id-entropy}}"
% self._sanitize(node.name)
),
]
)
+ (
[
Parameter("split-index").value(
# TODO : Pass down these parameters to the jobset stuff.
"{{inputs.parameters.split-index}}"
),
Parameter("root-input-path").value(
"{{inputs.parameters.input-paths}}"
),
]
if parent_foreach
else []
)
)
)
)
dag_tasks.append(join_foreach_task)
return _visit(
self.graph[self.graph[node.matching_join].out_funcs[0]],
exit_node,
templates,
dag_tasks,
parent_foreach,
seen,
)
# For linear nodes continue traversing to the next node
if node.type in ("linear", "join", "start"):
return _visit(
self.graph[node.out_funcs[0]],
exit_node,
templates,
dag_tasks,
parent_foreach,
seen,
)
else:
raise ArgoWorkflowsException(
"Node type *%s* for step *%s* is not currently supported by "
"Argo Workflows." % (node.type, node.name)
)
# Generate daemon tasks
daemon_tasks = [
DAGTask("%s-task" % daemon_template.name).template(daemon_template.name)
for daemon_template in self._daemon_templates()
]
templates, dag_tasks = _visit(node=self.graph["start"], dag_tasks=daemon_tasks)
# Add the DAG template only after fully traversing the graph so we are guaranteed to have all the dag_tasks collected.
templates.append(
Template(self.flow.name).dag(DAGTemplate().fail_fast().tasks(dag_tasks))
)
return templates
# Visit every node and yield ContainerTemplates.
def _container_templates(self):
try:
# Kubernetes is a soft dependency for generating Argo objects.
# We can very well remove this dependency for Argo with the downside of
# adding a bunch more json bloat classes (looking at you... V1Container)
from kubernetes import client as kubernetes_sdk
except (NameError, ImportError):
raise MetaflowException(
"Could not import Python package 'kubernetes'. Install kubernetes "
"sdk (https://pypi.org/project/kubernetes/) first."
)
for node in self.graph:
# Resolve entry point for pod container.
script_name = os.path.basename(sys.argv[0])
executable = self.environment.executable(node.name)
# TODO: Support R someday. Quite a few people will be happy.
entrypoint = [executable, script_name]
# The values with curly braces '{{}}' are made available by Argo
# Workflows. Unfortunately, there are a few bugs in Argo which prevent
# us from accessing these values as liberally as we would like to - e.g,
# within inline templates - so we are forced to generate container templates
run_id = "argo-{{workflow.name}}"
# Unfortunately, we don't have any easy access to unique ids that remain
# stable across task attempts through Argo Workflows. So, we are forced to
# stitch them together ourselves. The task ids are a function of step name,
# split index and the parent task id (available from input path name).
# Ideally, we would like these task ids to be the same as node name
# (modulo retry suffix) on Argo Workflows but that doesn't seem feasible
# right now.
task_idx = ""
input_paths = ""
root_input = None
# export input_paths as it is used multiple times in the container script
# and we do not want to repeat the values.
input_paths_expr = "export INPUT_PATHS=''"
# If node is not a start step or a @parallel join then we will set the input paths.
# To set the input-paths as a parameter, we need to ensure that the node
# is not (a start node or a parallel join node). Start nodes will have no
# input paths and parallel join will derive input paths based on a
# formulaic approach using `num-parallel` and `task-id-entropy`.
if not (
node.name == "start"
or (node.type == "join" and self.graph[node.in_funcs[0]].parallel_step)
):
# For parallel joins we don't pass the INPUT_PATHS but are dynamically constructed.
# So we don't need to set the input paths.
input_paths_expr = (
"export INPUT_PATHS={{inputs.parameters.input-paths}}"
)
if (
(
self._is_conditional_join_node(node)
or self._many_in_funcs_all_conditional(node)
or self._is_conditional_skip_node(node)
)
and not (
node.type == "join"
and self.graph[node.split_parents[-1]].type == "foreach"
) # base64 encoding input-paths for foreach joins is unnecessary, as this is simply the task id of the splitting step.
and not (
node.is_inside_foreach
and self.graph[node.out_funcs[0]].type == "join"
) # do not base64 encode the input-paths of a step inside a foreach that leads to a join, as this would not match the task-id generation logic that the join relies on.
):
# NOTE: Argo template expressions that fail to resolve, output the expression itself as a value.
# With conditional steps, some of the input-paths are therefore 'broken' due to containing a nil expression
# e.g. "{{ tasks['A'].outputs.parameters.task-id }}" when task A never executed.
# We base64 encode the input-paths in order to not pollute the execution environment with templating expressions.
# NOTE: Adding conditionals that check if a key exists or not does not work either, due to an issue with how Argo
# handles tasks in a nested foreach (withParam template) leading to all such expressions getting evaluated as false.
input_paths_expr = "export INPUT_PATHS={{=toBase64(inputs.parameters['input-paths'])}}"
input_paths = "$(echo $INPUT_PATHS)"
if any(self.graph[n].type == "foreach" for n in node.in_funcs):
task_idx = "{{inputs.parameters.split-index}}"
if node.is_inside_foreach and self.graph[node.out_funcs[0]].type == "join":
if any(
self.graph[parent].matching_join
== self.graph[node.out_funcs[0]].name
for parent in self.graph[node.out_funcs[0]].split_parents
if self.graph[parent].type == "foreach"
) and any(not self.graph[f].type == "foreach" for f in node.in_funcs):
# we need to propagate the split-index and root-input-path info for
# the last step inside a foreach for correctly joining nested
# foreaches
task_idx = "{{inputs.parameters.split-index}}"
root_input = "{{inputs.parameters.root-input-path}}"
# Task string to be hashed into an ID
task_str = "-".join(
[
node.name,
"{{workflow.creationTimestamp}}",
root_input or input_paths,
task_idx,
]
)
if node.parallel_step:
task_str = "-".join(
[
"$TASK_ID_PREFIX",
"{{inputs.parameters.task-id-entropy}}",
"$TASK_ID_SUFFIX",
]
)
else:
# Generated task_ids need to be non-numeric - see register_task_id in
# service.py. We do so by prefixing `t-`
_task_id_base = (
"$(echo %s | md5sum | cut -d ' ' -f 1 | tail -c 9)" % task_str
)
task_str = "(t-%s)" % _task_id_base
task_id_expr = "export METAFLOW_TASK_ID=" "%s" % task_str
task_id = "$METAFLOW_TASK_ID"
# Resolve retry strategy.
max_user_code_retries = 0
max_error_retries = 0
minutes_between_retries = "2"
for decorator in node.decorators:
if decorator.name == "retry":
minutes_between_retries = decorator.attributes.get(
"minutes_between_retries", minutes_between_retries
)
user_code_retries, error_retries = decorator.step_task_retry_count()
max_user_code_retries = max(max_user_code_retries, user_code_retries)
max_error_retries = max(max_error_retries, error_retries)
user_code_retries = max_user_code_retries
total_retries = max_user_code_retries + max_error_retries
# {{retries}} is only available if retryStrategy is specified
# For custom kubernetes manifests, we will pass the retryCount as a parameter
# and use that in the manifest.
retry_count = (
(
"{{retries}}"
if not node.parallel_step
else "{{inputs.parameters.retryCount}}"
)
if total_retries
else 0
)
minutes_between_retries = int(minutes_between_retries)
# Configure log capture.
mflog_expr = export_mflog_env_vars(
datastore_type=self.flow_datastore.TYPE,
stdout_path="$PWD/.logs/mflog_stdout",
stderr_path="$PWD/.logs/mflog_stderr",
flow_name=self.flow.name,
run_id=run_id,
step_name=node.name,
task_id=task_id,
retry_count=retry_count,
)
init_cmds = " && ".join(
[
# For supporting sandboxes, ensure that a custom script is executed
# before anything else is executed. The script is passed in as an
# env var.
'${METAFLOW_INIT_SCRIPT:+eval \\"${METAFLOW_INIT_SCRIPT}\\"}',
"mkdir -p $PWD/.logs",
input_paths_expr,
task_id_expr,
mflog_expr,
]
+ self.environment.get_package_commands(
self.code_package_url,
self.flow_datastore.TYPE,
self.code_package_metadata,
)
)
step_cmds = self.environment.bootstrap_commands(
node.name, self.flow_datastore.TYPE
)
top_opts_dict = {
"with": [
decorator.make_decorator_spec()
for decorator in node.decorators
if not decorator.statically_defined
and decorator.inserted_by is None
]
}
# FlowDecorators can define their own top-level options. They are
# responsible for adding their own top-level options and values through
# the get_top_level_options() hook. See similar logic in runtime.py.
for deco in flow_decorators(self.flow):
top_opts_dict.update(deco.get_top_level_options())
top_level = list(dict_to_cli_options(top_opts_dict)) + [
"--quiet",
"--metadata=%s" % self.metadata.TYPE,
"--environment=%s" % self.environment.TYPE,
"--datastore=%s" % self.flow_datastore.TYPE,
"--datastore-root=%s" % self.flow_datastore.datastore_root,
"--event-logger=%s" % self.event_logger.TYPE,
"--monitor=%s" % self.monitor.TYPE,
"--no-pylint",
"--with=argo_workflows_internal:auto-emit-argo-events=%i"
% self.auto_emit_argo_events,
]
if node.name == "start":
# Execute `init` before any step of the workflow executes
task_id_params = "%s-params" % task_id
init = (
entrypoint
+ top_level
+ [
"init",
"--run-id %s" % run_id,
"--task-id %s" % task_id_params,
]
+ [
# Parameter names can be hyphenated, hence we use
# {{foo.bar['param_name']}}.
# https://argoproj.github.io/argo-events/tutorials/02-parameterization/
# http://masterminds.github.io/sprig/strings.html
"--%s=\\\"$(python -m metaflow.plugins.argo.param_val {{=toBase64(workflow.parameters['%s'])}})\\\""
% (parameter["name"], parameter["name"])
for parameter in self.parameters.values()
]
)
if self.tags:
init.extend("--tag %s" % tag for tag in self.tags)
# if the start step gets retried, we must be careful
# not to regenerate multiple parameters tasks. Hence,
# we check first if _parameters exists already.
exists = entrypoint + [
"dump",
"--max-value-size=0",
"%s/_parameters/%s" % (run_id, task_id_params),
]
step_cmds.extend(
[
"if ! %s >/dev/null 2>/dev/null; then %s; fi"
% (" ".join(exists), " ".join(init))
]
)
input_paths = "%s/_parameters/%s" % (run_id, task_id_params)
# Only for static joins and conditional_joins
elif (
self._is_conditional_join_node(node)
or self._many_in_funcs_all_conditional(node)
or self._is_conditional_skip_node(node)
) and not (
node.type == "join"
and self.graph[node.split_parents[-1]].type == "foreach"
):
# we need to pass in the set of conditional in_funcs to the pathspec generating script as in the case of split-switch skipping cases,
# non-conditional input-paths need to be ignored in favour of conditional ones when they have executed.
skippable_input_steps = ",".join(
[
in_func
for in_func in node.in_funcs
if self.graph[in_func].type == "split-switch"
]
)
input_paths = (
"$(python -m metaflow.plugins.argo.conditional_input_paths %s %s)"
% (input_paths, skippable_input_steps)
)
elif (
node.type == "join"
and self.graph[node.split_parents[-1]].type == "foreach"
):
# foreach-joins straight out of conditional branches are not yet supported
if self._is_conditional_join_node(node) and len(node.in_funcs) > 1:
raise ArgoWorkflowsException(
"Conditional steps inside a foreach that transition directly into a join step are not currently supported.\n"
"As a workaround, add a common step after the conditional steps %s "
"that will transition to a join."
% ", ".join("*%s*" % f for f in node.in_funcs)
)
# Set aggregated input-paths for a for-each join
foreach_step = next(
n for n in node.in_funcs if self.graph[n].is_inside_foreach
)
if not self.graph[node.split_parents[-1]].parallel_foreach:
input_paths = (
"$(python -m metaflow.plugins.argo.generate_input_paths %s {{workflow.creationTimestamp}} %s {{inputs.parameters.split-cardinality}})"
% (
foreach_step,
input_paths,
)
)
else:
# Handle @parallel where output from volume mount isn't accessible
input_paths = (
"$(python -m metaflow.plugins.argo.jobset_input_paths %s %s {{inputs.parameters.task-id-entropy}} {{inputs.parameters.num-parallel}})"
% (
run_id,
foreach_step,
)
)
# NOTE: input-paths might be extremely lengthy so we dump these to disk instead of passing them directly to the cmd
step_cmds.append("echo %s >> /tmp/mf-input-paths" % input_paths)
step = [
"step",
node.name,
"--run-id %s" % run_id,
"--task-id %s" % task_id,
"--retry-count %s" % retry_count,
"--max-user-code-retries %d" % user_code_retries,
"--input-paths-filename /tmp/mf-input-paths",
]
if node.parallel_step:
step.append(
"--split-index ${MF_CONTROL_INDEX:-$((MF_WORKER_REPLICA_INDEX + 1))}"
)
# This is needed for setting the value of the UBF context in the CLI.
step.append("--ubf-context $UBF_CONTEXT")
elif any(self.graph[n].type == "foreach" for n in node.in_funcs):
# Pass split-index to a foreach task
step.append("--split-index {{inputs.parameters.split-index}}")
if self.tags:
step.extend("--tag %s" % tag for tag in self.tags)
if self.namespace is not None:
step.append("--namespace=%s" % self.namespace)
step_cmds.extend([" ".join(entrypoint + top_level + step)])
cmd_str = "%s; c=$?; %s; exit $c" % (
" && ".join([init_cmds, bash_capture_logs(" && ".join(step_cmds))]),
BASH_SAVE_LOGS,
)
cmds = shlex.split('bash -c "%s"' % cmd_str)
# Resolve resource requirements.
resources = dict(
[deco for deco in node.decorators if deco.name == "kubernetes"][
0
].attributes
)
if (
resources["namespace"]
and resources["namespace"] != KUBERNETES_NAMESPACE
):
raise ArgoWorkflowsException(
"Multi-namespace Kubernetes execution of flows in Argo Workflows "
"is not currently supported. \nStep *%s* is trying to override "
"the default Kubernetes namespace *%s*."
% (node.name, KUBERNETES_NAMESPACE)
)
run_time_limit = [
deco for deco in node.decorators if deco.name == "kubernetes"
][0].run_time_limit
# Resolve @environment decorator. We set three classes of environment
# variables -
# (1) User-specified environment variables through @environment
# (2) Metaflow runtime specific environment variables
# (3) @kubernetes, @argo_workflows_internal bookkeeping environment
# variables
env = dict(
[deco for deco in node.decorators if deco.name == "environment"][
0
].attributes["vars"]
)
# Temporary passing of *some* environment variables. Do not rely on this
# mechanism as it will be removed in the near future
env.update(
{
k: v
for k, v in config_values()
if k.startswith("METAFLOW_CONDA_")
or k.startswith("METAFLOW_DEBUG_")
}
)
env.update(
{
**{
# These values are needed by Metaflow to set it's internal
# state appropriately.
"METAFLOW_CODE_METADATA": self.code_package_metadata,
"METAFLOW_CODE_URL": self.code_package_url,
"METAFLOW_CODE_SHA": self.code_package_sha,
"METAFLOW_CODE_DS": self.flow_datastore.TYPE,
"METAFLOW_SERVICE_URL": SERVICE_INTERNAL_URL,
"METAFLOW_SERVICE_HEADERS": json.dumps(SERVICE_HEADERS),
"METAFLOW_USER": "argo-workflows",
"METAFLOW_DATASTORE_SYSROOT_S3": DATASTORE_SYSROOT_S3,
"METAFLOW_DATATOOLS_S3ROOT": DATATOOLS_S3ROOT,
"METAFLOW_DEFAULT_DATASTORE": self.flow_datastore.TYPE,
"METAFLOW_DEFAULT_METADATA": DEFAULT_METADATA,
"METAFLOW_CARD_S3ROOT": CARD_S3ROOT,
"METAFLOW_KUBERNETES_WORKLOAD": 1,
"METAFLOW_KUBERNETES_FETCH_EC2_METADATA": KUBERNETES_FETCH_EC2_METADATA,
"METAFLOW_RUNTIME_ENVIRONMENT": "kubernetes",
"METAFLOW_OWNER": self.username,
},
**{
# Configuration for Argo Events. Keep these in sync with the
# environment variables for @kubernetes decorator.
"METAFLOW_ARGO_EVENTS_EVENT": ARGO_EVENTS_EVENT,
"METAFLOW_ARGO_EVENTS_EVENT_BUS": ARGO_EVENTS_EVENT_BUS,
"METAFLOW_ARGO_EVENTS_EVENT_SOURCE": ARGO_EVENTS_EVENT_SOURCE,
"METAFLOW_ARGO_EVENTS_SERVICE_ACCOUNT": ARGO_EVENTS_SERVICE_ACCOUNT,
"METAFLOW_ARGO_EVENTS_WEBHOOK_URL": ARGO_EVENTS_INTERNAL_WEBHOOK_URL,
"METAFLOW_ARGO_EVENTS_WEBHOOK_AUTH": ARGO_EVENTS_WEBHOOK_AUTH,
},
**{
# Some optional values for bookkeeping
"METAFLOW_FLOW_FILENAME": os.path.basename(sys.argv[0]),
"METAFLOW_FLOW_NAME": self.flow.name,
"METAFLOW_STEP_NAME": node.name,
"METAFLOW_RUN_ID": run_id,
# "METAFLOW_TASK_ID": task_id,
"METAFLOW_RETRY_COUNT": retry_count,
"METAFLOW_PRODUCTION_TOKEN": self.production_token,
"ARGO_WORKFLOW_TEMPLATE": self.name,
"ARGO_WORKFLOW_NAME": "{{workflow.name}}",
"ARGO_WORKFLOW_NAMESPACE": KUBERNETES_NAMESPACE,
},
**self.metadata.get_runtime_environment("argo-workflows"),
}
)
# add METAFLOW_S3_ENDPOINT_URL
env["METAFLOW_S3_ENDPOINT_URL"] = S3_ENDPOINT_URL
# support Metaflow sandboxes
env["METAFLOW_INIT_SCRIPT"] = KUBERNETES_SANDBOX_INIT_SCRIPT
env["METAFLOW_KUBERNETES_SANDBOX_INIT_SCRIPT"] = (
KUBERNETES_SANDBOX_INIT_SCRIPT
)
# support for @secret
env["METAFLOW_DEFAULT_SECRETS_BACKEND_TYPE"] = DEFAULT_SECRETS_BACKEND_TYPE
env["METAFLOW_AWS_SECRETS_MANAGER_DEFAULT_REGION"] = (
AWS_SECRETS_MANAGER_DEFAULT_REGION
)
env["METAFLOW_GCP_SECRET_MANAGER_PREFIX"] = GCP_SECRET_MANAGER_PREFIX
env["METAFLOW_AZURE_KEY_VAULT_PREFIX"] = AZURE_KEY_VAULT_PREFIX
# support for Azure
env["METAFLOW_AZURE_STORAGE_BLOB_SERVICE_ENDPOINT"] = (
AZURE_STORAGE_BLOB_SERVICE_ENDPOINT
)
env["METAFLOW_DATASTORE_SYSROOT_AZURE"] = DATASTORE_SYSROOT_AZURE
env["METAFLOW_CARD_AZUREROOT"] = CARD_AZUREROOT
env["METAFLOW_ARGO_WORKFLOWS_KUBERNETES_SECRETS"] = (
ARGO_WORKFLOWS_KUBERNETES_SECRETS
)
env["METAFLOW_ARGO_WORKFLOWS_ENV_VARS_TO_SKIP"] = (
ARGO_WORKFLOWS_ENV_VARS_TO_SKIP
)
# support for GCP
env["METAFLOW_DATASTORE_SYSROOT_GS"] = DATASTORE_SYSROOT_GS
env["METAFLOW_CARD_GSROOT"] = CARD_GSROOT
# Map Argo Events payload (if any) to environment variables
if self.triggers:
for event in self.triggers:
env[
"METAFLOW_ARGO_EVENT_PAYLOAD_%s_%s"
% (event["type"], event["sanitized_name"])
] = ("{{workflow.parameters.%s}}" % event["sanitized_name"])
# Map S3 upload headers to environment variables
if S3_SERVER_SIDE_ENCRYPTION is not None:
env["METAFLOW_S3_SERVER_SIDE_ENCRYPTION"] = S3_SERVER_SIDE_ENCRYPTION
metaflow_version = self.environment.get_environment_info()
metaflow_version["flow_name"] = self.graph.name
metaflow_version["production_token"] = self.production_token
env["METAFLOW_VERSION"] = json.dumps(metaflow_version)
# map config values
cfg_env = {
param["name"]: param["kv_name"] for param in self.config_parameters
}
if cfg_env:
env["METAFLOW_FLOW_CONFIG_VALUE"] = json.dumps(cfg_env)
# Set the template inputs and outputs for passing state. Very simply,
# the container template takes in input-paths as input and outputs
# the task-id (which feeds in as input-paths to the subsequent task).
# In addition to that, if the parent of the node under consideration
# is a for-each node, then we take the split-index as an additional
# input. Analogously, if the node under consideration is a foreach
# node, then we emit split cardinality as an extra output. I would like
# to thank the designers of Argo Workflows for making this so
# straightforward! Things become a bit more complicated to support very
# wide foreaches where we have to resort to passing a root-input-path
# so that we can compute the task ids for each parent task of a for-each
# join task deterministically inside the join task without resorting to
# passing a rather long list of (albiet compressed)
inputs = []
# To set the input-paths as a parameter, we need to ensure that the node
# is not (a start node or a parallel join node). Start nodes will have no
# input paths and parallel join will derive input paths based on a
# formulaic approach.
if not (
node.name == "start"
or (node.type == "join" and self.graph[node.in_funcs[0]].parallel_step)
):
inputs.append(Parameter("input-paths"))
if any(self.graph[n].type == "foreach" for n in node.in_funcs):
# Fetch split-index from parent
inputs.append(Parameter("split-index"))
if (
node.type == "join"
and self.graph[node.split_parents[-1]].type == "foreach"
):
# @parallel join tasks require `num-parallel` and `task-id-entropy`
# to construct the input paths, so we pass them down as input parameters.
if self.graph[node.split_parents[-1]].parallel_foreach:
inputs.extend(
[Parameter("num-parallel"), Parameter("task-id-entropy")]
)
else:
# append these only for joins of foreaches, not static splits
inputs.append(Parameter("split-cardinality"))
# check if the node is a @parallel node.
elif node.parallel_step:
inputs.extend(
[
Parameter("num-parallel"),
Parameter("task-id-entropy"),
Parameter("jobset-name"),
Parameter("workerCount"),
]
)
# {{retries}} is only available if retryStrategy is specified in the template.
# Only add retryCount input parameter if total_retries > 0.
if total_retries > 0:
inputs.append(Parameter("retryCount"))
if node.is_inside_foreach and self.graph[node.out_funcs[0]].type == "join":
if any(
self.graph[parent].matching_join
== self.graph[node.out_funcs[0]].name
for parent in self.graph[node.out_funcs[0]].split_parents
if self.graph[parent].type == "foreach"
) and any(not self.graph[f].type == "foreach" for f in node.in_funcs):
# we need to propagate the split-index and root-input-path info for
# the last step inside a foreach for correctly joining nested
# foreaches
if not any(self.graph[n].type == "foreach" for n in node.in_funcs):
# Don't add duplicate split index parameters.
inputs.append(Parameter("split-index"))
inputs.append(Parameter("root-input-path"))
outputs = []
# @parallel steps will not have a task-id as an output parameter since task-ids
# are derived at runtime.
if not (node.name == "end" or node.parallel_step):
outputs = [Parameter("task-id").valueFrom({"path": "/mnt/out/task_id"})]
# If this step is a split-switch one, we need to output the switch step name
if node.type == "split-switch":
outputs.append(
Parameter("switch-step").valueFrom({"path": "/mnt/out/switch_step"})
)
if node.type == "foreach":
# Emit split cardinality from foreach task
outputs.append(
Parameter("num-splits").valueFrom({"path": "/mnt/out/splits"})
)
outputs.append(
Parameter("split-cardinality").valueFrom(
{"path": "/mnt/out/split_cardinality"}
)
)
if node.parallel_foreach:
outputs.extend(
[
Parameter("num-parallel").valueFrom(
{"path": "/mnt/out/num_parallel"}
),
Parameter("task-id-entropy").valueFrom(
{"path": "/mnt/out/task_id_entropy"}
),
]
)
# Outputs should be defined over here and not in the _dag_template for @parallel.
# It makes no sense to set env vars to None (shows up as "None" string)
# Also we skip some env vars (e.g. in case we want to pull them from KUBERNETES_SECRETS)
env = {
k: v
for k, v in env.items()
if v is not None
and k not in set(ARGO_WORKFLOWS_ENV_VARS_TO_SKIP.split(","))
}
# Tmpfs variables
use_tmpfs = resources["use_tmpfs"]
tmpfs_size = resources["tmpfs_size"]
tmpfs_path = resources["tmpfs_path"]
tmpfs_tempdir = resources["tmpfs_tempdir"]
# Set shared_memory to 0 if it isn't specified. This results
# in Kubernetes using it's default value when the pod is created.
shared_memory = resources.get("shared_memory", 0)
port = resources.get("port", None)
if port:
port = int(port)
tmpfs_enabled = use_tmpfs or (tmpfs_size and not use_tmpfs)
if tmpfs_enabled and tmpfs_tempdir:
env["METAFLOW_TEMPDIR"] = tmpfs_path
qos_requests, qos_limits = qos_requests_and_limits(
resources["qos"],
resources["cpu"],
resources["memory"],
resources["disk"],
)
security_context = resources.get("security_context", None)
_security_context = {}
if security_context is not None and len(security_context) > 0:
_security_context = {
"security_context": kubernetes_sdk.V1SecurityContext(
**security_context
)
}
# Create a ContainerTemplate for this node. Ideally, we would have
# liked to inline this ContainerTemplate and avoid scanning the workflow
# twice, but due to issues with variable substitution, we will have to
# live with this routine.
if node.parallel_step:
jobset_name = "{{inputs.parameters.jobset-name}}"
jobset = KubernetesArgoJobSet(
kubernetes_sdk=kubernetes_sdk,
name=jobset_name,
flow_name=self.flow.name,
run_id=run_id,
step_name=self._sanitize(node.name),
task_id=task_id,
attempt=retry_count,
user=self.username,
subdomain=jobset_name,
command=cmds,
namespace=resources["namespace"],
image=resources["image"],
image_pull_policy=resources["image_pull_policy"],
image_pull_secrets=resources["image_pull_secrets"],
service_account=resources["service_account"],
secrets=(
[
k
for k in (
list(
[]
if not resources.get("secrets")
else (
[resources.get("secrets")]
if isinstance(resources.get("secrets"), str)
else resources.get("secrets")
)
)
+ KUBERNETES_SECRETS.split(",")
+ ARGO_WORKFLOWS_KUBERNETES_SECRETS.split(",")
)
if k
]
),
node_selector=resources.get("node_selector"),
cpu=str(resources["cpu"]),
memory=str(resources["memory"]),
disk=str(resources["disk"]),
gpu=resources["gpu"],
gpu_vendor=str(resources["gpu_vendor"]),
tolerations=resources["tolerations"],
use_tmpfs=use_tmpfs,
tmpfs_tempdir=tmpfs_tempdir,
tmpfs_size=tmpfs_size,
tmpfs_path=tmpfs_path,
timeout_in_seconds=run_time_limit,
persistent_volume_claims=resources["persistent_volume_claims"],
shared_memory=shared_memory,
port=port,
qos=resources["qos"],
security_context=security_context,
)
for k, v in env.items():
jobset.environment_variable(k, v)
# Set labels. Do not allow user-specified task labels to override internal ones.
#
# Explicitly add the task-id-hint label. This is important because this label
# is returned as an Output parameter of this step and is used subsequently as an
# an input in the join step.
kubernetes_labels = {
"task_id_entropy": "{{inputs.parameters.task-id-entropy}}",
"num_parallel": "{{inputs.parameters.num-parallel}}",
"metaflow/argo-workflows-name": "{{workflow.name}}",
"workflows.argoproj.io/workflow": "{{workflow.name}}",
}
jobset.labels(
{
**resources["labels"],
**self._base_labels,
**kubernetes_labels,
}
)
jobset.environment_variable(
"MF_MASTER_ADDR", jobset.jobset_control_addr
)
jobset.environment_variable("MF_MASTER_PORT", str(port))
jobset.environment_variable(
"MF_WORLD_SIZE", "{{inputs.parameters.num-parallel}}"
)
# We need this task-id set so that all the nodes are aware of the control
# task's task-id. These "MF_" variables populate the `current.parallel` namedtuple
jobset.environment_variable(
"MF_PARALLEL_CONTROL_TASK_ID",
"control-{{inputs.parameters.task-id-entropy}}-0",
)
# for k, v in .items():
jobset.environment_variables_from_selectors(
{
"MF_WORKER_REPLICA_INDEX": "metadata.annotations['jobset.sigs.k8s.io/job-index']",
"JOBSET_RESTART_ATTEMPT": "metadata.annotations['jobset.sigs.k8s.io/restart-attempt']",
"METAFLOW_KUBERNETES_JOBSET_NAME": "metadata.annotations['jobset.sigs.k8s.io/jobset-name']",
"METAFLOW_KUBERNETES_POD_NAMESPACE": "metadata.namespace",
"METAFLOW_KUBERNETES_POD_NAME": "metadata.name",
"METAFLOW_KUBERNETES_POD_ID": "metadata.uid",
"METAFLOW_KUBERNETES_SERVICE_ACCOUNT_NAME": "spec.serviceAccountName",
"METAFLOW_KUBERNETES_NODE_IP": "status.hostIP",
"TASK_ID_SUFFIX": "metadata.annotations['jobset.sigs.k8s.io/job-index']",
}
)
# Set annotations. Do not allow user-specified task-specific annotations to override internal ones.
annotations = {
# setting annotations explicitly as they wont be
# passed down from WorkflowTemplate level
"metaflow/step_name": node.name,
"metaflow/attempt": str(retry_count),
"metaflow/run_id": run_id,
}
jobset.annotations(
{
**resources["annotations"],
**self._base_annotations,
**annotations,
}
)
jobset.control.replicas(1)
jobset.worker.replicas("{{=asInt(inputs.parameters.workerCount)}}")
jobset.control.environment_variable("UBF_CONTEXT", UBF_CONTROL)
jobset.worker.environment_variable("UBF_CONTEXT", UBF_TASK)
jobset.control.environment_variable("MF_CONTROL_INDEX", "0")
# `TASK_ID_PREFIX` needs to explicitly be `control` or `worker`
# because the join task uses a formulaic approach to infer the task-ids
jobset.control.environment_variable("TASK_ID_PREFIX", "control")
jobset.worker.environment_variable("TASK_ID_PREFIX", "worker")
yield (
Template(ArgoWorkflows._sanitize(node.name))
.resource(
"create",
jobset.dump(),
"status.terminalState == Completed",
"status.terminalState == Failed",
)
.inputs(Inputs().parameters(inputs))
.outputs(
Outputs().parameters(
[
Parameter("task-id-entropy").valueFrom(
{"jsonPath": "{.metadata.labels.task_id_entropy}"}
),
Parameter("num-parallel").valueFrom(
{"jsonPath": "{.metadata.labels.num_parallel}"}
),
]
)
)
.retry_strategy(
times=total_retries,
minutes_between_retries=minutes_between_retries,
)
)
else:
template_name = self._sanitize(node.name)
if self._is_recursive_node(node):
# The recursive template has the original step name,
# this becomes a template within the recursive ones 'steps'
template_name = self._sanitize("recursive-%s" % node.name)
yield (
Template(template_name)
# Set @timeout values
.active_deadline_seconds(run_time_limit)
# Set service account
.service_account_name(resources["service_account"])
# Configure template input
.inputs(Inputs().parameters(inputs))
# Configure template output
.outputs(Outputs().parameters(outputs))
# Fail fast!
.fail_fast()
# Set @retry/@catch values
.retry_strategy(
times=total_retries,
minutes_between_retries=minutes_between_retries,
)
.metadata(
ObjectMeta()
.annotation("metaflow/step_name", node.name)
# Unfortunately, we can't set the task_id since it is generated
# inside the pod. However, it can be inferred from the annotation
# set by argo-workflows - `workflows.argoproj.io/outputs` - refer
# the field 'task-id' in 'parameters'
# .annotation("metaflow/task_id", ...)
.annotation("metaflow/attempt", retry_count)
.annotations(resources["annotations"])
.labels(resources["labels"])
)
# Set emptyDir volume for state management
.empty_dir_volume("out")
# Set tmpfs emptyDir volume if enabled
.empty_dir_volume(
"tmpfs-ephemeral-volume",
medium="Memory",
size_limit=tmpfs_size if tmpfs_enabled else 0,
)
.empty_dir_volume("dhsm", medium="Memory", size_limit=shared_memory)
.pvc_volumes(resources.get("persistent_volume_claims"))
# Set node selectors
.node_selectors(resources.get("node_selector"))
# Set tolerations
.tolerations(resources.get("tolerations"))
# Set image pull secrets if present. We need to use pod_spec_patch due to Argo not supporting this on a template level.
.pod_spec_patch(
{
"imagePullSecrets": [
{"name": secret}
for secret in resources["image_pull_secrets"]
]
}
if resources["image_pull_secrets"]
else None
)
# Set container
.container(
# TODO: Unify the logic with kubernetes.py
# Important note - Unfortunately, V1Container uses snakecase while
# Argo Workflows uses camel. For most of the attributes, both cases
# are indistinguishable, but unfortunately, not for all - (
# env_from, value_from, etc.) - so we need to handle the conversion
# ourselves using to_camelcase. We need to be vigilant about
# resources attributes in particular where the keys maybe user
# defined.
to_camelcase(
kubernetes_sdk.V1Container(
name=self._sanitize(node.name),
command=cmds,
termination_message_policy="FallbackToLogsOnError",
ports=(
[
kubernetes_sdk.V1ContainerPort(
container_port=port
)
]
if port
else None
),
env=[
kubernetes_sdk.V1EnvVar(name=k, value=str(v))
for k, v in env.items()
]
# Add environment variables for book-keeping.
# https://argoproj.github.io/argo-workflows/fields/#fields_155
+ [
kubernetes_sdk.V1EnvVar(
name=k,
value_from=kubernetes_sdk.V1EnvVarSource(
field_ref=kubernetes_sdk.V1ObjectFieldSelector(
field_path=str(v)
)
),
)
for k, v in {
"METAFLOW_KUBERNETES_NAMESPACE": "metadata.namespace",
"METAFLOW_KUBERNETES_POD_NAMESPACE": "metadata.namespace",
"METAFLOW_KUBERNETES_POD_NAME": "metadata.name",
"METAFLOW_KUBERNETES_POD_ID": "metadata.uid",
"METAFLOW_KUBERNETES_SERVICE_ACCOUNT_NAME": "spec.serviceAccountName",
"METAFLOW_KUBERNETES_NODE_IP": "status.hostIP",
}.items()
],
image=resources["image"],
image_pull_policy=resources["image_pull_policy"],
resources=kubernetes_sdk.V1ResourceRequirements(
requests=qos_requests,
limits={
**qos_limits,
**{
"%s.com/gpu".lower()
% resources["gpu_vendor"]: str(
resources["gpu"]
)
for k in [0]
if resources["gpu"] is not None
},
},
),
# Configure secrets
env_from=[
kubernetes_sdk.V1EnvFromSource(
secret_ref=kubernetes_sdk.V1SecretEnvSource(
name=str(k),
# optional=True
)
)
for k in list(
[]
if not resources.get("secrets")
else (
[resources.get("secrets")]
if isinstance(resources.get("secrets"), str)
else resources.get("secrets")
)
)
+ KUBERNETES_SECRETS.split(",")
+ ARGO_WORKFLOWS_KUBERNETES_SECRETS.split(",")
if k
],
volume_mounts=[
# Assign a volume mount to pass state to the next task.
kubernetes_sdk.V1VolumeMount(
name="out", mount_path="/mnt/out"
)
]
# Support tmpfs.
+ (
[
kubernetes_sdk.V1VolumeMount(
name="tmpfs-ephemeral-volume",
mount_path=tmpfs_path,
)
]
if tmpfs_enabled
else []
)
# Support shared_memory
+ (
[
kubernetes_sdk.V1VolumeMount(
name="dhsm",
mount_path="/dev/shm",
)
]
if shared_memory
else []
)
# Support persistent volume claims.
+ (
[
kubernetes_sdk.V1VolumeMount(
name=claim, mount_path=path
)
for claim, path in resources.get(
"persistent_volume_claims"
).items()
]
if resources.get("persistent_volume_claims")
is not None
else []
),
**_security_context,
).to_dict()
)
)
)
# Return daemon container templates that run alongside the workflow (currently only the heartbeat daemon).
def _daemon_templates(self):
templates = []
if self.enable_heartbeat_daemon:
templates.append(self._heartbeat_daemon_template())
return templates
# Return lifecycle hooks for workflow execution notifications and user-defined @exit_hook hooks.
def _lifecycle_hooks(self):
hooks = []
if self.notify_on_error:
hooks.append(self._slack_error_template())
hooks.append(self._pager_duty_alert_template())
hooks.append(self._incident_io_alert_template())
if self.notify_on_success:
hooks.append(self._slack_success_template())
hooks.append(self._pager_duty_change_template())
hooks.append(self._incident_io_change_template())
exit_hook_decos = self.flow._flow_decorators.get("exit_hook", [])
for deco in exit_hook_decos:
hooks.extend(self._lifecycle_hook_from_deco(deco))
# Clean up None values from templates.
hooks = list(filter(None, hooks))
if hooks:
hooks.append(
ExitHookHack(
url=(
self.notify_slack_webhook_url
or "https://events.pagerduty.com/v2/enqueue"
)
)
)
return hooks
def _lifecycle_hook_from_deco(self, deco):
    # Translate a single @exit_hook decorator into Argo lifecycle hooks:
    # one ContainerHook per declared success/error hook function. Each hook
    # runs its function in a dedicated container after the workflow ends.
    from kubernetes import client as kubernetes_sdk
    start_step = [step for step in self.graph if step.name == "start"][0]
    # We want to grab the base image used by the start step, as this is known to be pullable from within the cluster,
    # and it might contain the required libraries, allowing us to start up faster.
    start_kube_deco = [
        deco for deco in start_step.decorators if deco.name == "kubernetes"
    ][0]
    resources = dict(start_kube_deco.attributes)
    kube_defaults = dict(start_kube_deco.defaults)
    run_id_template = "argo-{{workflow.name}}"
    metaflow_version = self.environment.get_environment_info()
    metaflow_version["flow_name"] = self.graph.name
    metaflow_version["production_token"] = self.production_token
    env = {
        # These values are needed by Metaflow to set its internal
        # state appropriately.
        "METAFLOW_CODE_URL": self.code_package_url,
        "METAFLOW_CODE_SHA": self.code_package_sha,
        "METAFLOW_CODE_DS": self.flow_datastore.TYPE,
        "METAFLOW_SERVICE_URL": SERVICE_INTERNAL_URL,
        "METAFLOW_SERVICE_HEADERS": json.dumps(SERVICE_HEADERS),
        "METAFLOW_USER": "argo-workflows",
        "METAFLOW_DEFAULT_DATASTORE": self.flow_datastore.TYPE,
        "METAFLOW_DEFAULT_METADATA": DEFAULT_METADATA,
        "METAFLOW_OWNER": self.username,
    }
    # pass on the Run pathspec for script
    env["RUN_PATHSPEC"] = f"{self.graph.name}/{run_id_template}"
    # support Metaflow sandboxes
    env["METAFLOW_INIT_SCRIPT"] = KUBERNETES_SANDBOX_INIT_SCRIPT
    env["METAFLOW_WORKFLOW_NAME"] = "{{workflow.name}}"
    env["METAFLOW_WORKFLOW_NAMESPACE"] = "{{workflow.namespace}}"
    # Drop unset values and any variables configured to be skipped.
    env = {
        k: v
        for k, v in env.items()
        if v is not None
        and k not in set(ARGO_WORKFLOWS_ENV_VARS_TO_SKIP.split(","))
    }
    def _cmd(fn_name):
        # Shell command that bootstraps the code package and then invokes
        # exit-hook function `fn_name`, with output captured through mflog.
        mflog_expr = export_mflog_env_vars(
            datastore_type=self.flow_datastore.TYPE,
            stdout_path="$PWD/.logs/mflog_stdout",
            stderr_path="$PWD/.logs/mflog_stderr",
            flow_name=self.flow.name,
            run_id=run_id_template,
            step_name=f"_hook_{fn_name}",
            task_id="1",
            retry_count="0",
        )
        cmds = " && ".join(
            [
                # For supporting sandboxes, ensure that a custom script is executed
                # before anything else is executed. The script is passed in as an
                # env var.
                '${METAFLOW_INIT_SCRIPT:+eval \\"${METAFLOW_INIT_SCRIPT}\\"}',
                "mkdir -p $PWD/.logs",
                mflog_expr,
            ]
            + self.environment.get_package_commands(
                self.code_package_url, self.flow_datastore.TYPE
            )[:-1]
            # Replace the line 'Task in starting'
            + [f"mflog 'Lifecycle hook {fn_name} is starting.'"]
            + [
                f"python -m metaflow.plugins.exit_hook.exit_hook_script {metaflow_version['script']} {fn_name} $RUN_PATHSPEC"
            ]
        )
        cmds = shlex.split('bash -c "%s"' % cmds)
        return cmds
    def _container(cmds):
        # V1Container spec (converted to camelCase for Argo) running `cmds`
        # with either the hook's image override or the start step's image.
        return to_camelcase(
            kubernetes_sdk.V1Container(
                name="main",
                command=cmds,
                image=deco.attributes["options"].get("image", None)
                or resources["image"],
                env=[
                    kubernetes_sdk.V1EnvVar(name=k, value=str(v))
                    for k, v in env.items()
                ],
                # Expose @kubernetes secrets plus globally configured
                # Kubernetes/Argo secrets as env vars.
                env_from=[
                    kubernetes_sdk.V1EnvFromSource(
                        secret_ref=kubernetes_sdk.V1SecretEnvSource(
                            name=str(k),
                            # optional=True
                        )
                    )
                    for k in list(
                        []
                        if not resources.get("secrets")
                        else (
                            [resources.get("secrets")]
                            if isinstance(resources.get("secrets"), str)
                            else resources.get("secrets")
                        )
                    )
                    + KUBERNETES_SECRETS.split(",")
                    + ARGO_WORKFLOWS_KUBERNETES_SECRETS.split(",")
                    if k
                ],
                resources=kubernetes_sdk.V1ResourceRequirements(
                    requests={
                        "cpu": str(kube_defaults["cpu"]),
                        "memory": "%sM" % str(kube_defaults["memory"]),
                    }
                ),
            ).to_dict()
        )
    # create lifecycle hooks from deco
    hooks = []
    for success_fn_name in deco.success_hooks:
        hook = ContainerHook(
            name=f"success-{success_fn_name.replace('_', '-')}",
            container=_container(cmds=_cmd(success_fn_name)),
            service_account_name=resources["service_account"],
            on_success=True,
        )
        hooks.append(hook)
    for error_fn_name in deco.error_hooks:
        hook = ContainerHook(
            name=f"error-{error_fn_name.replace('_', '-')}",
            service_account_name=resources["service_account"],
            container=_container(cmds=_cmd(error_fn_name)),
            on_error=True,
        )
        hooks.append(hook)
    return hooks
def _exit_hook_templates(self):
templates = []
if self.enable_error_msg_capture:
templates.extend(self._error_msg_capture_hook_templates())
return templates
def _error_msg_capture_hook_templates(self):
    # Build the exit-hook templates that capture and print the error message
    # of a failed run (active when enable_error_msg_capture is set). Returns
    # two templates: the capture container and a preflight steps wrapper that
    # only fires when the workflow did not succeed.
    from kubernetes import client as kubernetes_sdk
    start_step = [step for step in self.graph if step.name == "start"][0]
    # We want to grab the base image used by the start step, as this is known to be pullable from within the cluster,
    # and it might contain the required libraries, allowing us to start up faster.
    resources = dict(
        [deco for deco in start_step.decorators if deco.name == "kubernetes"][
            0
        ].attributes
    )
    run_id_template = "argo-{{workflow.name}}"
    metaflow_version = self.environment.get_environment_info()
    metaflow_version["flow_name"] = self.graph.name
    metaflow_version["production_token"] = self.production_token
    # Route the hook's stdout/stderr through mflog so its logs are persisted
    # like regular task logs.
    mflog_expr = export_mflog_env_vars(
        datastore_type=self.flow_datastore.TYPE,
        stdout_path="$PWD/.logs/mflog_stdout",
        stderr_path="$PWD/.logs/mflog_stderr",
        flow_name=self.flow.name,
        run_id=run_id_template,
        step_name="_run_capture_error",
        task_id="1",
        retry_count="0",
    )
    cmds = " && ".join(
        [
            # For supporting sandboxes, ensure that a custom script is executed
            # before anything else is executed. The script is passed in as an
            # env var.
            '${METAFLOW_INIT_SCRIPT:+eval \\"${METAFLOW_INIT_SCRIPT}\\"}',
            "mkdir -p $PWD/.logs",
            mflog_expr,
        ]
        + self.environment.get_package_commands(
            self.code_package_url,
            self.flow_datastore.TYPE,
            self.code_package_metadata,
        )[:-1]
        # Replace the line 'Task in starting'
        # FIXME: this can be brittle.
        + ["mflog 'Error capture hook is starting.'"]
        + ["argo_error=$(python -m 'metaflow.plugins.argo.capture_error')"]
        + ["export METAFLOW_ARGO_ERROR=$argo_error"]
        + [
            """python -c 'import json, os; error_obj=os.getenv(\\"METAFLOW_ARGO_ERROR\\");data=json.loads(error_obj); print(data[\\"message\\"])'"""
        ]
        + [
            'if [ -n \\"${METAFLOW_ARGO_WORKFLOWS_CAPTURE_ERROR_SCRIPT}\\" ]; then eval \\"${METAFLOW_ARGO_WORKFLOWS_CAPTURE_ERROR_SCRIPT}\\"; fi'
        ]
    )
    # TODO: Also capture the first failed task id
    cmds = shlex.split('bash -c "%s"' % cmds)
    env = {
        # These values are needed by Metaflow to set its internal
        # state appropriately.
        "METAFLOW_CODE_METADATA": self.code_package_metadata,
        "METAFLOW_CODE_URL": self.code_package_url,
        "METAFLOW_CODE_SHA": self.code_package_sha,
        "METAFLOW_CODE_DS": self.flow_datastore.TYPE,
        "METAFLOW_SERVICE_URL": SERVICE_INTERNAL_URL,
        "METAFLOW_SERVICE_HEADERS": json.dumps(SERVICE_HEADERS),
        "METAFLOW_USER": "argo-workflows",
        "METAFLOW_DEFAULT_DATASTORE": self.flow_datastore.TYPE,
        "METAFLOW_DEFAULT_METADATA": DEFAULT_METADATA,
        "METAFLOW_OWNER": self.username,
    }
    # support Metaflow sandboxes
    env["METAFLOW_INIT_SCRIPT"] = KUBERNETES_SANDBOX_INIT_SCRIPT
    env["METAFLOW_ARGO_WORKFLOWS_CAPTURE_ERROR_SCRIPT"] = (
        ARGO_WORKFLOWS_CAPTURE_ERROR_SCRIPT
    )
    env["METAFLOW_WORKFLOW_NAME"] = "{{workflow.name}}"
    env["METAFLOW_WORKFLOW_NAMESPACE"] = "{{workflow.namespace}}"
    env["METAFLOW_ARGO_WORKFLOW_FAILURES"] = "{{workflow.failures}}"
    # Drop unset values and any variables configured to be skipped.
    env = {
        k: v
        for k, v in env.items()
        if v is not None
        and k not in set(ARGO_WORKFLOWS_ENV_VARS_TO_SKIP.split(","))
    }
    return [
        Template("error-msg-capture-hook")
        .service_account_name(resources["service_account"])
        .container(
            to_camelcase(
                kubernetes_sdk.V1Container(
                    name="main",
                    command=cmds,
                    image=resources["image"],
                    env=[
                        kubernetes_sdk.V1EnvVar(name=k, value=str(v))
                        for k, v in env.items()
                    ],
                    # Expose @kubernetes secrets plus globally configured
                    # Kubernetes/Argo secrets as env vars.
                    env_from=[
                        kubernetes_sdk.V1EnvFromSource(
                            secret_ref=kubernetes_sdk.V1SecretEnvSource(
                                name=str(k),
                                # optional=True
                            )
                        )
                        for k in list(
                            []
                            if not resources.get("secrets")
                            else (
                                [resources.get("secrets")]
                                if isinstance(resources.get("secrets"), str)
                                else resources.get("secrets")
                            )
                        )
                        + KUBERNETES_SECRETS.split(",")
                        + ARGO_WORKFLOWS_KUBERNETES_SECRETS.split(",")
                        if k
                    ],
                    resources=kubernetes_sdk.V1ResourceRequirements(
                        # NOTE: base resources for this are kept to a minimum to save on running costs.
                        # This has an adverse effect on startup time for the daemon, which can be completely
                        # alleviated by using a base image that has the required dependencies pre-installed
                        requests={
                            "cpu": "200m",
                            "memory": "100Mi",
                        },
                        limits={
                            "cpu": "200m",
                            "memory": "500Mi",
                        },
                    ),
                ).to_dict()
            )
        ),
        Template("capture-error-hook-fn-preflight").steps(
            [
                WorkflowStep()
                .name("capture-error-hook-fn-preflight")
                .template("error-msg-capture-hook")
                .when("{{workflow.status}} != Succeeded")
            ]
        ),
    ]
def _pager_duty_alert_template(self):
# https://developer.pagerduty.com/docs/ZG9jOjExMDI5NTgx-send-an-alert-event
if self.notify_pager_duty_integration_key is None:
return None
return HttpExitHook(
name="notify-pager-duty-on-error",
method="POST",
url="https://events.pagerduty.com/v2/enqueue",
headers={"Content-Type": "application/json"},
body=json.dumps(
{
"event_action": "trigger",
"routing_key": self.notify_pager_duty_integration_key,
# "dedup_key": self.flow.name, # TODO: Do we need deduplication?
"payload": {
"source": "{{workflow.name}}",
"severity": "info",
"summary": "Metaflow run %s/argo-{{workflow.name}} failed!"
% self.flow.name,
"custom_details": {
"Flow": self.flow.name,
"Run ID": "argo-{{workflow.name}}",
},
},
"links": self._pager_duty_notification_links(),
}
),
on_error=True,
)
def _incident_io_alert_template(self):
if self.notify_incident_io_api_key is None:
return None
if self.incident_io_alert_source_config_id is None:
raise MetaflowException(
"Creating alerts for errors requires a alert source config ID."
)
ui_links = self._incident_io_ui_urls_for_run()
return HttpExitHook(
name="notify-incident-io-on-error",
method="POST",
url=(
"https://api.incident.io/v2/alert_events/http/%s"
% self.incident_io_alert_source_config_id
),
headers={
"Content-Type": "application/json",
"Authorization": "Bearer %s" % self.notify_incident_io_api_key,
},
body=json.dumps(
{
"idempotency_key": "argo-{{workflow.name}}", # use run id to deduplicate alerts.
"status": "firing",
"title": "Flow %s has failed." % self.flow.name,
"description": "Metaflow run {run_pathspec} failed!{urls}".format(
run_pathspec="%s/argo-{{workflow.name}}" % self.flow.name,
urls=(
"\n\nSee details for the run at:\n\n"
+ "\n\n".join(ui_links)
if ui_links
else ""
),
),
"source_url": (
"%s/%s/%s"
% (
UI_URL.rstrip("/"),
self.flow.name,
"argo-{{workflow.name}}",
)
if UI_URL
else None
),
"metadata": {
**(self.incident_io_metadata or {}),
**{
"run_status": "failed",
"flow_name": self.flow.name,
"run_id": "argo-{{workflow.name}}",
},
},
}
),
on_error=True,
)
def _incident_io_change_template(self):
if self.notify_incident_io_api_key is None:
return None
if self.incident_io_alert_source_config_id is None:
raise MetaflowException(
"Creating alerts for successes requires an alert source config ID."
)
ui_links = self._incident_io_ui_urls_for_run()
return HttpExitHook(
name="notify-incident-io-on-success",
method="POST",
url=(
"https://api.incident.io/v2/alert_events/http/%s"
% self.incident_io_alert_source_config_id
),
headers={
"Content-Type": "application/json",
"Authorization": "Bearer %s" % self.notify_incident_io_api_key,
},
body=json.dumps(
{
"idempotency_key": "argo-{{workflow.name}}", # use run id to deduplicate alerts.
"status": "firing",
"title": "Flow %s has succeeded." % self.flow.name,
"description": "Metaflow run {run_pathspec} succeeded!{urls}".format(
run_pathspec="%s/argo-{{workflow.name}}" % self.flow.name,
urls=(
"\n\nSee details for the run at:\n\n"
+ "\n\n".join(ui_links)
if ui_links
else ""
),
),
"source_url": (
"%s/%s/%s"
% (
UI_URL.rstrip("/"),
self.flow.name,
"argo-{{workflow.name}}",
)
if UI_URL
else None
),
"metadata": {
**(self.incident_io_metadata or {}),
**{
"run_status": "succeeded",
"flow_name": self.flow.name,
"run_id": "argo-{{workflow.name}}",
},
},
}
),
on_success=True,
)
def _incident_io_ui_urls_for_run(self):
    """Markdown links to this run in the Metaflow UI and/or Argo UI,
    depending on which UI URLs are configured."""
    run_links = []
    if UI_URL:
        run_links.append(
            "[Metaflow UI](%s/%s/%s)"
            % (UI_URL.rstrip("/"), self.flow.name, "argo-{{workflow.name}}")
        )
    if ARGO_WORKFLOWS_UI_URL:
        run_links.append(
            "[Argo UI](%s/workflows/%s/%s)"
            % (
                ARGO_WORKFLOWS_UI_URL.rstrip("/"),
                "{{workflow.namespace}}",
                "{{workflow.name}}",
            )
        )
    return run_links
def _pager_duty_change_template(self):
# https://developer.pagerduty.com/docs/ZG9jOjExMDI5NTgy-send-a-change-event
if self.notify_pager_duty_integration_key is None:
return None
return HttpExitHook(
name="notify-pager-duty-on-success",
method="POST",
url="https://events.pagerduty.com/v2/change/enqueue",
headers={"Content-Type": "application/json"},
body=json.dumps(
{
"routing_key": self.notify_pager_duty_integration_key,
"payload": {
"summary": "Metaflow run %s/argo-{{workflow.name}} Succeeded"
% self.flow.name,
"source": "{{workflow.name}}",
"custom_details": {
"Flow": self.flow.name,
"Run ID": "argo-{{workflow.name}}",
},
},
"links": self._pager_duty_notification_links(),
}
),
on_success=True,
)
def _pager_duty_notification_links(self):
    """Link objects attached to PagerDuty events: the run's Metaflow UI
    and/or Argo UI pages, for whichever UI URLs are configured."""
    links = []
    if UI_URL:
        href = "%s/%s/%s" % (
            UI_URL.rstrip("/"),
            self.flow.name,
            "argo-{{workflow.name}}",
        )
        links.append({"href": href, "text": "Metaflow UI"})
    if ARGO_WORKFLOWS_UI_URL:
        href = "%s/workflows/%s/%s" % (
            ARGO_WORKFLOWS_UI_URL.rstrip("/"),
            "{{workflow.namespace}}",
            "{{workflow.name}}",
        )
        links.append({"href": href, "text": "Argo UI"})
    return links
def _get_slack_blocks(self, message):
    """
    Build a Slack Block Kit layout carrying environment details, a link to
    the Metaflow UI for this run, and an optional trailing message section.

    NOTE(review): assumes UI_URL is set — callers guard with `if UI_URL:`
    before invoking this helper.
    """
    ui_link = "%s/%s/argo-{{workflow.name}}" % (UI_URL.rstrip("/"), self.flow.name)
    environment_details_block = {
        "type": "section",
        "text": {"type": "mrkdwn", "text": "Environment details"},
    }
    if getattr(current, "project_name", None):
        # Surface @project metadata when the flow runs under a project.
        environment_details_block["fields"] = [
            {"type": "mrkdwn", "text": "*Project:* %s" % current.project_name},
            {
                "type": "mrkdwn",
                "text": "*Project Branch:* %s" % current.branch_name,
            },
        ]
    blocks = [
        environment_details_block,
        {
            "type": "context",
            "elements": [
                {
                    "type": "mrkdwn",
                    "text": " :information_source: *<%s>*" % ui_link,
                }
            ],
        },
        {"type": "divider"},
    ]
    if message:
        blocks.append(
            {"type": "section", "text": {"type": "mrkdwn", "text": message}}
        )
    return blocks
def _slack_error_template(self):
    """Exit hook posting a failure notification to the configured Slack
    webhook, or None when no webhook URL is configured."""
    if self.notify_slack_webhook_url is None:
        return None
    message = (
        ":rotating_light: _%s/argo-{{workflow.name}}_ failed!" % self.flow.name
    )
    payload = {"text": message}
    if UI_URL:
        # Rich Block Kit layout is only possible when the Metaflow UI URL is known.
        payload["blocks"] = self._get_slack_blocks(message)
    return HttpExitHook(
        name="notify-slack-on-error",
        method="POST",
        url=self.notify_slack_webhook_url,
        body=json.dumps(payload),
        on_error=True,
    )
def _slack_success_template(self):
    """Exit hook posting a success notification to the configured Slack
    webhook, or None when no webhook URL is configured."""
    if self.notify_slack_webhook_url is None:
        return None
    message = (
        ":white_check_mark: _%s/argo-{{workflow.name}}_ succeeded!" % self.flow.name
    )
    payload = {"text": message}
    if UI_URL:
        # Rich Block Kit layout is only possible when the Metaflow UI URL is known.
        payload["blocks"] = self._get_slack_blocks(message)
    return HttpExitHook(
        name="notify-slack-on-success",
        method="POST",
        url=self.notify_slack_webhook_url,
        body=json.dumps(payload),
        on_success=True,
    )
def _heartbeat_daemon_template(self):
    """Build the Argo daemon-container template that emits run-level
    heartbeats to the Metaflow metadata service for the lifetime of the
    workflow.

    Returns:
        dict: a DaemonTemplate rendered via ``to_dict()`` suitable for
        embedding in the workflow spec.
    """
    # Use all the affordances available to _parameters task
    executable = self.environment.executable("_parameters")
    run_id = "argo-{{workflow.name}}"
    script_name = os.path.basename(sys.argv[0])
    entrypoint = [executable, script_name]
    # FlowDecorators can define their own top-level options. These might affect run level information
    # so it is important to pass these to the heartbeat process as well, as it might be the first task to register a run.
    top_opts_dict = {}
    for deco in flow_decorators(self.flow):
        top_opts_dict.update(deco.get_top_level_options())
    top_level = list(dict_to_cli_options(top_opts_dict)) + [
        "--quiet",
        "--metadata=%s" % self.metadata.TYPE,
        "--environment=%s" % self.environment.TYPE,
        "--datastore=%s" % self.flow_datastore.TYPE,
        "--datastore-root=%s" % self.flow_datastore.datastore_root,
        "--event-logger=%s" % self.event_logger.TYPE,
        "--monitor=%s" % self.monitor.TYPE,
        "--no-pylint",
        "--with=argo_workflows_internal:auto-emit-argo-events=%i"
        % self.auto_emit_argo_events,
    ]
    # The actual heartbeat CLI invocation run by the daemon container.
    heartbeat_cmds = "{entrypoint} {top_level} argo-workflows heartbeat --run_id {run_id} {tags}".format(
        entrypoint=" ".join(entrypoint),
        top_level=" ".join(top_level) if top_level else "",
        run_id=run_id,
        tags=" ".join(["--tag %s" % t for t in self.tags]) if self.tags else "",
    )
    # TODO: we do not really need MFLOG logging for the daemon at the moment, but might be good for the future.
    # Consider if we can do without this setup.
    # Configure log capture.
    mflog_expr = export_mflog_env_vars(
        datastore_type=self.flow_datastore.TYPE,
        stdout_path="$PWD/.logs/mflog_stdout",
        stderr_path="$PWD/.logs/mflog_stderr",
        flow_name=self.flow.name,
        run_id=run_id,
        step_name="_run_heartbeat_daemon",
        task_id="1",
        retry_count="0",
    )
    # TODO: Can the init be trimmed down?
    # Can we do without get_package_commands fetching the whole code package?
    init_cmds = " && ".join(
        [
            # For supporting sandboxes, ensure that a custom script is executed
            # before anything else is executed. The script is passed in as an
            # env var.
            '${METAFLOW_INIT_SCRIPT:+eval \\"${METAFLOW_INIT_SCRIPT}\\"}',
            "mkdir -p $PWD/.logs",
            mflog_expr,
        ]
        + self.environment.get_package_commands(
            self.code_package_url,
            self.flow_datastore.TYPE,
        )[:-1]
        # Replace the line 'Task in starting'
        # FIXME: this can be brittle.
        + ["mflog 'Heartbeat daemon is starting.'"]
    )
    cmd_str = " && ".join([init_cmds, heartbeat_cmds])
    cmds = shlex.split('bash -c "%s"' % cmd_str)
    # Env required for sending heartbeats to the metadata service, nothing extra.
    # prod token / runtime info is required to correctly register flow branches
    env = {
        # These values are needed by Metaflow to set it's internal
        # state appropriately.
        "METAFLOW_CODE_METADATA": self.code_package_metadata,
        "METAFLOW_CODE_URL": self.code_package_url,
        "METAFLOW_CODE_SHA": self.code_package_sha,
        "METAFLOW_CODE_DS": self.flow_datastore.TYPE,
        "METAFLOW_SERVICE_URL": SERVICE_INTERNAL_URL,
        "METAFLOW_SERVICE_HEADERS": json.dumps(SERVICE_HEADERS),
        "METAFLOW_USER": "argo-workflows",
        "METAFLOW_DATASTORE_SYSROOT_S3": DATASTORE_SYSROOT_S3,
        "METAFLOW_DATATOOLS_S3ROOT": DATATOOLS_S3ROOT,
        "METAFLOW_DEFAULT_DATASTORE": self.flow_datastore.TYPE,
        "METAFLOW_DEFAULT_METADATA": DEFAULT_METADATA,
        "METAFLOW_CARD_S3ROOT": CARD_S3ROOT,
        "METAFLOW_KUBERNETES_WORKLOAD": 1,
        "METAFLOW_KUBERNETES_FETCH_EC2_METADATA": KUBERNETES_FETCH_EC2_METADATA,
        "METAFLOW_RUNTIME_ENVIRONMENT": "kubernetes",
        "METAFLOW_OWNER": self.username,
        "METAFLOW_PRODUCTION_TOKEN": self.production_token,  # Used in identity resolving. This affects system tags.
    }
    # support Metaflow sandboxes
    env["METAFLOW_INIT_SCRIPT"] = KUBERNETES_SANDBOX_INIT_SCRIPT
    # cleanup env values
    env = {
        k: v
        for k, v in env.items()
        if v is not None
        and k not in set(ARGO_WORKFLOWS_ENV_VARS_TO_SKIP.split(","))
    }
    # We want to grab the base image used by the start step, as this is known to be pullable from within the cluster,
    # and it might contain the required libraries, allowing us to start up faster.
    start_step = next(step for step in self.flow if step.name == "start")
    resources = dict(
        [deco for deco in start_step.decorators if deco.name == "kubernetes"][
            0
        ].attributes
    )
    from kubernetes import client as kubernetes_sdk

    return (
        DaemonTemplate("heartbeat-daemon")
        # NOTE: Even though a retry strategy does not work for Argo daemon containers,
        # this has the side-effect of protecting the exit hooks of the workflow from failing in case the daemon container errors out.
        .retry_strategy(10, 1)
        .service_account_name(resources["service_account"])
        .container(
            to_camelcase(
                kubernetes_sdk.V1Container(
                    name="main",
                    # TODO: Make the image configurable
                    image=resources["image"],
                    command=cmds,
                    env=[
                        kubernetes_sdk.V1EnvVar(name=k, value=str(v))
                        for k, v in env.items()
                    ],
                    env_from=[
                        kubernetes_sdk.V1EnvFromSource(
                            secret_ref=kubernetes_sdk.V1SecretEnvSource(
                                name=str(k),
                                # optional=True
                            )
                        )
                        # Secrets come from the start step's @kubernetes
                        # attributes plus the globally configured secret lists;
                        # empty entries are filtered out by the `if k` guard.
                        for k in list(
                            []
                            if not resources.get("secrets")
                            else (
                                [resources.get("secrets")]
                                if isinstance(resources.get("secrets"), str)
                                else resources.get("secrets")
                            )
                        )
                        + KUBERNETES_SECRETS.split(",")
                        + ARGO_WORKFLOWS_KUBERNETES_SECRETS.split(",")
                        if k
                    ],
                    resources=kubernetes_sdk.V1ResourceRequirements(
                        # NOTE: base resources for this are kept to a minimum to save on running costs.
                        # This has an adverse effect on startup time for the daemon, which can be completely
                        # alleviated by using a base image that has the required dependencies pre-installed
                        requests={
                            "cpu": "200m",
                            "memory": "100Mi",
                        },
                        limits={
                            "cpu": "200m",
                            "memory": "100Mi",
                        },
                    ),
                )
            # NOTE(review): here .to_dict() is applied to the result of
            # to_camelcase(V1Container(...)), whereas _compile_sensor applies
            # to_camelcase to V1Container(...).to_dict() — confirm the container
            # keys still end up camel-cased as intended.
            ).to_dict()
        )
    )
def _compile_sensor(self):
    """Compile the flow's @trigger configuration into an Argo Events Sensor
    object, or return ``{}`` when the flow defines no triggers.

    Raises:
        ArgoWorkflowsException: when the deployment lacks Argo Events
            configuration (event name, event source, or service account).
        MetaflowException: when the 'kubernetes' Python package is missing.
    """
    # This method compiles a Metaflow @trigger decorator into Argo Events Sensor.
    #
    # Event payload is assumed as -
    # ----------------------------------------------------------------------
    # | name | name of the event |
    # | payload | |
    # | parameter name... | parameter value |
    # | parameter name... | parameter value |
    # | parameter name... | parameter value |
    # | parameter name... | parameter value |
    # ----------------------------------------------------------------------
    #
    #
    #
    # At the moment, every event-triggered workflow template has a dedicated
    # sensor (which can potentially be a bit wasteful in scenarios with high
    # volume of workflows and low volume of events) - introducing a many-to-one
    # sensor-to-workflow-template solution is completely in the realm of
    # possibilities (modulo consistency and transactional guarantees).
    #
    # This implementation side-steps the more prominent/popular usage of event
    # sensors where the sensor is responsible for submitting the workflow object
    # directly. Instead we construct the equivalent behavior of `argo submit
    # --from` to reference an already submitted workflow template. This ensures
    # that Metaflow generated Kubernetes objects can be easily reasoned about.
    #
    # At the moment, Metaflow configures for webhook and NATS event sources. If you
    # are interested in the HA story for either - please follow this link
    # https://argoproj.github.io/argo-events/eventsources/ha/.
    #
    # There is some potential for confusion between Metaflow concepts and Argo
    # Events concepts, particularly for event names. Argo Events EventSource
    # define an event name which is different than the Metaflow event name - think
    # of Argo Events name as a type of event (conceptually like topics in Kafka)
    # while Metaflow event names are a field within the Argo Event.
    #
    #
    # At the moment, there is parity between the labels and annotations for
    # workflow templates and sensors - that may or may not be the case in the
    # future.
    #
    # Unfortunately, there doesn't seem to be a way to create a sensor filter
    # where one (or more) fields across multiple events have the same value.
    # Imagine a scenario where we want to trigger a flow iff both the dependent
    # events agree on the same date field. Unfortunately, there isn't any way in
    # Argo Events (as of apr'23) to ensure that.

    # Nothing to do here - let's short circuit and exit.
    if not self.triggers:
        return {}
    # Ensure proper configuration is available for Argo Events
    if ARGO_EVENTS_EVENT is None:
        raise ArgoWorkflowsException(
            "An Argo Event name hasn't been configured for your deployment yet. "
            "Please see this article for more details on event names - "
            "https://argoproj.github.io/argo-events/eventsources/naming/. "
            "It is very likely that all events for your deployment share the "
            "same name. You can configure it by executing "
            "`metaflow configure kubernetes` or setting METAFLOW_ARGO_EVENTS_EVENT "
            "in your configuration. If in doubt, reach out for support at "
            "http://chat.metaflow.org"
        )
    # Unfortunately argo events requires knowledge of event source today.
    # Hopefully, some day this requirement can be removed and events can be truly
    # impervious to their source and destination.
    if ARGO_EVENTS_EVENT_SOURCE is None:
        raise ArgoWorkflowsException(
            "An Argo Event Source name hasn't been configured for your deployment "
            "yet. Please see this article for more details on event names - "
            "https://argoproj.github.io/argo-events/eventsources/naming/. "
            "You can configure it by executing `metaflow configure kubernetes` or "
            "setting METAFLOW_ARGO_EVENTS_EVENT_SOURCE in your configuration. If "
            "in doubt, reach out for support at http://chat.metaflow.org"
        )
    # Service accounts are a hard requirement since we utilize the
    # argoWorkflow trigger for resource sensors today.
    if ARGO_EVENTS_SERVICE_ACCOUNT is None:
        raise ArgoWorkflowsException(
            "An Argo Event service account hasn't been configured for your "
            "deployment yet. Please see this article for more details on event "
            "names - https://argoproj.github.io/argo-events/service-accounts/. "
            "You can configure it by executing `metaflow configure kubernetes` or "
            "setting METAFLOW_ARGO_EVENTS_SERVICE_ACCOUNT in your configuration. "
            "If in doubt, reach out for support at http://chat.metaflow.org"
        )
    try:
        # Kubernetes is a soft dependency for generating Argo objects.
        # We can very well remove this dependency for Argo with the downside of
        # adding a bunch more json bloat classes (looking at you... V1Container)
        from kubernetes import client as kubernetes_sdk
    except (NameError, ImportError):
        raise MetaflowException(
            "Could not import Python package 'kubernetes'. Install kubernetes "
            "sdk (https://pypi.org/project/kubernetes/) first."
        )
    return (
        Sensor()
        .metadata(
            # Sensor metadata.
            ObjectMeta()
            .name(ArgoWorkflows._sensor_name(self.name))
            .namespace(ARGO_EVENTS_SENSOR_NAMESPACE)
            .labels(self._base_labels)
            .label("app.kubernetes.io/name", "metaflow-sensor")
            .annotations(self._base_annotations)
        )
        .spec(
            SensorSpec().template(
                # Sensor template.
                SensorTemplate()
                .metadata(
                    ObjectMeta()
                    .label("app.kubernetes.io/name", "metaflow-sensor")
                    .label("app.kubernetes.io/part-of", "metaflow")
                    .annotations(self._base_annotations)
                )
                .container(
                    # Run sensor in guaranteed QoS. The sensor isn't doing a lot
                    # of work so we roll with minimal resource allocation. It is
                    # likely that in subsequent releases we will agressively lower
                    # sensor resources to pack more of them on a single node.
                    to_camelcase(
                        kubernetes_sdk.V1Container(
                            name="main",
                            resources=kubernetes_sdk.V1ResourceRequirements(
                                requests={
                                    "cpu": "100m",
                                    "memory": "250Mi",
                                },
                                limits={
                                    "cpu": "100m",
                                    "memory": "250Mi",
                                },
                            ),
                        ).to_dict()
                    )
                )
                .service_account_name(ARGO_EVENTS_SERVICE_ACCOUNT)
                # TODO (savin): Handle bypassing docker image rate limit errors.
            )
            # Set sensor replica to 1 for now.
            # TODO (savin): Allow for multiple replicas for HA.
            .replicas(1)
            # TODO: Support revision history limit to manage old deployments
            # .revision_history_limit(...)
            .event_bus_name(ARGO_EVENTS_EVENT_BUS)
            # Workflow trigger.
            .trigger(
                Trigger().template(
                    TriggerTemplate(self.name)
                    # Trigger a deployed workflow template
                    .k8s_trigger(
                        StandardK8STrigger()
                        .source(
                            {
                                "resource": {
                                    "apiVersion": "argoproj.io/v1alpha1",
                                    "kind": "Workflow",
                                    "metadata": {
                                        "generateName": "%s-" % self.name,
                                        "namespace": KUBERNETES_NAMESPACE,
                                        # Useful to paint the UI
                                        "annotations": {
                                            "metaflow/triggered_by": json.dumps(
                                                [
                                                    {
                                                        key: trigger.get(key)
                                                        for key in ["name", "type"]
                                                    }
                                                    for trigger in self.triggers
                                                ]
                                            )
                                        },
                                    },
                                    "spec": {
                                        "arguments": {
                                            "parameters": [
                                                Parameter(parameter["name"])
                                                .value(parameter["value"])
                                                .to_json()
                                                for parameter in self.parameters.values()
                                            ]
                                            # Also consume event data
                                            + [
                                                Parameter(event["sanitized_name"])
                                                .value(json.dumps(None))
                                                .to_json()
                                                for event in self.triggers
                                            ]
                                        },
                                        "workflowTemplateRef": {
                                            "name": self.name,
                                        },
                                    },
                                }
                            }
                        )
                        .parameters(
                            [
                                y
                                for x in list(
                                    list(
                                        TriggerParameter()
                                        .src(
                                            dependency_name=event["sanitized_name"],
                                            # Technically, we don't need to create
                                            # a payload carry-on and can stuff
                                            # everything within the body.
                                            # NOTE: We need the conditional logic in order to successfully fall back to the default value
                                            # when the event payload does not contain a key for a parameter.
                                            # NOTE: Keys might contain dashes, so use the safer 'get' for fetching the value
                                            data_template='{{ if (hasKey $.Input.body.payload "%s") }}%s{{- else -}}{{ (fail "use-default-instead") }}{{- end -}}'
                                            % (
                                                v,
                                                (
                                                    '{{- $pv:=(get $.Input.body.payload "%s") -}}{{ if kindIs "string" $pv }}{{- $pv | toRawJson -}}{{- else -}}{{ $pv | toRawJson | toRawJson }}{{- end -}}'
                                                    % v
                                                    if self.parameters[
                                                        parameter_name
                                                    ]["type"]
                                                    == "JSON"
                                                    else '{{- (get $.Input.body.payload "%s" | toRawJson) -}}'
                                                    % v
                                                ),
                                            ),
                                            # Unfortunately the sensor needs to
                                            # record the default values for
                                            # the parameters - there doesn't seem
                                            # to be any way for us to skip
                                            value=self.parameters[parameter_name][
                                                "value"
                                            ],
                                        )
                                        .dest(
                                            # this undocumented (mis?)feature in
                                            # argo-events allows us to reference
                                            # parameters by name rather than index
                                            "spec.arguments.parameters.#(name=%s).value"
                                            % parameter_name
                                        )
                                        for parameter_name, v in event.get(
                                            "parameters", {}
                                        ).items()
                                    )
                                    for event in self.triggers
                                )
                                for y in x
                            ]
                            + [
                                # Map event payload to parameters for current
                                TriggerParameter()
                                .src(
                                    dependency_name=event["sanitized_name"],
                                    data_key="body.payload",
                                    value=json.dumps(None),
                                )
                                .dest(
                                    "spec.arguments.parameters.#(name=%s).value"
                                    % event["sanitized_name"]
                                )
                                for event in self.triggers
                            ]
                        )
                        # Reset trigger conditions ever so often by wiping
                        # away event tracking history on a schedule.
                        # @trigger(options={"reset_at": {"cron": , "timezone": }})
                        # timezone is IANA standard, e.g. America/Los_Angeles
                        # TODO: Introduce "end_of_day", "end_of_hour" ..
                    ).conditions_reset(
                        cron=self.trigger_options.get("reset_at", {}).get("cron"),
                        timezone=self.trigger_options.get("reset_at", {}).get(
                            "timezone"
                        ),
                    )
                )
            )
            # Event dependencies. As of Mar' 23, Argo Events docs suggest using
            # Jetstream event bus rather than NATS streaming bus since the later
            # doesn't support multiple combos of the same event name and event
            # source name.
            .dependencies(
                # Event dependencies don't entertain dots
                EventDependency(event["sanitized_name"]).event_name(
                    ARGO_EVENTS_EVENT
                )
                # TODO: Alternatively fetch this from @trigger config options
                .event_source_name(ARGO_EVENTS_EVENT_SOURCE).filters(
                    # Ensure that event name matches and all required parameter
                    # fields are present in the payload. There is a possibility of
                    # dependency on an event where none of the fields are required.
                    # At the moment, this event is required but the restriction
                    # can be removed if needed.
                    EventDependencyFilter().exprs(
                        [
                            {
                                "expr": "name == '%s'" % event["name"],
                                "fields": [
                                    {"name": "name", "path": "body.payload.name"}
                                ],
                            }
                        ]
                        + [
                            {
                                "expr": "true == true",  # field name is present
                                "fields": [
                                    {
                                        "name": "field",
                                        "path": "body.payload.%s" % v,
                                    }
                                ],
                            }
                            for parameter_name, v in event.get(
                                "parameters", {}
                            ).items()
                            # only for required parameters
                            if self.parameters[parameter_name]["is_required"]
                        ]
                        + [
                            {
                                "expr": "field == '%s'" % v,  # trigger_on_finish
                                "fields": [
                                    {
                                        "name": "field",
                                        "path": "body.payload.%s" % filter_key,
                                    }
                                ],
                            }
                            for filter_key, v in event.get("filters", {}).items()
                            if v
                        ]
                    )
                )
                for event in self.triggers
            )
        )
    )
def list_to_prose(self, items, singular):
    """Render *items* as a bold, comma-joined prose fragment followed by the
    appropriately pluralized *singular* noun.

    Examples: ["a"] -> "*a* flow"; ["a", "b", "c"] -> "*a*, *b* and *c* flows".
    Returns an empty string for an empty list.
    """
    decorated = ["*%s*" % item for item in items]
    count = len(decorated)
    if count == 0:
        return ""
    if count == 1:
        return "%s %s" % (decorated[0], singular)
    # Two or more items: join all but the last with commas, then "and".
    joined = "%s and %s" % (", ".join(decorated[:-1]), decorated[-1])
    return "%s %s" % (joined, singular + "s")
# Helper classes to assist with JSON-foo. This can very well be replaced with an
# explicit dependency on the argo-workflows Python SDK if this method turns out to be painful.
# TODO: Autogenerate them, maybe?
| ArgoWorkflows |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_highlight.py | {
"start": 21409,
"end": 22405
} | class ____(util.MdCase):
"""Test default inline language cases."""
extension = ['pymdownx.highlight', 'pymdownx.superfences', 'pymdownx.inlinehilite']
extension_configs = {
'pymdownx.inlinehilite': {
'style_plain_text': 'python'
}
}
def test_default_inline(self):
"""Test that default language affects block, but not inline code."""
self.check_markdown(
'''
`import code`
import code
```
import code
```
''',
'''
<p><code class="highlight"><span class="kn">import</span><span class="w"> </span><span class="nn">code</span></code></p>
<div class="highlight"><pre><span></span><code>import code
</code></pre></div>
<div class="highlight"><pre><span></span><code>import code
</code></pre></div>
''', # noqa: E501
True
)
| TestDefaultLangInline |
python | fsspec__filesystem_spec | fsspec/implementations/tests/memory/memory_test.py | {
"start": 122,
"end": 199
} | class ____(abstract.AbstractCopyTests, MemoryFixtures):
pass
| TestMemoryCopy |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/workflow/workflow_events.py | {
"start": 3014,
"end": 4319
} | class ____(StartEvent):
def __init__(self, **data: Any) -> None:
"""Convert chat_history items to ChatMessage objects if they aren't already"""
if "chat_history" in data and data["chat_history"]:
converted_history = []
for i, msg in enumerate(data["chat_history"]):
if isinstance(msg, ChatMessage):
converted_history.append(msg)
else:
# Convert dict or other formats to ChatMessage with validation
try:
converted_history.append(ChatMessage.model_validate(msg))
except ValidationError as e:
logger.error(
f"Failed to validate chat message at index {i}: {e}. "
f"Invalid message: {msg}"
)
raise
data["chat_history"] = converted_history
super().__init__(**data)
@model_serializer()
def serialize_start_event(self) -> dict:
"""Serialize the start event and exclude the memory."""
return {
"user_msg": self.user_msg,
"chat_history": self.chat_history,
"max_iterations": self.max_iterations,
}
| AgentWorkflowStartEvent |
python | google__pytype | pytype/tests/test_recursive_types.py | {
"start": 9237,
"end": 17093
} | class ____(test_base.BaseTest):
"""Tests recursive types defined in pyi files."""
pickle = False
def DepTree(self, deps):
return super().DepTree([d + ({"pickle": self.pickle},) for d in deps])
def test_basic(self):
with self.DepTree([(
"foo.py",
"""
from typing import List
X = List['X']
""",
)]):
self.CheckWithErrors("""
import foo
from typing import Any, Set, List
x: foo.X = None
ok1: foo.X = x
ok2: List[Any] = x
bad1: List[str] = x # annotation-type-mismatch
bad2: Set[Any] = x # annotation-type-mismatch
""")
def test_reingest(self):
with self.DepTree([(
"foo.py",
"""
from typing import List, Union
X = Union[int, List['X']]
""",
)]):
ty = self.Infer("""
import foo
X = foo.X
""")
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import List, Union
X = Union[int, List[foo.X]]
""",
)
def test_reingest_and_use(self):
with self.DepTree([(
"foo.py",
"""
from typing import List, Union
X = Union[int, List['X']]
""",
)]):
self.CheckWithErrors("""
import foo
from typing import Any, List, Set, Union
X = foo.X
x_local: X = None
x_imported: foo.X = None
ok1: X = x_local
ok2: Union[int, List[Any]] = x_local
ok3: foo.X = x_local
ok4: X = x_imported
bad1: Union[int, List[int]] = x_local # annotation-type-mismatch
bad2: Union[int, Set[Any]] = x_local # annotation-type-mismatch
""")
def test_reingest_n_times(self):
deps = [(
"foo1.py",
"""
from typing import List
X = List['X']
""",
)]
for i in range(3):
deps.append((
f"foo{i+2}.py",
f"""
import foo{i+1}
X = foo{i+1}.X
""",
))
with self.DepTree(deps):
self.CheckWithErrors("""
import foo2
import foo4
from typing import Any, List, Set
X = foo4.X
# Test local X
x_local: X = None
ok1: X = x_local
bad1: Set[Any] = x_local # annotation-type-mismatch
# Test imported foo4.X
x_foo4: foo4.X = None
ok2: foo4.X = x_foo4
bad2: Set[Any] = x_foo4 # annotation-type-mismatch
# Test interactions
x_foo2: foo2.X = None
ok3: foo2.X = x_local
ok4: foo2.X = x_foo4
ok5: foo4.X = x_local
ok6: foo4.X = x_foo2
ok7: X = x_foo2
ok8: X = x_foo4
""")
def test_mutually_recursive(self):
with self.DepTree([(
"foo.py",
"""
from typing import List
X = List['Y']
Y = List[X]
""",
)]):
self.CheckWithErrors("""
import foo
from typing import Any, List
x: foo.X = None
ok: List[List[Any]] = x
bad: List[List[int]] = x # annotation-type-mismatch
""")
def test_parameterization(self):
foo_src = """
from typing import List, TypeVar, Union
T = TypeVar('T')
X = Union[T, List['X{inner_parameter}']]
Y = X[int]
"""
for inner_parameter in ("", "[T]"):
with self.subTest(inner_parameter=inner_parameter):
with self.DepTree(
[("foo.py", foo_src.format(inner_parameter=inner_parameter))]
):
errors = self.CheckWithErrors("""
import foo
ok1: foo.X[str] = ['']
ok2: foo.Y = [0]
bad1: foo.X[str] = [0] # annotation-type-mismatch
bad2: foo.Y = [''] # annotation-type-mismatch[e]
""")
self.assertErrorRegexes(
errors,
{
"e": (
r"Annotation: Union\[int,"
r" list\[foo.X(\[T\])?\[int\]\]\].*"
r"Assignment: list\[str\]"
)
},
)
def test_parameterize_and_forward(self):
with self.DepTree([
(
"foo.py",
"""
from typing import List, TypeVar, Union
T = TypeVar('T')
X = Union[T, List['X[T]']]
""",
),
(
"bar.py",
"""
import foo
Y = foo.X[str]
""",
),
]):
self.Check("""
import bar
assert_type(bar.Y, "type[Union[list[bar.foo.X[T][str]], str]]")
""")
def test_dataclass(self):
with self.DepTree([(
"foo.py",
"""
import dataclasses
from typing import Dict, List, Optional, Union
X = Union[List[str], 'X']
Y = Dict[str, X]
@dataclasses.dataclass
class Foo:
y: Optional[Y] = None
""",
)]):
self.Check("""
import foo
def f(x: foo.Foo):
pass
""")
def test_import_multiple_aliases(self):
with self.DepTree([
(
"foo.py",
"""
from typing import List, Union, TypeVar
T = TypeVar('T')
X = Union[T, List['X[T]']]
""",
),
(
"bar.py",
"""
import foo
BarX = foo.X
""",
),
(
"baz.py",
"""
import foo
BazX = foo.X
""",
),
]):
self.Check("""
import bar
import baz
# Reference BarX, then BazX, then BarX again to test that we've fixed an
# odd bug where importing an alias in a different namespace changed the
# scopes of cached TypeVars.
def f1(x: bar.BarX[str]): ...
def f2(x: baz.BazX[str]): ...
def f3(x: bar.BarX[str]): ...
""")
def test_formal_alias(self):
with self.DepTree([(
"foo.py",
"""
from typing import List, Union, TypeVar
T = TypeVar('T')
X = Union[T, List['X[T]']]
""",
)]):
self.Check("""
import foo
from typing import TypeVar
T = TypeVar('T')
def f(x: foo.X[T], y: T):
pass
""")
def test_use_branched_alias(self):
with self.DepTree([(
"foo.py",
"""
from typing import Mapping, Sequence, TypeVar, Union
K = TypeVar('K')
V = TypeVar('V')
StructureKV = Union[
Sequence['StructureKV[K, V]'],
Mapping[K, 'StructureKV[K, V]'],
V,
]
try:
Structure = StructureKV[int, V]
except TypeError:
Structure = Union[
Sequence['Structure[V]'], Mapping[int, 'Structure[V]'], V]
""",
)]):
self.Check("""
import foo
from typing import Any
X = foo.Structure[Any]
def f(x: X):
y = x[0]
return y[1]
""")
def test_use_in_custom_generic_class(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Any, Generic, Iterable, TypeVar, Union
_ShapeType = TypeVar('_ShapeType')
_DType = TypeVar('_DType')
class ndarray(Generic[_ShapeType, _DType]):
def __iter__(self) -> Any: ...
ArrayTree = Union[Iterable[ArrayTree], ndarray]
""",
)]):
self.Check("""
from typing import Generic, TypeVar
import foo
T = TypeVar('T', bound=foo.ArrayTree)
class C(Generic[T]):
@classmethod
def make(cls, obj: T) -> 'C[T]':
return cls()
""")
def test_callable(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import Callable, Sequence, TypeVar
T = TypeVar('T')
X = T | Sequence[X[T]]
class C:
f: Callable[..., X]
""",
)]):
self.Check("""
import foo
class C(foo.C):
def g(self):
return self.f()
""")
| PyiTest |
python | openai__openai-python | src/openai/_extras/sounddevice_proxy.py | {
"start": 380,
"end": 725
} | class ____(LazyProxy[Any]):
@override
def __load__(self) -> Any:
try:
import sounddevice # type: ignore
except ImportError as err:
raise MissingDependencyError(SOUNDDEVICE_INSTRUCTIONS) from err
return sounddevice
if not TYPE_CHECKING:
sounddevice = SounddeviceProxy()
| SounddeviceProxy |
python | huggingface__transformers | src/transformers/models/camembert/modeling_camembert.py | {
"start": 16023,
"end": 16715
} | class ____(nn.Module):
"""Camembert Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
@auto_docstring
| CamembertLMHead |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/eventbridge.py | {
"start": 1277,
"end": 3590
} | class ____(AwsBaseOperator[EventBridgeHook]):
"""
Put Events onto Amazon EventBridge.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EventBridgePutEventsOperator`
:param entries: the list of events to be put onto EventBridge, each event is a dict (required)
:param endpoint_id: the URL subdomain of the endpoint
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.htmlt
"""
aws_hook_class = EventBridgeHook
template_fields: Sequence[str] = aws_template_fields("entries", "endpoint_id")
def __init__(self, *, entries: list[dict], endpoint_id: str | None = None, **kwargs):
super().__init__(**kwargs)
self.entries = entries
self.endpoint_id = endpoint_id
def execute(self, context: Context):
response = self.hook.conn.put_events(
**prune_dict(
{
"Entries": self.entries,
"EndpointId": self.endpoint_id,
}
)
)
self.log.info("Sent %d events to EventBridge.", len(self.entries))
if response.get("FailedEntryCount"):
for event in response["Entries"]:
if "ErrorCode" in event:
self.log.error(event)
raise AirflowException(
f"{response['FailedEntryCount']} entries in this request have failed to send."
)
if self.do_xcom_push:
return [e["EventId"] for e in response["Entries"]]
| EventBridgePutEventsOperator |
python | dagster-io__dagster | integration_tests/test_suites/auto_materialize_perf_tests/perf_scenario.py | {
"start": 1630,
"end": 2956
} | class ____(NamedTuple):
name: str
defs: Definitions
activity_history: ActivityHistory
max_execution_time_seconds: int
current_time: Optional[datetime] = None
def save_instance_snapshot(self) -> None:
"""Executes the specified runs for the given asset graph, writing the resulting instance to
the snapshot directory.
"""
with tempfile.TemporaryDirectory() as temp_dir:
with DagsterInstance.from_ref(InstanceRef.from_dir(temp_dir)) as instance:
self.activity_history.play_history(self.defs, instance)
# write as a compressed file
with tarfile.open(self.instance_snapshot_path, "w:gz") as tf:
for fname in os.listdir(temp_dir):
tf.add(os.path.join(temp_dir, fname), arcname=fname)
@contextmanager
def instance_from_snapshot(self):
with (
tempfile.TemporaryDirectory() as temp_dir,
tarfile.open(self.instance_snapshot_path) as tf,
):
tf.extractall(temp_dir)
with DagsterInstance.from_ref(InstanceRef.from_dir(temp_dir)) as instance:
yield instance
@property
def instance_snapshot_path(self) -> str:
return file_relative_path(__file__, f"snapshots/{self.name}.tar.gz")
| PerfScenario |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/dep_snapshot.py | {
"start": 6279,
"end": 6651
} | class ____(NamedTuple("_OutputHandleSnap", [("node_name", str), ("output_name", str)])):
def __new__(cls, node_name: str, output_name: str):
return super().__new__(
cls,
node_name=check.str_param(node_name, "node_name"),
output_name=check.str_param(output_name, "output_name"),
)
@whitelist_for_serdes
| OutputHandleSnap |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_estimator_checks.py | {
"start": 9894,
"end": 10561
} | class ____(BaseEstimator):
def fit(self, X, y):
X, y = validate_data(
self, X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True
)
# store the original X to check for sample order later
self._X = X
return self
def predict(self, X):
X = check_array(X)
# if the input contains the same elements but different sample order,
# then just return zeros.
if (
np.array_equiv(np.sort(X, axis=0), np.sort(self._X, axis=0))
and (X != self._X).any()
):
return np.zeros(X.shape[0])
return X[:, 0]
| NotInvariantSampleOrder |
python | getsentry__sentry | src/sentry/integrations/utils/metrics.py | {
"start": 15639,
"end": 15816
} | class ____(StrEnum):
INSTALLATION = "installation"
PUSH = "push"
PULL_REQUEST = "pull_request"
INBOUND_SYNC = "inbound_sync"
@dataclass
| IntegrationWebhookEventType |
python | ray-project__ray | rllib/core/rl_module/rl_module.py | {
"start": 1211,
"end": 10895
} | class ____:
"""Utility spec class to make constructing RLModules (in single-agent case) easier.
Args:
module_class: The RLModule class to use.
observation_space: The observation space of the RLModule. This may differ
from the observation space of the environment. For example, a discrete
observation space of an environment, would usually correspond to a
one-hot encoded observation space of the RLModule because of preprocessing.
action_space: The action space of the RLModule.
inference_only: Whether the RLModule should be configured in its inference-only
state, in which those components not needed for action computing (for
example a value function or a target network) might be missing.
Note that `inference_only=True` AND `learner_only=True` is not allowed.
learner_only: Whether this RLModule should only be built on Learner workers, but
NOT on EnvRunners. Useful for RLModules inside a MultiRLModule that are only
used for training, for example a shared value function in a multi-agent
setup or a world model in a curiosity-learning setup.
Note that `inference_only=True` AND `learner_only=True` is not allowed.
model_config: The model config dict or default RLlib dataclass to use.
catalog_class: The Catalog class to use.
load_state_path: The path to the module state to load from. NOTE: This must be
an absolute path.
"""
module_class: Optional[Type["RLModule"]] = None
observation_space: Optional[gym.Space] = None
action_space: Optional[gym.Space] = None
inference_only: bool = False
learner_only: bool = False
model_config: Optional[Union[Dict[str, Any], DefaultModelConfig]] = None
catalog_class: Optional[Type["Catalog"]] = None
load_state_path: Optional[str] = None
# Deprecated field.
model_config_dict: Optional[Union[dict, int]] = None
def __post_init__(self):
if self.model_config_dict is not None:
deprecation_warning(
old="RLModuleSpec(model_config_dict=..)",
new="RLModuleSpec(model_config=..)",
error=True,
)
def build(self) -> "RLModule":
"""Builds the RLModule from this spec."""
if self.module_class is None:
raise ValueError("RLModule class is not set.")
if self.observation_space is None:
raise ValueError("Observation space is not set.")
try:
module = self.module_class(
observation_space=self.observation_space,
action_space=self.action_space,
inference_only=self.inference_only,
model_config=self._get_model_config(),
catalog_class=self.catalog_class,
)
# Older custom model might still require the old `RLModuleConfig` under
# the `config` arg.
except AttributeError:
module_config = self.get_rl_module_config()
module = self.module_class(module_config)
return module
@classmethod
def from_module(cls, module: "RLModule") -> "RLModuleSpec":
from ray.rllib.core.rl_module.multi_rl_module import MultiRLModule
if isinstance(module, MultiRLModule):
raise ValueError("MultiRLModule cannot be converted to RLModuleSpec.")
# Try instantiating a new RLModule from the spec using the new c'tor args.
try:
rl_module_spec = RLModuleSpec(
module_class=type(module),
observation_space=module.observation_space,
action_space=module.action_space,
inference_only=module.inference_only,
learner_only=module.learner_only,
model_config=module.model_config,
catalog_class=(
type(module.catalog) if module.catalog is not None else None
),
)
# Old path through deprecated `RLModuleConfig` class. Used only if `module`
# still has a valid `config` attribute.
except AttributeError:
rl_module_spec = RLModuleSpec(
module_class=type(module),
observation_space=module.config.observation_space,
action_space=module.config.action_space,
inference_only=module.config.inference_only,
learner_only=module.config.learner_only,
model_config=module.config.model_config_dict,
catalog_class=module.config.catalog_class,
)
return rl_module_spec
def to_dict(self):
"""Returns a serialized representation of the spec."""
return {
"module_class": serialize_type(self.module_class),
"observation_space": gym_space_to_dict(self.observation_space),
"action_space": gym_space_to_dict(self.action_space),
"inference_only": self.inference_only,
"learner_only": self.learner_only,
"model_config": self._get_model_config(),
"catalog_class": serialize_type(self.catalog_class)
if self.catalog_class is not None
else None,
}
@classmethod
def from_dict(cls, d):
"""Returns a single agent RLModule spec from a serialized representation."""
module_class = deserialize_type(d["module_class"])
try:
spec = RLModuleSpec(
module_class=module_class,
observation_space=gym_space_from_dict(d["observation_space"]),
action_space=gym_space_from_dict(d["action_space"]),
inference_only=d["inference_only"],
learner_only=d["learner_only"],
model_config=d["model_config"],
catalog_class=deserialize_type(d["catalog_class"])
if d["catalog_class"] is not None
else None,
)
# Old path through deprecated `RLModuleConfig` class.
except KeyError:
module_config = RLModuleConfig.from_dict(d["module_config"])
spec = RLModuleSpec(
module_class=module_class,
observation_space=module_config.observation_space,
action_space=module_config.action_space,
inference_only=module_config.inference_only,
learner_only=module_config.learner_only,
model_config=module_config.model_config_dict,
catalog_class=module_config.catalog_class,
)
return spec
def update(self, other, override: bool = True) -> None:
"""Updates this spec with the given other spec. Works like dict.update().
Args:
other: The other SingleAgentRLModule spec to update this one from.
override: Whether to update all properties in `self` with those of `other.
If False, only update those properties in `self` that are not None.
"""
if not isinstance(other, RLModuleSpec):
raise ValueError("Can only update with another RLModuleSpec.")
# If the field is None in the other, keep the current field, otherwise update
# with the new value.
if override:
self.module_class = other.module_class or self.module_class
self.observation_space = other.observation_space or self.observation_space
self.action_space = other.action_space or self.action_space
self.inference_only = other.inference_only or self.inference_only
self.learner_only = other.learner_only and self.learner_only
self.model_config = other.model_config or self.model_config
self.catalog_class = other.catalog_class or self.catalog_class
self.load_state_path = other.load_state_path or self.load_state_path
# Only override, if the field is None in `self`.
# Do NOT override the boolean settings: `inference_only` and `learner_only`.
else:
self.module_class = self.module_class or other.module_class
self.observation_space = self.observation_space or other.observation_space
self.action_space = self.action_space or other.action_space
self.model_config = self.model_config or other.model_config
self.catalog_class = self.catalog_class or other.catalog_class
self.load_state_path = self.load_state_path or other.load_state_path
def as_multi_rl_module_spec(self) -> "MultiRLModuleSpec":
"""Returns a MultiRLModuleSpec (`self` under DEFAULT_MODULE_ID key)."""
from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec
return MultiRLModuleSpec(
rl_module_specs={DEFAULT_MODULE_ID: self},
load_state_path=self.load_state_path,
)
def _get_model_config(self):
return (
dataclasses.asdict(self.model_config)
if dataclasses.is_dataclass(self.model_config)
else (self.model_config or {})
)
@Deprecated(
new="RLModule(*, observation_space=.., action_space=.., ....)",
error=False,
)
def get_rl_module_config(self):
return RLModuleConfig(
observation_space=self.observation_space,
action_space=self.action_space,
inference_only=self.inference_only,
learner_only=self.learner_only,
model_config_dict=self._get_model_config(),
catalog_class=self.catalog_class,
)
@PublicAPI(stability="beta")
| RLModuleSpec |
python | apache__airflow | providers/google/tests/unit/google/marketing_platform/hooks/test_display_video.py | {
"start": 1100,
"end": 7178
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = GoogleDisplayVideo360Hook(api_version=API_VERSION, gcp_conn_id=GCP_CONN_ID)
@mock.patch(
"airflow.providers.google.marketing_platform.hooks.display_video.GoogleDisplayVideo360Hook._authorize"
)
@mock.patch("airflow.providers.google.marketing_platform.hooks.display_video.build")
def test_get_conn_to_display_video(self, mock_build, mock_authorize):
result = self.hook.get_conn_to_display_video()
mock_build.assert_called_once_with(
"displayvideo",
API_VERSION,
http=mock_authorize.return_value,
cache_discovery=False,
)
assert mock_build.return_value == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
)
def test_create_sdf_download_tasks_called_with_params(self, get_conn_to_display_video):
body_request = {
"version": "version",
"partnerId": "partner_id",
"advertiserId": "advertiser_id",
"parentEntityFilter": "parent_entity_filter",
"idFilter": "id_filter",
"inventorySourceFilter": "inventory_source_filter",
}
self.hook.create_sdf_download_operation(body_request=body_request)
get_conn_to_display_video.return_value.sdfdownloadtasks.return_value.create.assert_called_once_with(
body=body_request
)
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
)
def test_create_sdf_download_tasks_called_once(self, get_conn_to_display_video):
body_request = {
"version": "version",
"partnerId": "partner_id",
"advertiserId": "advertiser_id",
"parentEntityFilter": "parent_entity_filter",
"idFilter": "id_filter",
"inventorySourceFilter": "inventory_source_filter",
}
self.hook.create_sdf_download_operation(body_request=body_request)
get_conn_to_display_video.return_value.sdfdownloadtasks.return_value.create.assert_called_once()
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
)
def test_create_sdf_download_tasks_return_equal_values(self, get_conn_to_display_video):
response = ["name"]
body_request = {
"version": "version",
"partnerId": "partner_id",
"advertiserId": "advertiser_id",
"parentEntityFilter": "parent_entity_filter",
"idFilter": "id_filter",
"inventorySourceFilter": "inventory_source_filter",
}
# fmt: off
get_conn_to_display_video.return_value. \
sdfdownloadtasks.return_value. \
create.return_value \
.execute.return_value = response
# fmt: on
result = self.hook.create_sdf_download_operation(body_request=body_request)
assert response == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
)
def test_get_sdf_download_tasks_called_with_params(self, get_conn_to_display_video):
operation_name = "operation_name"
self.hook.get_sdf_download_operation(operation_name=operation_name)
# fmt: off
get_conn_to_display_video.return_value. \
sdfdownloadtasks.return_value. \
operations.return_value. \
get.assert_called_once_with(name=operation_name)
# fmt: on
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
)
def test_get_sdf_download_tasks_called_once(self, get_conn_to_display_video):
operation_name = "name"
self.hook.get_sdf_download_operation(operation_name=operation_name)
# fmt: off
get_conn_to_display_video.return_value. \
sdfdownloadtasks.return_value. \
operations.return_value. \
get.assert_called_once()
# fmt: on
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
)
def get_sdf_download_tasks_return_equal_values(self, get_conn_to_display_video):
operation_name = "operation"
response = "response"
get_conn_to_display_video.return_value.sdfdownloadtasks.return_value.operations.return_value.get = (
response
)
result = self.hook.get_sdf_download_operation(operation_name=operation_name)
assert operation_name == result
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
)
def test_download_media_called_once(self, get_conn_to_display_video):
resource_name = "resource_name"
self.hook.download_media(resource_name=resource_name)
get_conn_to_display_video.return_value.media.return_value.download_media.assert_called_once()
@mock.patch(
"airflow.providers.google.marketing_platform.hooks."
"display_video.GoogleDisplayVideo360Hook.get_conn_to_display_video"
)
def test_download_media_called_once_with_params(self, get_conn_to_display_video):
resource_name = "resource_name"
self.hook.download_media(resource_name=resource_name)
get_conn_to_display_video.return_value.media.return_value.download_media.assert_called_once_with(
resourceName=resource_name
)
| TestGoogleDisplayVideo360Hook |
python | apache__airflow | providers/google/tests/unit/google/cloud/links/test_dataplex.py | {
"start": 4181,
"end": 5343
} | class ____:
@pytest.mark.db_test
def test_get_link(self, create_task_instance_of_operator, session, mock_supervisor_comms):
expected_url = EXPECTED_DATAPLEX_TASK_LINK
link = DataplexTaskLink()
ti = create_task_instance_of_operator(
DataplexCreateTaskOperator,
dag_id="test_link_dag",
task_id="test_link_task",
region=TEST_LOCATION,
lake_id=TEST_LAKE_ID,
project_id=TEST_PROJECT_ID,
body=TEST_LAKE_BODY,
dataplex_task_id=TEST_TASK_ID,
)
session.add(ti)
session.commit()
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key="key",
value={
"lake_id": ti.task.lake_id,
"task_id": ti.task.dataplex_task_id,
"region": ti.task.region,
"project_id": ti.task.project_id,
},
)
actual_url = link.get_link(operator=ti.task, ti_key=ti.key)
assert actual_url == expected_url
| TestDataplexTaskLink |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/chat_engine.py | {
"start": 591,
"end": 988
} | class ____(BaseEvent):
"""
StreamChatErrorEvent.
Fired when an exception is raised during the stream chat-engine operation.
Args:
exception (Exception): Exception raised during the stream chat operation.
"""
exception: Exception
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "StreamChatErrorEvent"
| StreamChatErrorEvent |
python | cython__cython | tests/run/pure_py.py | {
"start": 10763,
"end": 11042
} | class ____(object):
"""
>>> c = CClass(2)
>>> c.get_attr()
int
2
"""
cython.declare(attr=cython.int)
def __init__(self, attr):
self.attr = attr
def get_attr(self):
print(cython.typeof(self.attr))
return self.attr
| CClass |
python | numba__numba | numba/core/compiler_machinery.py | {
"start": 2612,
"end": 2704
} | class ____(CompilerPass):
""" Base class for function passes
"""
pass
| FunctionPass |
python | getsentry__sentry | src/sentry/tagstore/types.py | {
"start": 4562,
"end": 4652
} | class ____(TypedDict, total=False):
query: str | None
| TagValueSerializerResponseOptional |
python | doocs__leetcode | solution/1500-1599/1567.Maximum Length of Subarray With Positive Product/Solution2.py | {
"start": 0,
"end": 490
} | class ____:
def getMaxLen(self, nums: List[int]) -> int:
n = len(nums)
f = int(nums[0] > 0)
g = int(nums[0] < 0)
ans = f
for i in range(1, n):
ff = gg = 0
if nums[i] > 0:
ff = f + 1
gg = 0 if g == 0 else g + 1
elif nums[i] < 0:
ff = 0 if g == 0 else g + 1
gg = f + 1
f, g = ff, gg
ans = max(ans, f)
return ans
| Solution |
python | huggingface__transformers | tests/models/m2m_100/test_tokenization_m2m_100.py | {
"start": 1449,
"end": 7101
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "facebook/m2m100_418M"
tokenizer_class = M2M100Tokenizer
test_rust_tokenizer = False
test_seq2seq = False
test_sentencepiece = True
@classmethod
def setUpClass(cls):
super().setUpClass()
# `TokenizerTesterMixin` downloads the actual tokenizer in `cls.tmpdirname`.
# Use a dedicated directory for the lightweight test tokenizer to avoid mixing files.
old_tmpdirname = cls.tmpdirname
cls.tmpdirname = tempfile.mkdtemp()
vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
save_dir = Path(cls.tmpdirname)
save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])
tokenizer = M2M100Tokenizer.from_pretrained(cls.tmpdirname)
tokenizer.save_pretrained(cls.tmpdirname)
shutil.rmtree(old_tmpdirname, ignore_errors=True)
@classmethod
def get_tokenizer(cls, pretrained_name=None, **kwargs):
pretrained_name = pretrained_name or cls.tmpdirname
return M2M100Tokenizer.from_pretrained(pretrained_name, **kwargs)
def get_input_output_texts(self, tokenizer):
return (
"This is a test",
"This is a test",
)
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "</s>"
token_id = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
tokenizer = self.get_tokenizer()
vocab_keys = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0], "</s>")
self.assertEqual(vocab_keys[1], "<unk>")
self.assertEqual(vocab_keys[-1], "<s>")
# The length of the vocab keys can be different
# self.assertEqual(len(vocab_keys), tokenizer.vocab_size)
def test_full_tokenizer(self):
tokenizer = self.get_tokenizer()
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[2, 3, 4, 5, 6],
)
back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
text = tokenizer.convert_tokens_to_string(tokens)
self.assertEqual(text, "This is a test")
@slow
def test_tokenizer_integration(self):
expected_encoding = {'input_ids': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding,
model_name="facebook/m2m100_418M",
revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
)
@require_torch
@require_sentencepiece
@require_tokenizers
| M2M100TokenizationTest |
python | sympy__sympy | sympy/core/kind.py | {
"start": 5309,
"end": 11540
} | class ____:
"""
Dispatcher to select a kind from multiple kinds by binary dispatching.
.. notes::
This approach is experimental, and can be replaced or deleted in
the future.
Explanation
===========
SymPy object's :obj:`sympy.core.kind.Kind()` vaguely represents the
algebraic structure where the object belongs to. Therefore, with
given operation, we can always find a dominating kind among the
different kinds. This class selects the kind by recursive binary
dispatching. If the result cannot be determined, ``UndefinedKind``
is returned.
Examples
========
Multiplication between numbers return number.
>>> from sympy import NumberKind, Mul
>>> Mul._kind_dispatcher(NumberKind, NumberKind)
NumberKind
Multiplication between number and unknown-kind object returns unknown kind.
>>> from sympy import UndefinedKind
>>> Mul._kind_dispatcher(NumberKind, UndefinedKind)
UndefinedKind
Any number and order of kinds is allowed.
>>> Mul._kind_dispatcher(UndefinedKind, NumberKind)
UndefinedKind
>>> Mul._kind_dispatcher(NumberKind, UndefinedKind, NumberKind)
UndefinedKind
Since matrix forms a vector space over scalar field, multiplication
between matrix with numeric element and number returns matrix with
numeric element.
>>> from sympy.matrices import MatrixKind
>>> Mul._kind_dispatcher(MatrixKind(NumberKind), NumberKind)
MatrixKind(NumberKind)
If a matrix with number element and another matrix with unknown-kind
element are multiplied, we know that the result is matrix but the
kind of its elements is unknown.
>>> Mul._kind_dispatcher(MatrixKind(NumberKind), MatrixKind(UndefinedKind))
MatrixKind(UndefinedKind)
Parameters
==========
name : str
commutative : bool, optional
If True, binary dispatch will be automatically registered in
reversed order as well.
doc : str, optional
"""
def __init__(self, name, commutative=False, doc=None):
self.name = name
self.doc = doc
self.commutative = commutative
self._dispatcher = Dispatcher(name)
def __repr__(self):
return "<dispatched %s>" % self.name
def register(self, *types, **kwargs):
"""
Register the binary dispatcher for two kind classes.
If *self.commutative* is ``True``, signature in reversed order is
automatically registered as well.
"""
on_ambiguity = kwargs.pop("on_ambiguity", None)
if not on_ambiguity:
if self.commutative:
on_ambiguity = ambiguity_register_error_ignore_dup
else:
on_ambiguity = ambiguity_warn
kwargs.update(on_ambiguity=on_ambiguity)
if not len(types) == 2:
raise RuntimeError(
"Only binary dispatch is supported, but got %s types: <%s>." % (
len(types), str_signature(types)
))
def _(func):
self._dispatcher.add(types, func, **kwargs)
if self.commutative:
self._dispatcher.add(tuple(reversed(types)), func, **kwargs)
return _
def __call__(self, *args, **kwargs):
if self.commutative:
kinds = frozenset(args)
else:
kinds = []
prev = None
for a in args:
if prev is not a:
kinds.append(a)
prev = a
return self.dispatch_kinds(kinds, **kwargs)
@cacheit
def dispatch_kinds(self, kinds, **kwargs):
# Quick exit for the case where all kinds are same
if len(kinds) == 1:
result, = kinds
if not isinstance(result, Kind):
raise RuntimeError("%s is not a kind." % result)
return result
for i,kind in enumerate(kinds):
if not isinstance(kind, Kind):
raise RuntimeError("%s is not a kind." % kind)
if i == 0:
result = kind
else:
prev_kind = result
t1, t2 = type(prev_kind), type(kind)
k1, k2 = prev_kind, kind
func = self._dispatcher.dispatch(t1, t2)
if func is None and self.commutative:
# try reversed order
func = self._dispatcher.dispatch(t2, t1)
k1, k2 = k2, k1
if func is None:
# unregistered kind relation
result = UndefinedKind
else:
result = func(k1, k2)
if not isinstance(result, Kind):
raise RuntimeError(
"Dispatcher for {!r} and {!r} must return a Kind, but got {!r}".format(
prev_kind, kind, result
))
return result
@property
def __doc__(self):
docs = [
"Kind dispatcher : %s" % self.name,
"Note that support for this is experimental. See the docs for :class:`KindDispatcher` for details"
]
if self.doc:
docs.append(self.doc)
s = "Registered kind classes\n"
s += '=' * len(s)
docs.append(s)
amb_sigs = []
typ_sigs = defaultdict(list)
for sigs in self._dispatcher.ordering[::-1]:
key = self._dispatcher.funcs[sigs]
typ_sigs[key].append(sigs)
for func, sigs in typ_sigs.items():
sigs_str = ', '.join('<%s>' % str_signature(sig) for sig in sigs)
if isinstance(func, RaiseNotImplementedError):
amb_sigs.append(sigs_str)
continue
s = 'Inputs: %s\n' % sigs_str
s += '-' * len(s) + '\n'
if func.__doc__:
s += func.__doc__.strip()
else:
s += func.__name__
docs.append(s)
if amb_sigs:
s = "Ambiguous kind classes\n"
s += '=' * len(s)
docs.append(s)
s = '\n'.join(amb_sigs)
docs.append(s)
return '\n\n'.join(docs)
| KindDispatcher |
python | PrefectHQ__prefect | src/prefect/events/schemas/automations.py | {
"start": 7755,
"end": 7864
} | class ____(Enum):
lateness = "lateness"
duration = "duration"
successes = "successes"
| PrefectMetric |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/unit_tests/integration/api/bulk.py | {
"start": 5069,
"end": 7148
} | class ____:
def __init__(self) -> None:
self._template = {
"data": {
"node": {},
"extensions": {
"cost": {
"requestedQueryCost": 1,
"actualQueryCost": 1,
"throttleStatus": {"maximumAvailable": 2000.0, "currentlyAvailable": 1999, "restoreRate": 100.0},
}
},
}
}
def with_running_status(self, bulk_operation_id: str, object_count: str = "10") -> "JobStatusResponseBuilder":
self._template["data"]["node"] = {
"id": bulk_operation_id,
"status": "RUNNING",
"errorCode": None,
"createdAt": "2024-05-28T18:57:54Z",
"objectCount": object_count,
"fileSize": None,
"url": None,
"partialDataUrl": None,
}
return self
def with_completed_status(self, bulk_operation_id: str, job_result_url: str, object_count: str = "4") -> "JobStatusResponseBuilder":
self._template["data"]["node"] = {
"id": bulk_operation_id,
"status": "COMPLETED",
"errorCode": None,
"createdAt": "2024-05-05T00:45:48Z",
"objectCount": object_count,
"fileSize": "774",
"url": job_result_url,
"partialDataUrl": None,
}
return self
def with_canceled_status(self, bulk_operation_id: str, job_result_url: str, object_count: str = "4") -> "JobStatusResponseBuilder":
self._template["data"]["node"] = {
"id": bulk_operation_id,
"status": "CANCELED",
"errorCode": None,
"createdAt": "2024-05-05T00:45:48Z",
"objectCount": object_count,
"fileSize": "774",
"url": job_result_url,
"partialDataUrl": None,
}
return self
def build(self) -> HttpResponse:
return HttpResponse(json.dumps(self._template), status_code=200)
| JobStatusResponseBuilder |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 3477,
"end": 3612
} | class ____(CommentsNode):
"""Extends CommentsNode to include replies in discussions."""
replies: Replies
| DiscussionsCommentsNode |
python | pytorch__pytorch | torch/testing/_internal/distributed/multi_threaded_pg.py | {
"start": 7264,
"end": 7915
} | class ____:
def __init__(self, dst):
self.dst = dst
@torch.no_grad()
def work(self, data):
# Can't handle gather with multiple tensor lists
assert len(data[self.dst][0]) == 1
out_tensor_list = data[self.dst][0][0]
for rank, each_rank_data in enumerate(data):
src_in_tensor_list = each_rank_data[1]
# Can't handle gather with multiple tensor lists
assert len(src_in_tensor_list) == 1
dest_tensor = out_tensor_list[rank]
# See Note [Hide collectives mutation from autograd]
dest_tensor.detach().copy_(src_in_tensor_list[0])
| Gather |
python | huggingface__transformers | src/transformers/models/encodec/modeling_encodec.py | {
"start": 2867,
"end": 6977
} | class ____(nn.Module):
"""Conv1d with asymmetric or causal padding and normalization."""
def __init__(
self, config, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, dilation: int = 1
):
super().__init__()
self.causal = config.use_causal_conv
self.pad_mode = config.pad_mode
self.norm_type = config.norm_type
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
)
# warn user on unusual setup between dilation and stride
if stride > 1 and dilation > 1:
logger.warning(
"EncodecConv1d has been initialized with stride > 1 and dilation > 1"
f" (kernel_size={kernel_size} stride={stride}, dilation={dilation})."
)
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, stride, dilation=dilation)
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
if self.norm_type == "weight_norm":
self.conv = weight_norm(self.conv)
elif self.norm_type == "time_group_norm":
self.norm = nn.GroupNorm(1, out_channels)
kernel_size = self.conv.kernel_size[0]
stride = torch.tensor(self.conv.stride[0], dtype=torch.int64)
dilation = self.conv.dilation[0]
# Effective kernel size with dilations.
kernel_size = torch.tensor((kernel_size - 1) * dilation + 1, dtype=torch.int64)
self.register_buffer("stride", stride, persistent=False)
self.register_buffer("kernel_size", kernel_size, persistent=False)
self.register_buffer("padding_total", kernel_size - stride, persistent=False)
def _get_extra_padding_for_conv1d(
self,
hidden_states: torch.Tensor,
) -> torch.Tensor:
"""See `pad_for_conv1d`."""
length = hidden_states.shape[-1]
n_frames = (length - self.kernel_size + self.padding_total) / self.stride + 1
n_frames = torch.ceil(n_frames).to(torch.int64) - 1
ideal_length = n_frames * self.stride + self.kernel_size - self.padding_total
return ideal_length - length
@staticmethod
def _pad1d(hidden_states: torch.Tensor, paddings: tuple[int, int], mode: str = "zero", value: float = 0.0):
"""Tiny wrapper around torch.nn.functional.pad, just to allow for reflect padding on small input.
If this is the case, we insert extra 0 padding to the right before the reflection happens.
"""
length = hidden_states.shape[-1]
padding_left, padding_right = paddings
if mode != "reflect":
return nn.functional.pad(hidden_states, paddings, mode, value)
max_pad = max(padding_left, padding_right)
extra_pad = 0
if length <= max_pad:
extra_pad = max_pad - length + 1
hidden_states = nn.functional.pad(hidden_states, (0, extra_pad))
padded = nn.functional.pad(hidden_states, paddings, mode, value)
end = padded.shape[-1] - extra_pad
return padded[..., :end]
def forward(self, hidden_states):
extra_padding = self._get_extra_padding_for_conv1d(hidden_states)
if self.causal:
# Left padding for causal
hidden_states = self._pad1d(hidden_states, (self.padding_total, extra_padding), mode=self.pad_mode)
else:
# Asymmetric padding required for odd strides
padding_right = self.padding_total // 2
padding_left = self.padding_total - padding_right
hidden_states = self._pad1d(
hidden_states, (padding_left, padding_right + extra_padding), mode=self.pad_mode
)
hidden_states = self.conv(hidden_states)
if self.norm_type == "time_group_norm":
hidden_states = self.norm(hidden_states)
return hidden_states
| EncodecConv1d |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict18.py | {
"start": 1404,
"end": 1650
} | class ____(TD5[_T1], Generic[_T1]):
z: str
def func5(a: TD7[Literal[1]]) -> Literal[1]:
return a["x"]
func5({"x": 1, "y": 1, "z": "a"})
f3: TD7[Literal[1]] = {"x": 1, "y": 1, "z": "a"}
reveal_type(func5({"x": 1, "y": 1, "z": "a"}))
| TD7 |
python | kubernetes-client__python | kubernetes/base/leaderelection/leaderelection_test.py | {
"start": 7969,
"end": 9900
} | class ____:
def __init__(self, name, namespace, identity, shared_lock, on_create=None, on_update=None, on_change=None, on_try_update=None):
# self.leader_record is shared between two MockResourceLock objects
self.leader_record = []
self.renew_count = 0
self.renew_count_max = 4
self.name = name
self.namespace = namespace
self.identity = str(identity)
self.lock = shared_lock
self.on_create = on_create
self.on_update = on_update
self.on_change = on_change
self.on_try_update = on_try_update
def get(self, name, namespace):
self.lock.acquire()
try:
if self.leader_record:
return True, self.leader_record[0]
ApiException.body = json.dumps({'code': 404})
return False, ApiException
finally:
self.lock.release()
def create(self, name, namespace, election_record):
self.lock.acquire()
try:
if len(self.leader_record) == 1:
return False
self.leader_record.append(election_record)
self.on_create()
self.renew_count += 1
return True
finally:
self.lock.release()
def update(self, name, namespace, updated_record):
self.lock.acquire()
try:
if self.on_try_update:
self.on_try_update()
if self.renew_count >= self.renew_count_max:
return False
old_record = self.leader_record[0]
self.leader_record[0] = updated_record
self.on_update()
if old_record.holder_identity != updated_record.holder_identity:
self.on_change()
self.renew_count += 1
return True
finally:
self.lock.release()
if __name__ == '__main__':
unittest.main()
| MockResourceLock |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 58459,
"end": 58610
} | class ____(sgqlc.types.Scalar):
"""An ISO-8601 encoded UTC date string with millisecond precision."""
__schema__ = github_schema
| PreciseDateTime |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_datasync.py | {
"start": 3133,
"end": 5761
} | class ____:
# Runs once for each test
def setup_method(self, method):
args = {
"owner": "airflow",
"start_date": DEFAULT_DATE,
}
self.dag = DAG(
TEST_DAG_ID + "test_schedule_dag_once",
default_args=args,
schedule="@once",
)
self.client = boto3.client("datasync", region_name="us-east-1")
self.datasync = None
self.source_location_arn = self.client.create_location_smb(
**MOCK_DATA["create_source_location_kwargs"]
)["LocationArn"]
self.destination_location_arn = self.client.create_location_s3(
**MOCK_DATA["create_destination_location_kwargs"]
)["LocationArn"]
self.task_arn = self.client.create_task(
SourceLocationArn=self.source_location_arn,
DestinationLocationArn=self.destination_location_arn,
)["TaskArn"]
def teardown_method(self, method):
# Delete all tasks:
tasks = self.client.list_tasks()
for task in tasks["Tasks"]:
self.client.delete_task(TaskArn=task["TaskArn"])
# Delete all locations:
locations = self.client.list_locations()
for location in locations["Locations"]:
self.client.delete_location(LocationArn=location["LocationArn"])
self.client = None
def test_generic_params():
op = DataSyncOperator(
task_id="generic-task",
task_arn="arn:fake",
source_location_uri="fake://source",
destination_location_uri="fake://destination",
aws_conn_id="fake-conn-id",
region_name="cn-north-1",
verify=False,
botocore_config={"read_timeout": 42},
# Non-generic hook params
wait_interval_seconds=42,
)
assert op.hook.client_type == "datasync"
assert op.hook.resource_type is None
assert op.hook.aws_conn_id == "fake-conn-id"
assert op.hook._region_name == "cn-north-1"
assert op.hook._verify is False
assert op.hook._config is not None
assert op.hook._config.read_timeout == 42
assert op.hook.wait_interval_seconds == 42
op = DataSyncOperator(
task_id="generic-task",
task_arn="arn:fake",
source_location_uri="fake://source",
destination_location_uri="fake://destination",
)
assert op.hook.aws_conn_id == "aws_default"
assert op.hook._region_name is None
assert op.hook._verify is None
assert op.hook._config is None
assert op.hook.wait_interval_seconds is not None
@mock_aws
@mock.patch.object(DataSyncHook, "get_conn")
| DataSyncTestCaseBase |
python | modin-project__modin | modin/pandas/accessor.py | {
"start": 3147,
"end": 4338
} | class ____(BaseSparseAccessor):
@classmethod
def _validate(cls, data: DataFrame):
"""
Verify that `data` dtypes are compatible with `pandas.core.dtypes.dtypes.SparseDtype`.
Parameters
----------
data : DataFrame
Object to check.
Raises
------
AttributeError
If check fails.
"""
dtypes = data.dtypes
if not all(isinstance(t, SparseDtype) for t in dtypes):
raise AttributeError(cls._validation_msg)
@property
def density(self):
return self._parent._default_to_pandas(pandas.DataFrame.sparse).density
@classmethod
def from_spmatrix(cls, data, index=None, columns=None):
ErrorMessage.default_to_pandas("`from_spmatrix`")
return pd.DataFrame(
pandas.DataFrame.sparse.from_spmatrix(data, index=index, columns=columns)
)
def to_dense(self):
return self._default_to_pandas(pandas.DataFrame.sparse.to_dense)
def to_coo(self):
return self._default_to_pandas(pandas.DataFrame.sparse.to_coo)
@_inherit_docstrings(pandas.core.arrays.sparse.accessor.SparseAccessor)
| SparseFrameAccessor |
python | huggingface__transformers | tests/models/xlnet/test_modeling_xlnet.py | {
"start": 1340,
"end": 16563
} | class ____:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
mem_len=10,
clamp_len=-1,
reuse_len=15,
is_training=True,
use_labels=True,
vocab_size=99,
cutoffs=[10, 50, 80],
hidden_size=32,
num_attention_heads=4,
d_inner=128,
num_hidden_layers=2,
type_sequence_label_size=2,
bi_data=False,
same_length=False,
initializer_range=0.05,
seed=1,
type_vocab_size=2,
bos_token_id=1,
eos_token_id=2,
pad_token_id=5,
num_choices=4,
):
self.parent = parent
self.batch_size = 14
self.seq_length = 7
self.mem_len = 10
# self.key_len = seq_length + mem_len
self.clamp_len = -1
self.reuse_len = 15
self.is_training = True
self.use_labels = True
self.vocab_size = 99
self.cutoffs = [10, 50, 80]
self.hidden_size = 32
self.num_attention_heads = 4
self.d_inner = 128
self.num_hidden_layers = 3
self.type_sequence_label_size = 2
self.bi_data = False
self.same_length = False
self.initializer_range = 0.05
self.seed = 1
self.type_vocab_size = 2
self.bos_token_id = 1
self.eos_token_id = 2
self.pad_token_id = 5
self.num_choices = 4
def prepare_config_and_inputs(self):
input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
segment_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
input_mask = random_attention_mask([self.batch_size, self.seq_length])
input_ids_q = ids_tensor([self.batch_size, self.seq_length + 1], self.vocab_size)
perm_mask = torch.zeros(
self.batch_size,
self.seq_length + 1,
self.seq_length + 1,
dtype=torch.float,
device=torch_device,
)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros(
self.batch_size,
1,
self.seq_length + 1,
dtype=torch.float,
device=torch_device,
)
target_mapping[:, 0, -1] = 1.0 # predict last token
sequence_labels = None
lm_labels = None
is_impossible_labels = None
token_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
is_impossible_labels = ids_tensor([self.batch_size], 2).float()
token_labels = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
config = self.get_config()
return (
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
)
def get_config(self):
return XLNetConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
n_head=self.num_attention_heads,
d_inner=self.d_inner,
n_layer=self.num_hidden_layers,
mem_len=self.mem_len,
clamp_len=self.clamp_len,
same_length=self.same_length,
reuse_len=self.reuse_len,
bi_data=self.bi_data,
initializer_range=self.initializer_range,
num_labels=self.type_sequence_label_size,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
eos_token_id=self.eos_token_id,
)
def set_seed(self):
random.seed(self.seed)
torch.manual_seed(self.seed)
def create_and_check_xlnet_base_model(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids_1, input_mask=input_mask)
result = model(input_ids_1, attention_mask=input_mask)
result = model(input_ids_1, token_type_ids=segment_ids)
result = model(input_ids_1)
config.mem_len = 0
model = XLNetModel(config)
model.to(torch_device)
model.eval()
base_model_output = model(input_ids_1)
self.parent.assertEqual(len(base_model_output), 2)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertListEqual(
[mem.shape for mem in result.mems],
[(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
)
def create_and_check_use_mems_train(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetForSequenceClassification(config)
model.to(torch_device)
model.train()
train_size = input_ids_1.shape[0]
batch_size = 4
for i in range(train_size // batch_size + 1):
input_ids = input_ids_1[i : (i + 1) * batch_size]
labels = sequence_labels[i : (i + 1) * batch_size]
outputs = model(input_ids=input_ids, labels=labels, return_dict=True)
self.parent.assertIsNone(outputs.mems)
self.parent.assertIsNotNone(outputs.loss)
def create_and_check_xlnet_model_use_mems(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetModel(config=config)
model.to(torch_device)
model.eval()
# first forward pass
causal_mask = torch.ones(
input_ids_1.shape[0],
input_ids_1.shape[1],
input_ids_1.shape[1],
dtype=torch.float,
device=torch_device,
)
causal_mask = torch.triu(causal_mask, diagonal=0)
outputs_cache = model(input_ids_1, use_mems=True, perm_mask=causal_mask)
outputs_no_cache = model(input_ids_1, use_mems=False, perm_mask=causal_mask)
outputs_conf = model(input_ids_1)
self.parent.assertTrue(len(outputs_cache) == len(outputs_conf))
self.parent.assertTrue(len(outputs_cache) == len(outputs_no_cache) + 1)
output, mems = outputs_cache.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids_1, next_tokens], dim=-1)
# causal mask
causal_mask = torch.ones(
input_ids_1.shape[0],
input_ids_1.shape[1] + 1,
input_ids_1.shape[1] + 1,
dtype=torch.float,
device=torch_device,
)
causal_mask = torch.triu(causal_mask, diagonal=0)
single_mask = torch.ones(input_ids_1.shape[0], 1, 1, dtype=torch.float, device=torch_device)
# second forward pass
output_from_no_past = model(next_input_ids, perm_mask=causal_mask)["last_hidden_state"]
output_from_past = model(next_tokens, mems=mems, perm_mask=single_mask)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_xlnet_base_model_with_att_output(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetModel(config)
model.to(torch_device)
model.eval()
attentions = model(input_ids_1, target_mapping=target_mapping, output_attentions=True)["attentions"]
self.parent.assertEqual(len(attentions), config.n_layer)
self.parent.assertIsInstance(attentions[0], tuple)
self.parent.assertEqual(len(attentions[0]), 2)
self.parent.assertTrue(attentions[0][0].shape, attentions[0][0].shape)
def create_and_check_xlnet_lm_head(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetLMHeadModel(config)
model.to(torch_device)
model.eval()
result1 = model(input_ids_1, token_type_ids=segment_ids, labels=lm_labels)
result2 = model(input_ids_2, token_type_ids=segment_ids, labels=lm_labels, mems=result1.mems)
_ = model(input_ids_q, perm_mask=perm_mask, target_mapping=target_mapping)
self.parent.assertEqual(result1.loss.shape, ())
self.parent.assertEqual(result1.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in result1.mems],
[(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
)
self.parent.assertEqual(result2.loss.shape, ())
self.parent.assertEqual(result2.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertListEqual(
[mem.shape for mem in result2.mems],
[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
)
def create_and_check_xlnet_qa(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetForQuestionAnswering(config)
model.to(torch_device)
model.eval()
result = model(input_ids_1)
result_with_labels = model(
input_ids_1,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
p_mask=input_mask,
)
result_with_labels = model(
input_ids_1,
start_positions=sequence_labels,
end_positions=sequence_labels,
cls_index=sequence_labels,
is_impossible=is_impossible_labels,
)
total_loss, mems = result_with_labels.to_tuple()
result_with_labels = model(
input_ids_1,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
total_loss, mems = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
)
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
)
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
self.parent.assertListEqual(
[mem.shape for mem in result.mems],
[(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
)
def create_and_check_xlnet_token_classif(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids_1)
result = model(input_ids_1, labels=token_labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.type_sequence_label_size))
self.parent.assertListEqual(
[mem.shape for mem in result.mems],
[(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
)
def create_and_check_xlnet_sequence_classif(
self,
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
):
model = XLNetForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids_1)
result = model(input_ids_1, labels=sequence_labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
self.parent.assertListEqual(
[mem.shape for mem in result.mems],
[(self.seq_length, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids_1,
input_ids_2,
input_ids_q,
perm_mask,
input_mask,
target_mapping,
segment_ids,
lm_labels,
sequence_labels,
is_impossible_labels,
token_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids_1}
return config, inputs_dict
@require_torch
| XLNetModelTester |
python | pypa__pip | src/pip/_internal/models/direct_url.py | {
"start": 1829,
"end": 2489
} | class ____:
name: ClassVar = "vcs_info"
vcs: str
commit_id: str
requested_revision: str | None = None
@classmethod
def _from_dict(cls, d: dict[str, Any] | None) -> VcsInfo | None:
if d is None:
return None
return cls(
vcs=_get_required(d, str, "vcs"),
commit_id=_get_required(d, str, "commit_id"),
requested_revision=_get(d, str, "requested_revision"),
)
def _to_dict(self) -> dict[str, Any]:
return _filter_none(
vcs=self.vcs,
requested_revision=self.requested_revision,
commit_id=self.commit_id,
)
| VcsInfo |
python | jazzband__django-oauth-toolkit | oauth2_provider/models.py | {
"start": 11438,
"end": 13644
} | class ____(models.Model):
"""
A Grant instance represents a token with a short lifetime that can
be swapped for an access token, as described in :rfc:`4.1.2`
Fields:
* :attr:`user` The Django user who requested the grant
* :attr:`code` The authorization code generated by the authorization server
* :attr:`application` Application instance this grant was asked for
* :attr:`expires` Expire time in seconds, defaults to
:data:`settings.AUTHORIZATION_CODE_EXPIRE_SECONDS`
* :attr:`redirect_uri` Self explained
* :attr:`scope` Required scopes, optional
* :attr:`code_challenge` PKCE code challenge
* :attr:`code_challenge_method` PKCE code challenge transform algorithm
"""
CODE_CHALLENGE_PLAIN = "plain"
CODE_CHALLENGE_S256 = "S256"
CODE_CHALLENGE_METHODS = ((CODE_CHALLENGE_PLAIN, "plain"), (CODE_CHALLENGE_S256, "S256"))
id = models.BigAutoField(primary_key=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="%(app_label)s_%(class)s"
)
code = models.CharField(max_length=255, unique=True) # code comes from oauthlib
application = models.ForeignKey(oauth2_settings.APPLICATION_MODEL, on_delete=models.CASCADE)
expires = models.DateTimeField()
redirect_uri = models.TextField()
scope = models.TextField(blank=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
code_challenge = models.CharField(max_length=128, blank=True, default="")
code_challenge_method = models.CharField(
max_length=10, blank=True, default="", choices=CODE_CHALLENGE_METHODS
)
nonce = models.CharField(max_length=255, blank=True, default="")
claims = models.TextField(blank=True)
def is_expired(self):
"""
Check token expiration with timezone awareness
"""
if not self.expires:
return True
return timezone.now() >= self.expires
def redirect_uri_allowed(self, uri):
return uri == self.redirect_uri
def __str__(self):
return self.code
class Meta:
abstract = True
| AbstractGrant |
python | walkccc__LeetCode | solutions/378. Kth Smallest Element in a Sorted Matrix/378.py | {
"start": 0,
"end": 426
} | class ____:
def kthSmallest(self, matrix: list[list[int]], k: int) -> int:
minHeap = [] # (matrix[i][j], i, j)
i = 0
while i < k and i < len(matrix):
heapq.heappush(minHeap, (matrix[i][0], i, 0))
i += 1
while k > 1:
k -= 1
_, i, j = heapq.heappop(minHeap)
if j + 1 < len(matrix[0]):
heapq.heappush(minHeap, (matrix[i][j + 1], i, j + 1))
return minHeap[0][0]
| Solution |
python | docker__docker-py | tests/integration/models_networks_test.py | {
"start": 96,
"end": 1662
} | class ____(BaseIntegrationTest):
def test_create(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
network = client.networks.create(name, labels={'foo': 'bar'})
self.tmp_networks.append(network.id)
assert network.name == name
assert network.attrs['Labels']['foo'] == "bar"
def test_get(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
network_id = client.networks.create(name).id
self.tmp_networks.append(network_id)
network = client.networks.get(network_id)
assert network.name == name
def test_list_remove(self):
client = docker.from_env(version=TEST_API_VERSION)
name = helpers.random_name()
network = client.networks.create(name)
self.tmp_networks.append(network.id)
assert network.id in [n.id for n in client.networks.list()]
assert network.id not in [
n.id for n in
client.networks.list(ids=["fdhjklfdfdshjkfds"])
]
assert network.id in [
n.id for n in
client.networks.list(ids=[network.id])
]
assert network.id not in [
n.id for n in
client.networks.list(names=["fdshjklfdsjhkl"])
]
assert network.id in [
n.id for n in
client.networks.list(names=[name])
]
network.remove()
assert network.id not in [n.id for n in client.networks.list()]
| NetworkCollectionTest |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/keyfunc_dict.py | {
"start": 380,
"end": 638
} | class ____(Base):
__tablename__ = "item"
id: Mapped[int] = mapped_column(primary_key=True)
notes: Mapped[Dict[str, "Note"]] = relationship(
collection_class=attribute_keyed_dict("keyword"),
cascade="all, delete-orphan",
)
| Item |
python | rq__rq | tests/test_intermediate_queue.py | {
"start": 378,
"end": 10685
} | class ____(RQTestCase):
def setUp(self):
super().setUp()
self.queue = Queue('foo', connection=self.connection)
self.intermediate_queue = IntermediateQueue(self.queue.key, connection=self.connection)
def test_set_first_seen(self):
"""Ensure that the first_seen attribute is set correctly."""
intermediate_queue = self.intermediate_queue
job = self.queue.enqueue(say_hello)
# set_first_seen() should only succeed the first time around
self.assertTrue(intermediate_queue.set_first_seen(job.id))
self.assertFalse(intermediate_queue.set_first_seen(job.id))
# It should succeed again after deleting the key
self.connection.delete(intermediate_queue.get_first_seen_key(job.id))
self.assertTrue(intermediate_queue.set_first_seen(job.id))
def test_get_first_seen(self):
"""Ensure that the first_seen attribute is set correctly."""
intermediate_queue = self.intermediate_queue
job = self.queue.enqueue(say_hello)
self.assertIsNone(intermediate_queue.get_first_seen(job.id))
# Check first seen was set correctly
intermediate_queue.set_first_seen(job.id)
timestamp = intermediate_queue.get_first_seen(job.id)
assert timestamp
self.assertLess(datetime.now(tz=timezone.utc) - timestamp, timedelta(seconds=5))
def test_should_be_cleaned_up(self):
"""Job in the intermediate queue should be cleaned up if it was seen more than 1 minute ago."""
intermediate_queue = self.intermediate_queue
job = self.queue.enqueue(say_hello)
# Returns False if there's no first seen timestamp
self.assertFalse(intermediate_queue.should_be_cleaned_up(job.id))
# Returns False since first seen timestamp is less than 1 minute ago
intermediate_queue.set_first_seen(job.id)
self.assertFalse(intermediate_queue.should_be_cleaned_up(job.id))
first_seen_key = intermediate_queue.get_first_seen_key(job.id)
two_minutes_ago = datetime.now(tz=timezone.utc) - timedelta(minutes=2)
self.connection.set(first_seen_key, two_minutes_ago.timestamp(), ex=10)
self.assertTrue(intermediate_queue.should_be_cleaned_up(job.id))
def test_get_job_ids(self):
"""Dequeueing job from a single queue moves job to intermediate queue."""
intermediate_queue = self.intermediate_queue
job_1 = self.queue.enqueue(say_hello)
# Ensure that the intermediate queue is empty
self.connection.delete(intermediate_queue.key)
# Job ID is not in intermediate queue
self.assertEqual(intermediate_queue.get_job_ids(), [])
result = Queue.dequeue_any([self.queue], timeout=None, connection=self.connection)
assert result
_job, queue = result
# After job is dequeued, the job ID is in the intermediate queue
self.assertEqual(intermediate_queue.get_job_ids(), [job_1.id])
# Test the blocking version
job_2 = queue.enqueue(say_hello)
result = Queue.dequeue_any([queue], timeout=1, connection=self.connection)
assert result
_job, queue = result
# After job is dequeued, the job ID is in the intermediate queue
self.assertEqual(intermediate_queue.get_job_ids(), [job_1.id, job_2.id])
# After job_1.id is removed, only job_2.id is in the intermediate queue
intermediate_queue.remove(job_1.id)
self.assertEqual(intermediate_queue.get_job_ids(), [job_2.id])
def test_cleanup_intermediate_queue_in_maintenance(self):
"""Ensure jobs stuck in the intermediate queue are cleaned up."""
intermediate_queue = self.intermediate_queue
job = self.queue.enqueue(say_hello)
self.connection.delete(intermediate_queue.key)
# If job execution fails after it's dequeued, job should be in the intermediate queue
# and it's status is still QUEUED
with patch.object(Worker, 'execute_job'):
worker = Worker(self.queue, connection=self.connection)
worker.work(burst=True)
# If worker.execute_job() does nothing, job status should be `queued`
# even though it's not in the queue, but it should be in the intermediate queue
self.assertEqual(job.get_status(), JobStatus.QUEUED)
self.assertNotIn(job.id, self.queue.get_job_ids())
self.assertEqual(intermediate_queue.get_job_ids(), [job.id])
self.assertIsNone(intermediate_queue.get_first_seen(job.id))
intermediate_queue.cleanup(worker, self.queue)
# After intermediate_queue.cleanup is called, the job should be marked as seen,
# but since it's been less than 1 minute, it should not be cleaned up
self.assertIsNotNone(intermediate_queue.get_first_seen(job.id))
self.assertFalse(intermediate_queue.should_be_cleaned_up(job.id))
self.assertEqual(intermediate_queue.get_job_ids(), [job.id])
# If we set the first seen timestamp to 2 minutes ago, the job should be cleaned up
first_seen_key = intermediate_queue.get_first_seen_key(job.id)
two_minutes_ago = datetime.now(tz=timezone.utc) - timedelta(minutes=2)
self.connection.set(first_seen_key, two_minutes_ago.timestamp(), ex=10)
intermediate_queue.cleanup(worker, self.queue)
self.assertEqual(intermediate_queue.get_job_ids(), [])
self.assertEqual(job.get_status(), 'failed')
job = self.queue.enqueue(say_hello)
worker.work(burst=True)
self.assertEqual(intermediate_queue.get_job_ids(), [job.id])
# If job is gone, it should be immediately removed from the intermediate queue
job.delete()
intermediate_queue.cleanup(worker, self.queue)
self.assertEqual(intermediate_queue.get_job_ids(), [])
def test_cleanup_intermediate_queue(self):
"""Ensure jobs stuck in the intermediate queue are cleaned up."""
intermediate_queue = self.intermediate_queue
job = self.queue.enqueue(say_hello)
self.connection.delete(intermediate_queue.key)
# If job execution fails after it's dequeued, job should be in the intermediate queue
# and it's status is still QUEUED
with patch.object(Worker, 'execute_job'):
worker = Worker(self.queue, connection=self.connection)
worker.work(burst=True)
# If worker.execute_job() does nothing, job status should be `queued`
# even though it's not in the queue, but it should be in the intermediate queue
self.assertEqual(job.get_status(), JobStatus.QUEUED)
self.assertNotIn(job.id, self.queue.get_job_ids())
self.assertEqual(intermediate_queue.get_job_ids(), [job.id])
self.assertIsNone(intermediate_queue.get_first_seen(job.id))
intermediate_queue.cleanup(worker, self.queue)
# After intermediate_queue.cleanup is called, the job should be marked as seen,
# but since it's been less than 1 minute, it should not be cleaned up
self.assertIsNotNone(intermediate_queue.get_first_seen(job.id))
self.assertFalse(intermediate_queue.should_be_cleaned_up(job.id))
self.assertEqual(intermediate_queue.get_job_ids(), [job.id])
# If we set the first seen timestamp to 2 minutes ago, the job should be cleaned up
first_seen_key = intermediate_queue.get_first_seen_key(job.id)
two_minutes_ago = datetime.now(tz=timezone.utc) - timedelta(minutes=2)
self.connection.set(first_seen_key, two_minutes_ago.timestamp(), ex=10)
intermediate_queue.cleanup(worker, self.queue)
self.assertEqual(intermediate_queue.get_job_ids(), [])
self.assertEqual(job.get_status(), 'failed')
job = self.queue.enqueue(say_hello)
worker.work(burst=True)
self.assertEqual(intermediate_queue.get_job_ids(), [job.id])
# If job is gone, it should be immediately removed from the intermediate queue
job.delete()
intermediate_queue.cleanup(worker, self.queue)
self.assertEqual(intermediate_queue.get_job_ids(), [])
def test_no_cleanup_while_in_started_queue(self):
"""Ensure jobs stuck in the intermediate queue are cleaned up."""
intermediate_queue = self.intermediate_queue
job = self.queue.enqueue(say_hello)
self.connection.delete(intermediate_queue.key)
# If job execution fails after it's dequeued, job should be in the intermediate queue
# and it's status is still QUEUED
with patch.object(Worker, 'perform_job'):
worker = Worker(self.queue, connection=self.connection)
worker.work(burst=True)
# If worker.perform_job() does nothing, job status should be `queued`
# even though it's not in the queue, but it should be in the intermediate queue
# and the job should be in the started queue (since we only mocked perform_job)
self.assertEqual(job.get_status(), JobStatus.QUEUED)
self.assertNotIn(job.id, self.queue.get_job_ids())
self.assertEqual(intermediate_queue.get_job_ids(), [job.id])
self.assertIn(job.id, job.started_job_registry.get_job_ids())
self.assertIn(
(worker.execution.job_id, worker.execution.id),
job.started_job_registry.get_job_and_execution_ids(),
)
# this should NOT remove the job from the queue, nor set the first see key
# because it's still in the "execution state".
# perform_job was mocked and never called success or failure.
intermediate_queue.cleanup(worker, self.queue)
self.assertIsNone(intermediate_queue.get_first_seen(job.id))
self.assertEqual(job.get_status(), JobStatus.QUEUED)
def test_clean_intermediate_queue_deprecation(self):
with pytest.deprecated_call():
worker = Worker(self.queue, connection=self.connection)
clean_intermediate_queue(worker, self.queue)
| TestIntermediateQueue |
python | pennersr__django-allauth | allauth/account/views.py | {
"start": 30479,
"end": 30611
} | class ____(TemplateView):
template_name = "account/verification_sent." + app_settings.TEMPLATE_EXTENSION
| EmailVerificationSentView |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail.py | {
"start": 1303,
"end": 1496
} | class ____(BaseModel):
model_config = ConfigDict(extra=Extra.forbid)
ForbidExtraModel(x=1)
# MYPY: error: Unexpected keyword argument "x" for "ForbidExtraModel" [call-arg]
| ForbidExtraModel |
python | tensorflow__tensorflow | tensorflow/lite/testing/zip_test_utils.py | {
"start": 8018,
"end": 28192
} | class ____:
"""Utility class for writing ProtoBuf like messages."""
def __init__(self, fp, name=None, parent=None):
self.fp = fp
self.indent = parent.indent if parent else 0
self.name = name
def __enter__(self):
if self.name:
self.write(self.name + " {")
self.indent += 2
return self
def __exit__(self, *exc_info):
if self.name:
self.indent -= 2
self.write("}")
return True
def write(self, data):
self.fp.write(" " * self.indent + data + "\n")
def write_field(self, key, val):
self.write(key + ": \"" + val + "\"")
def sub_message(self, name):
return TextFormatWriter(self.fp, name, self)
def write_test_cases(fp, model_name, examples):
"""Given a dictionary of `examples`, write a text format representation.
The file format is protocol-buffer-like, even though we don't use proto due
to the needs of the Android team.
Args:
fp: File-like object to write to.
model_name: Filename where the model was written to, relative to filename.
examples: Example dictionary consisting of keys "inputs" and "outputs"
Raises:
RuntimeError: Example dictionary does not have input / output names.
"""
writer = TextFormatWriter(fp)
writer.write_field("load_model", os.path.basename(model_name))
for example in examples:
inputs = []
for name in example["inputs"].keys():
if name:
inputs.append(name)
outputs = []
for name in example["outputs"].keys():
if name:
outputs.append(name)
if not (inputs and outputs):
raise RuntimeError("Empty input / output names.")
# Reshape message
with writer.sub_message("reshape") as reshape:
for name, value in example["inputs"].items():
with reshape.sub_message("input") as input_msg:
input_msg.write_field("key", name)
input_msg.write_field("value", ",".join(map(str, value.shape)))
# Invoke message
with writer.sub_message("invoke") as invoke:
for name, value in example["inputs"].items():
with invoke.sub_message("input") as input_msg:
input_msg.write_field("key", name)
input_msg.write_field("value", format_result(value))
# Expectations
for name, value in example["outputs"].items():
with invoke.sub_message("output") as output_msg:
output_msg.write_field("key", name)
output_msg.write_field("value", format_result(value))
with invoke.sub_message("output_shape") as output_shape:
output_shape.write_field("key", name)
output_shape.write_field("value",
",".join([str(dim) for dim in value.shape]))
def get_input_shapes_map(input_tensors):
"""Gets a map of input names to shapes.
Args:
input_tensors: List of input tensor tuples `(name, shape, type)`.
Returns:
{string : list of integers}.
"""
input_arrays = [tensor[0] for tensor in input_tensors]
input_shapes_list = []
for _, shape, _ in input_tensors:
dims = None
if shape:
dims = [dim.value for dim in shape.dims]
input_shapes_list.append(dims)
input_shapes = {
name: shape
for name, shape in zip(input_arrays, input_shapes_list)
if shape
}
return input_shapes
def _normalize_input_name(input_name):
"""Remove :i suffix from input tensor names."""
return input_name.split(":")[0]
def _normalize_output_name(output_name):
"""Remove :0 suffix from output tensor names."""
return output_name.split(":")[0] if output_name.endswith(
":0") else output_name
def _get_tensor_info(tensors, default_name_prefix, normalize_func):
"""Get the list of tensor name and info."""
tensor_names = []
tensor_info_map = {}
for idx, tensor in enumerate(tensors):
if not tensor.name:
tensor.name = default_name_prefix + str(idx)
tensor_info = tf.compat.v1.saved_model.utils.build_tensor_info(tensor)
tensor_name = normalize_func(tensor.name)
tensor_info_map[tensor_name] = tensor_info
tensor_names.append(tensor_name)
return tensor_names, tensor_info_map
# How many test cases we may have in a zip file. Too many test cases will
# slow down the test data generation process.
_MAX_TESTS_PER_ZIP = 500
def make_zip_of_tests(options,
test_parameters,
make_graph,
make_test_inputs,
extra_convert_options=ExtraConvertOptions(),
use_frozen_graph=False,
expected_tf_failures=0):
"""Helper to make a zip file of a bunch of TensorFlow models.
This does a cartesian product of the dictionary of test_parameters and
calls make_graph() for each item in the cartesian product set.
If the graph is built successfully, then make_test_inputs() is called to
build expected input/output value pairs. The model is then converted to
tflite, and the examples are serialized with the tflite model into a zip
file (2 files per item in the cartesian product set).
Args:
options: An Options instance.
test_parameters: Dictionary mapping to lists for each parameter.
e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
make_graph: function that takes current parameters and returns tuple
`[input1, input2, ...], [output1, output2, ...]`
make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
`output_tensors` and returns tuple `(input_values, output_values)`.
extra_convert_options: Additional convert options.
use_frozen_graph: Whether or not freeze graph before convertion.
expected_tf_failures: Number of times tensorflow is expected to fail in
executing the input graphs. In some cases it is OK for TensorFlow to fail
because the one or more combination of parameters is invalid.
Raises:
RuntimeError: if there are converter errors that can't be ignored.
"""
zip_path = os.path.join(options.output_path, options.zip_to_output)
parameter_count = 0
for parameters in test_parameters:
parameter_count += functools.reduce(
operator.mul, [len(values) for values in parameters.values()])
all_parameter_count = parameter_count
if options.multi_gen_state:
all_parameter_count += options.multi_gen_state.parameter_count
if not options.no_tests_limit and all_parameter_count > _MAX_TESTS_PER_ZIP:
raise RuntimeError(
"Too many parameter combinations for generating '%s'.\n"
"There are at least %d combinations while the upper limit is %d.\n"
"Having too many combinations will slow down the tests.\n"
"Please consider splitting the test into multiple functions.\n" %
(zip_path, all_parameter_count, _MAX_TESTS_PER_ZIP))
if options.multi_gen_state:
options.multi_gen_state.parameter_count = all_parameter_count
# TODO(aselle): Make this allow multiple inputs outputs.
if options.multi_gen_state:
archive = options.multi_gen_state.archive
else:
archive = zipfile.PyZipFile(zip_path, "w")
zip_manifest = []
convert_report = []
converter_errors = 0
processed_labels = set()
if options.make_tf_ptq_tests:
# For cases with fully_quantize is True, also generates a case with
# fully_quantize is False. Marks these cases as suitable for PTQ tests.
parameter_count = 0
for parameters in test_parameters:
if True in parameters.get("fully_quantize", []):
parameters.update({"fully_quantize": [True, False], "tf_ptq": [True]})
# TODO(b/199054047): Support 16x8 quantization in TF Quantization.
parameters.update({"quant_16x8": [False]})
parameter_count += functools.reduce(
operator.mul, [len(values) for values in parameters.values()])
if options.make_edgetpu_tests:
extra_convert_options.inference_input_type = tf.uint8
extra_convert_options.inference_output_type = tf.uint8
# Only count parameters when fully_quantize is True.
parameter_count = 0
for parameters in test_parameters:
if True in parameters.get("fully_quantize",
[]) and False in parameters.get(
"quant_16x8", [False]):
parameter_count += functools.reduce(operator.mul, [
len(values)
for key, values in parameters.items()
if key != "fully_quantize" and key != "quant_16x8"
])
label_base_path = zip_path
if options.multi_gen_state:
label_base_path = options.multi_gen_state.label_base_path
i = 1
for parameters in test_parameters:
keys = parameters.keys()
for curr in itertools.product(*parameters.values()):
label = label_base_path.replace(".zip", "_") + (",".join(
"%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
if label[0] == "/":
label = label[1:]
zip_path_label = label
if len(os.path.basename(zip_path_label)) > 245:
zip_path_label = label_base_path.replace(".zip", "_") + str(i)
i += 1
if label in processed_labels:
# Do not populate data for the same label more than once. It will cause
# errors when unzipping.
continue
processed_labels.add(label)
param_dict = dict(zip(keys, curr))
if options.make_tf_ptq_tests and not param_dict.get("tf_ptq", False):
continue
if options.make_edgetpu_tests and (not param_dict.get(
"fully_quantize", False) or param_dict.get("quant_16x8", False)):
continue
def generate_inputs_outputs(tflite_model_binary,
min_value=0,
max_value=255):
"""Generate input values and output values of the given tflite model.
Args:
tflite_model_binary: A serialized flatbuffer as a string.
min_value: min value for the input tensor.
max_value: max value for the input tensor.
Returns:
(input_values, output_values): Maps of input values and output values
built.
"""
interpreter = lite.Interpreter(model_content=tflite_model_binary)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
input_values = {}
for input_detail in input_details:
input_value = create_tensor_data(
input_detail["dtype"],
input_detail["shape"],
min_value=min_value,
max_value=max_value)
interpreter.set_tensor(input_detail["index"], input_value)
input_values.update(
{_normalize_input_name(input_detail["name"]): input_value})
interpreter.invoke()
output_details = interpreter.get_output_details()
output_values = {}
for output_detail in output_details:
output_values.update({
_normalize_output_name(output_detail["name"]):
interpreter.get_tensor(output_detail["index"])
})
return input_values, output_values
def build_example(label, param_dict_real, zip_path_label):
"""Build the model with parameter values set in param_dict_real.
Args:
label: Label of the model
param_dict_real: Parameter dictionary (arguments to the factories
make_graph and make_test_inputs)
zip_path_label: Filename in the zip
Returns:
(tflite_model_binary, report) where tflite_model_binary is the
serialized flatbuffer as a string and report is a dictionary with
keys `tflite_converter_log` (log of conversion), `tf_log` (log of tf
conversion), `converter` (a string of success status of the
conversion), `tf` (a string success status of the conversion).
"""
np.random.seed(RANDOM_SEED)
report = {
"tflite_converter": report_lib.NOTRUN,
"tf": report_lib.FAILED
}
# Build graph
report["tf_log"] = ""
report["tflite_converter_log"] = ""
tf.compat.v1.reset_default_graph()
with tf.Graph().as_default():
with tf.device("/cpu:0"):
try:
inputs, outputs = make_graph(param_dict_real)
inputs = [x for x in inputs if x is not None]
except (tf.errors.UnimplementedError,
tf.errors.InvalidArgumentError, ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
sess = tf.compat.v1.Session()
try:
baseline_inputs, baseline_outputs = (
make_test_inputs(param_dict_real, sess, inputs, outputs))
baseline_inputs = [x for x in baseline_inputs if x is not None]
# Converts baseline inputs/outputs to maps. The signature input and
# output names are set to be the same as the tensor names.
input_names = [_normalize_input_name(x.name) for x in inputs]
output_names = [_normalize_output_name(x.name) for x in outputs]
baseline_input_map = dict(zip(input_names, baseline_inputs))
baseline_output_map = dict(zip(output_names, baseline_outputs))
except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
ValueError):
report["tf_log"] += traceback.format_exc()
return None, report
report["tflite_converter"] = report_lib.FAILED
report["tf"] = report_lib.SUCCESS
# Builds a saved model with the default signature key.
input_names, tensor_info_inputs = _get_tensor_info(
inputs, "input_", _normalize_input_name)
output_tensors, tensor_info_outputs = _get_tensor_info(
outputs, "output_", _normalize_output_name)
input_tensors = [
(name, t.shape, t.dtype) for name, t in zip(input_names, inputs)
]
inference_signature = (
tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs=tensor_info_inputs,
outputs=tensor_info_outputs,
method_name="op_test"))
saved_model_dir = tempfile.mkdtemp("op_test")
saved_model_tags = [tf.saved_model.SERVING]
signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(
saved_model_dir)
builder.add_meta_graph_and_variables(
sess,
saved_model_tags,
signature_def_map={
signature_key: inference_signature,
},
strip_default_attrs=True)
builder.save(as_text=False)
# pylint: disable=g-long-ternary
graph_def = freeze_graph(
sess,
tf.compat.v1.global_variables() + inputs +
outputs) if use_frozen_graph else sess.graph_def
if "split_tflite_lstm_inputs" in param_dict_real:
extra_convert_options.split_tflite_lstm_inputs = param_dict_real[
"split_tflite_lstm_inputs"]
tflite_model_binary, converter_log = options.tflite_convert_function(
options,
saved_model_dir,
input_tensors,
output_tensors,
extra_convert_options=extra_convert_options,
test_params=param_dict_real)
report["tflite_converter"] = (
report_lib.SUCCESS
if tflite_model_binary is not None else report_lib.FAILED)
report["tflite_converter_log"] = converter_log
if options.save_graphdefs:
zipinfo = zipfile.ZipInfo(zip_path_label + ".pbtxt")
archive.writestr(zipinfo, text_format.MessageToString(graph_def),
zipfile.ZIP_DEFLATED)
if tflite_model_binary:
if options.make_edgetpu_tests:
# Set proper min max values according to input dtype.
baseline_input_map, baseline_output_map = generate_inputs_outputs(
tflite_model_binary, min_value=0, max_value=255)
zipinfo = zipfile.ZipInfo(zip_path_label + ".bin")
if sys.byteorder == "big":
tflite_model_binary = flatbuffer_utils.byte_swap_tflite_buffer(
tflite_model_binary, "big", "little"
)
archive.writestr(zipinfo, tflite_model_binary, zipfile.ZIP_DEFLATED)
example = {
"inputs": baseline_input_map,
"outputs": baseline_output_map
}
example_fp = io.StringIO()
write_examples(example_fp, [example])
zipinfo = zipfile.ZipInfo(zip_path_label + ".inputs")
archive.writestr(zipinfo, example_fp.getvalue(), zipfile.ZIP_DEFLATED)
example_fp2 = io.StringIO()
write_test_cases(example_fp2, zip_path_label + ".bin", [example])
zipinfo = zipfile.ZipInfo(zip_path_label + "_tests.txt")
archive.writestr(zipinfo, example_fp2.getvalue(),
zipfile.ZIP_DEFLATED)
zip_manifest_label = zip_path_label + " " + label
if zip_path_label == label:
zip_manifest_label = zip_path_label
zip_manifest.append(zip_manifest_label + "\n")
return tflite_model_binary, report
_, report = build_example(label, param_dict, zip_path_label)
if report["tflite_converter"] == report_lib.FAILED:
ignore_error = False
if not options.known_bugs_are_errors:
for pattern, bug_number in options.known_bugs.items():
if re.search(pattern, label):
print("Ignored converter error due to bug %s" % bug_number)
ignore_error = True
if not ignore_error:
converter_errors += 1
print("-----------------\nconverter error!\n%s\n-----------------\n" %
report["tflite_converter_log"])
convert_report.append((param_dict, report))
if not options.no_conversion_report:
report_io = io.StringIO()
report_lib.make_report_table(report_io, zip_path, convert_report)
if options.multi_gen_state:
zipinfo = zipfile.ZipInfo("report_" + options.multi_gen_state.test_name +
".html")
archive.writestr(zipinfo, report_io.getvalue())
else:
zipinfo = zipfile.ZipInfo("report.html")
archive.writestr(zipinfo, report_io.getvalue())
if options.multi_gen_state:
options.multi_gen_state.zip_manifest.extend(zip_manifest)
else:
zipinfo = zipfile.ZipInfo("manifest.txt")
archive.writestr(zipinfo, "".join(zip_manifest), zipfile.ZIP_DEFLATED)
# Log statistics of what succeeded
total_conversions = len(convert_report)
tf_success = sum(
1 for x in convert_report if x[1]["tf"] == report_lib.SUCCESS)
converter_success = sum(1 for x in convert_report
if x[1]["tflite_converter"] == report_lib.SUCCESS)
percent = 0
if tf_success > 0:
percent = float(converter_success) / float(tf_success) * 100.
tf.compat.v1.logging.info(
("Archive %s Considered %d graphs, %d TF evaluated graphs "
" and %d converted graphs (%.1f%%"), zip_path, total_conversions,
tf_success, converter_success, percent)
tf_failures = parameter_count - tf_success
if tf_failures / parameter_count > 0.8:
raise RuntimeError(("Test for '%s' is not very useful. "
"TensorFlow fails in %d percent of the cases.") %
(zip_path, int(100 * tf_failures / parameter_count)))
if tf_failures != expected_tf_failures and not (options.make_edgetpu_tests or
options.make_tf_ptq_tests):
raise RuntimeError(("Expected TF to fail %d times while generating '%s', "
"but that happened %d times") %
(expected_tf_failures, zip_path, tf_failures))
if not options.ignore_converter_errors and converter_errors > 0:
raise RuntimeError("Found %d errors while generating models" %
converter_errors)
| TextFormatWriter |
python | gevent__gevent | src/gevent/tests/test__event.py | {
"start": 2314,
"end": 2442
} | class ____(AbstractGenericGetTestCase):
def wait(self, timeout):
AsyncResult().get(timeout=timeout)
| TestAsyncResultGet |
python | huggingface__transformers | tests/models/llava_next/test_modeling_llava_next.py | {
"start": 5986,
"end": 12034
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `LlavaNextForConditionalGeneration`.
"""
all_model_classes = (
(
LlavaNextModel,
LlavaNextForConditionalGeneration,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"image-text-to-text": LlavaNextForConditionalGeneration,
"any-to-any": LlavaNextForConditionalGeneration,
}
if is_torch_available()
else {}
)
_is_composite = True
def setUp(self):
self.model_tester = LlavaNextVisionText2TextModelTester(self)
common_properties = ["image_token_index", "vision_feature_layer", "image_seq_length"]
self.config_tester = ConfigTester(
self, config_class=LlavaNextConfig, has_text_modality=False, common_properties=common_properties
)
def test_config(self):
self.config_tester.run_common_tests()
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images don't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model.eval()
curr_input_dict = copy.deepcopy(input_dict) # in=place modifications further
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
curr_input_dict["image_sizes"] = curr_input_dict["image_sizes"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:1]
image_sizes = curr_input_dict["image_sizes"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
image_sizes = torch.cat([image_sizes, image_sizes], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values, image_sizes=image_sizes)
def test_odd_sized_image(self):
# prepare model configuration
config = self.model_tester.get_config()
# prepare input
num_image_tokens = 24
pixel_values = floats_tensor([1, 5, 3, config.vision_config.image_size, config.vision_config.image_size])
input_ids = ids_tensor([1, 64], config.text_config.vocab_size - 2) + 2
input_ids[:, :num_image_tokens] = config.image_token_index
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
inputs_dict = {
"pixel_values": pixel_values,
"image_sizes": torch.tensor([[13, 16]]), # odd-sized image
"input_ids": input_ids,
"attention_mask": attention_mask,
}
# forward with odd-sized image input
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model(**inputs_dict)
@parameterized.expand(
[
(-1,),
([-1],),
([-1, -2],),
],
)
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision_feature_layer
num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer)
hidden_size = config.vision_config.hidden_size
expected_features = hidden_size * num_feature_layers
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
# We should have the right number of input features,
# and should be able to run a forward pass without exploding
base_model = getattr(model, "model", model)
assert base_model.multi_modal_projector.linear_1.in_features == expected_features
model(**input_dict)
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(
"VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test"
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@require_torch
| LlavaNextForConditionalGenerationModelTest |
python | langchain-ai__langchain | libs/partners/xai/langchain_xai/chat_models.py | {
"start": 1186,
"end": 23490
} | class ____(BaseChatOpenAI): # type: ignore[override]
r"""ChatXAI chat model.
Refer to [xAI's documentation](https://docs.x.ai/docs/api-reference#chat-completions)
for more nuanced details on the API's behavior and supported parameters.
Setup:
Install `langchain-xai` and set environment variable `XAI_API_KEY`.
```bash
pip install -U langchain-xai
export XAI_API_KEY="your-api-key"
```
Key init args — completion params:
model:
Name of model to use.
temperature:
Sampling temperature between `0` and `2`. Higher values mean more random completions,
while lower values (like `0.2`) mean more focused and deterministic completions.
(Default: `1`.)
max_tokens:
Max number of tokens to generate. Refer to your [model's documentation](https://docs.x.ai/docs/models#model-pricing)
for the maximum number of tokens it can generate.
logprobs:
Whether to return logprobs.
Key init args — client params:
timeout:
Timeout for requests.
max_retries:
Max number of retries.
api_key:
xAI API key. If not passed in will be read from env var `XAI_API_KEY`.
Instantiate:
```python
from langchain_xai import ChatXAI
model = ChatXAI(
model="grok-4",
temperature=0,
max_tokens=None,
timeout=None,
max_retries=2,
# api_key="...",
# other params...
)
```
Invoke:
```python
messages = [
(
"system",
"You are a helpful translator. Translate the user sentence to French.",
),
("human", "I love programming."),
]
model.invoke(messages)
```
```python
AIMessage(
content="J'adore la programmation.",
response_metadata={
"token_usage": {
"completion_tokens": 9,
"prompt_tokens": 32,
"total_tokens": 41,
},
"model_name": "grok-4",
"system_fingerprint": None,
"finish_reason": "stop",
"logprobs": None,
},
id="run-168dceca-3b8b-4283-94e3-4c739dbc1525-0",
usage_metadata={
"input_tokens": 32,
"output_tokens": 9,
"total_tokens": 41,
},
)
```
Stream:
```python
for chunk in model.stream(messages):
print(chunk.text, end="")
```
```python
content='J' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content="'" id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content='ad' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content='ore' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content=' la' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content=' programm' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content='ation' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content='.' id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
content='' response_metadata={'finish_reason': 'stop', 'model_name': 'grok-4'} id='run-1bc996b5-293f-4114-96a1-e0f755c05eb9'
```
Async:
```python
await model.ainvoke(messages)
# stream:
# async for chunk in (await model.astream(messages))
# batch:
# await model.abatch([messages])
```
```python
AIMessage(
content="J'adore la programmation.",
response_metadata={
"token_usage": {
"completion_tokens": 9,
"prompt_tokens": 32,
"total_tokens": 41,
},
"model_name": "grok-4",
"system_fingerprint": None,
"finish_reason": "stop",
"logprobs": None,
},
id="run-09371a11-7f72-4c53-8e7c-9de5c238b34c-0",
usage_metadata={
"input_tokens": 32,
"output_tokens": 9,
"total_tokens": 41,
},
)
```
Reasoning:
[Certain xAI models](https://docs.x.ai/docs/models#model-pricing) support reasoning,
which allows the model to provide reasoning content along with the response.
If provided, reasoning content is returned under the `additional_kwargs` field of the
`AIMessage` or `AIMessageChunk`.
If supported, reasoning effort can be specified in the model constructor's `extra_body`
argument, which will control the amount of reasoning the model does. The value can be one of
`'low'` or `'high'`.
```python
model = ChatXAI(
model="grok-3-mini",
extra_body={"reasoning_effort": "high"},
)
```
!!! note
As of 2025-07-10, `reasoning_content` is only returned in Grok 3 models, such as
[Grok 3 Mini](https://docs.x.ai/docs/models/grok-3-mini).
!!! note
Note that in [Grok 4](https://docs.x.ai/docs/models/grok-4-0709), as of 2025-07-10,
reasoning is not exposed in `reasoning_content` (other than initial `'Thinking...'` text),
reasoning cannot be disabled, and the `reasoning_effort` cannot be specified.
Tool calling / function calling:
```python
from pydantic import BaseModel, Field
model = ChatXAI(model="grok-4")
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
model_with_tools = model.bind_tools([GetWeather, GetPopulation])
ai_msg = model_with_tools.invoke("Which city is bigger: LA or NY?")
ai_msg.tool_calls
```
```python
[
{
"name": "GetPopulation",
"args": {"location": "NY"},
"id": "call_m5tstyn2004pre9bfuxvom8x",
"type": "tool_call",
},
{
"name": "GetPopulation",
"args": {"location": "LA"},
"id": "call_0vjgq455gq1av5sp9eb1pw6a",
"type": "tool_call",
},
]
```
!!! note
With stream response, the tool / function call will be returned in whole in a
single chunk, instead of being streamed across chunks.
Tool choice can be controlled by setting the `tool_choice` parameter in the model
constructor's `extra_body` argument. For example, to disable tool / function calling:
```python
model = ChatXAI(model="grok-4", extra_body={"tool_choice": "none"})
```
To require that the model always calls a tool / function, set `tool_choice` to `'required'`:
```python
model = ChatXAI(model="grok-4", extra_body={"tool_choice": "required"})
```
To specify a tool / function to call, set `tool_choice` to the name of the tool / function:
```python
from pydantic import BaseModel, Field
model = ChatXAI(
model="grok-4",
extra_body={
"tool_choice": {"type": "function", "function": {"name": "GetWeather"}}
},
)
class GetWeather(BaseModel):
\"\"\"Get the current weather in a given location\"\"\"
location: str = Field(..., description='The city and state, e.g. San Francisco, CA')
class GetPopulation(BaseModel):
\"\"\"Get the current population in a given location\"\"\"
location: str = Field(..., description='The city and state, e.g. San Francisco, CA')
model_with_tools = model.bind_tools([GetWeather, GetPopulation])
ai_msg = model_with_tools.invoke(
"Which city is bigger: LA or NY?",
)
ai_msg.tool_calls
```
The resulting tool call would be:
```python
[
{
"name": "GetWeather",
"args": {"location": "Los Angeles, CA"},
"id": "call_81668711",
"type": "tool_call",
}
]
```
Parallel tool calling / parallel function calling:
By default, parallel tool / function calling is enabled, so you can process
multiple function calls in one request/response cycle. When two or more tool calls
are required, all of the tool call requests will be included in the response body.
Structured output:
```python
from typing import Optional
from pydantic import BaseModel, Field
class Joke(BaseModel):
'''Joke to tell user.'''
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
rating: int | None = Field(description="How funny the joke is, from 1 to 10")
structured_model = model.with_structured_output(Joke)
structured_model.invoke("Tell me a joke about cats")
```
```python
Joke(
setup="Why was the cat sitting on the computer?",
punchline="To keep an eye on the mouse!",
rating=7,
)
```
Live Search:
xAI supports a [Live Search](https://docs.x.ai/docs/guides/live-search)
feature that enables Grok to ground its answers using results from web searches.
```python
from langchain_xai import ChatXAI
model = ChatXAI(
model="grok-4",
search_parameters={
"mode": "auto",
# Example optional parameters below:
"max_search_results": 3,
"from_date": "2025-05-26",
"to_date": "2025-05-27",
},
)
model.invoke("Provide me a digest of world news in the last 24 hours.")
```
!!! note
[Citations](https://docs.x.ai/docs/guides/live-search#returning-citations)
are only available in [Grok 3](https://docs.x.ai/docs/models/grok-3).
Token usage:
```python
ai_msg = model.invoke(messages)
ai_msg.usage_metadata
```
```python
{"input_tokens": 37, "output_tokens": 6, "total_tokens": 43}
```
Logprobs:
```python
logprobs_model = model.bind(logprobs=True)
messages = [("human", "Say Hello World! Do not return anything else.")]
ai_msg = logprobs_model.invoke(messages)
ai_msg.response_metadata["logprobs"]
```
```python
{
"content": None,
"token_ids": [22557, 3304, 28808, 2],
"tokens": [" Hello", " World", "!", "</s>"],
"token_logprobs": [-4.7683716e-06, -5.9604645e-07, 0, -0.057373047],
}
```
Response metadata:
```python
ai_msg = model.invoke(messages)
ai_msg.response_metadata
```
```python
{
"token_usage": {
"completion_tokens": 4,
"prompt_tokens": 19,
"total_tokens": 23,
},
"model_name": "grok-4",
"system_fingerprint": None,
"finish_reason": "stop",
"logprobs": None,
}
```
""" # noqa: E501
model_name: str = Field(default="grok-4", alias="model")
"""Model name to use."""
xai_api_key: SecretStr | None = Field(
alias="api_key",
default_factory=secret_from_env("XAI_API_KEY", default=None),
)
"""xAI API key.
Automatically read from env variable `XAI_API_KEY` if not provided.
"""
xai_api_base: str = Field(default="https://api.x.ai/v1/")
"""Base URL path for API requests."""
search_parameters: dict[str, Any] | None = None
"""Parameters for search requests. Example: `{"mode": "auto"}`."""
openai_api_key: SecretStr | None = None
openai_api_base: str | None = None
model_config = ConfigDict(
populate_by_name=True,
)
@property
def lc_secrets(self) -> dict[str, str]:
"""A map of constructor argument names to secret ids.
For example, `{"xai_api_key": "XAI_API_KEY"}`
"""
return {"xai_api_key": "XAI_API_KEY"}
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain_xai", "chat_models"]`
"""
return ["langchain_xai", "chat_models"]
@property
def lc_attributes(self) -> dict[str, Any]:
"""List of attribute names that should be included in the serialized kwargs.
These attributes must be accepted by the constructor.
"""
attributes: dict[str, Any] = {}
if self.xai_api_base:
attributes["xai_api_base"] = self.xai_api_base
return attributes
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by LangChain."""
return True
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "xai-chat"
def _get_ls_params(
self,
stop: list[str] | None = None,
**kwargs: Any, # noqa: ANN401
) -> LangSmithParams:
"""Get the parameters used to invoke the model."""
params = super()._get_ls_params(stop=stop, **kwargs)
params["ls_provider"] = "xai"
return params
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if self.n is not None and self.n < 1:
msg = "n must be at least 1."
raise ValueError(msg)
if self.n is not None and self.n > 1 and self.streaming:
msg = "n must be 1 when streaming."
raise ValueError(msg)
client_params: dict = {
"api_key": (
self.xai_api_key.get_secret_value() if self.xai_api_key else None
),
"base_url": self.xai_api_base,
"timeout": self.request_timeout,
"default_headers": self.default_headers,
"default_query": self.default_query,
}
if self.max_retries is not None:
client_params["max_retries"] = self.max_retries
if client_params["api_key"] is None:
msg = (
"xAI API key is not set. Please set it in the `xai_api_key` field or "
"in the `XAI_API_KEY` environment variable."
)
raise ValueError(msg)
if not (self.client or None):
sync_specific: dict = {"http_client": self.http_client}
self.client = openai.OpenAI(
**client_params, **sync_specific
).chat.completions
self.root_client = openai.OpenAI(**client_params, **sync_specific)
if not (self.async_client or None):
async_specific: dict = {"http_client": self.http_async_client}
self.async_client = openai.AsyncOpenAI(
**client_params, **async_specific
).chat.completions
self.root_async_client = openai.AsyncOpenAI(
**client_params,
**async_specific,
)
return self
@model_validator(mode="after")
def _set_model_profile(self) -> Self:
"""Set model profile if not overridden."""
if self.profile is None:
self.profile = _get_default_model_profile(self.model_name)
return self
@property
def _default_params(self) -> dict[str, Any]:
"""Get default parameters."""
params = super()._default_params
if self.search_parameters:
if "extra_body" in params:
params["extra_body"]["search_parameters"] = self.search_parameters
else:
params["extra_body"] = {"search_parameters": self.search_parameters}
return params
def _create_chat_result(
self,
response: dict | openai.BaseModel,
generation_info: dict | None = None,
) -> ChatResult:
rtn = super()._create_chat_result(response, generation_info)
for generation in rtn.generations:
generation.message.response_metadata["model_provider"] = "xai"
if not isinstance(response, openai.BaseModel):
return rtn
if hasattr(response.choices[0].message, "reasoning_content"): # type: ignore[attr-defined]
rtn.generations[0].message.additional_kwargs["reasoning_content"] = (
response.choices[0].message.reasoning_content # type: ignore[attr-defined]
)
if hasattr(response, "citations"):
rtn.generations[0].message.additional_kwargs["citations"] = (
response.citations
)
return rtn
def _convert_chunk_to_generation_chunk(
self,
chunk: dict,
default_chunk_class: type,
base_generation_info: dict | None,
) -> ChatGenerationChunk | None:
generation_chunk = super()._convert_chunk_to_generation_chunk(
chunk,
default_chunk_class,
base_generation_info,
)
if generation_chunk:
generation_chunk.message.response_metadata["model_provider"] = "xai"
if (choices := chunk.get("choices")) and generation_chunk:
top = choices[0]
if isinstance(generation_chunk.message, AIMessageChunk) and (
reasoning_content := top.get("delta", {}).get("reasoning_content")
):
generation_chunk.message.additional_kwargs["reasoning_content"] = (
reasoning_content
)
if (
(citations := chunk.get("citations"))
and generation_chunk
and isinstance(generation_chunk.message, AIMessageChunk)
):
generation_chunk.message.additional_kwargs["citations"] = citations
return generation_chunk
def with_structured_output(
self,
schema: _DictOrPydanticClass | None = None,
*,
method: Literal[
"function_calling", "json_mode", "json_schema"
] = "function_calling",
include_raw: bool = False,
strict: bool | None = None,
**kwargs: Any, # noqa: ANN401
) -> Runnable[LanguageModelInput, _DictOrPydantic]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema. Can be passed in as:
- An OpenAI function/tool schema,
- A JSON Schema,
- A `TypedDict` class,
- Or a Pydantic class.
If `schema` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated.
See `langchain_core.utils.function_calling.convert_to_openai_tool` for
more on how to properly specify types and descriptions of schema fields
when specifying a Pydantic or `TypedDict` class.
method: The method for steering model generation, one of:
- `'function_calling'`:
Uses xAI's [tool-calling features](https://docs.x.ai/docs/guides/function-calling).
- `'json_schema'`:
Uses xAI's [structured output feature](https://docs.x.ai/docs/guides/structured-outputs).
- `'json_mode'`:
Uses xAI's JSON mode feature.
include_raw:
If `False` then only the parsed structured output is returned.
If an error occurs during model output parsing it will be raised.
If `True` then both the raw model response (a `BaseMessage`) and the
parsed model response will be returned.
If an error occurs during output parsing it will be caught and returned
as well.
The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
`'parsing_error'`.
strict:
- `True`:
Model output is guaranteed to exactly match the schema.
The input schema will also be validated according to the [supported schemas](https://platform.openai.com/docs/guides/structured-outputs/supported-schemas?api-mode=responses#supported-schemas).
- `False`:
Input schema will not be validated and model output will not be
validated.
- `None`:
`strict` argument will not be passed to the model.
kwargs: Additional keyword args aren't supported.
Returns:
A `Runnable` that takes same inputs as a
`langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
`False` and `schema` is a Pydantic class, `Runnable` outputs an instance
of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
`False` then `Runnable` outputs a `dict`.
If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
- `'raw'`: `BaseMessage`
- `'parsed'`: `None` if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: `BaseException | None`
"""
# Some applications require that incompatible parameters (e.g., unsupported
# methods) be handled.
if method == "function_calling" and strict:
strict = None
return super().with_structured_output(
schema, method=method, include_raw=include_raw, strict=strict, **kwargs
)
| ChatXAI |
python | huggingface__transformers | src/transformers/models/flava/modeling_flava.py | {
"start": 4091,
"end": 6807
} | class ____(ModelOutput):
r"""
mim (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels` and `pixel_values` are present, `input_ids_masked` is absent and `mim_weight` > 0.):
Masked Image Modeling loss as used in BeIT calculated only for unimodal image data.
mlm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels` and `input_ids_masked` are present, `pixel_values` is absent and `mlm_weight` > 0.):
Masked Language Modeling loss as used in BERT calculated only for unimodal text data.
itm (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `itm_labels`, `input_ids_masked`, `pixel_values` are present and `itm_weight` > 0.):
Image Text Matching (ITM) loss calculated for paired image-text data. Note that ITM loss is calculated on
masked pairs in FLAVA.
global_contrastive (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `input_ids` and `pixel_values` are present and `global_contrastive_weight` > 0.):
Contrastive loss for image-text similarity similar to CLIP but calculated globally for paired image-text
data. This is calculated on unmasked images and texts.
mmm_image (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mim_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_image_weight` > 0.):
Masked Multimodal Modeling loss's image component calculated on paired image-text data.
mmm_text (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mlm_labels`, `pixel_values` and `input_ids_masked` are present and `mmm_text_weight` > 0.):
Masked Multimodal Modeling loss's text component calculated on paired image-text data.
"""
mim: Optional[torch.FloatTensor] = None
mlm: Optional[torch.FloatTensor] = None
itm: Optional[torch.FloatTensor] = None
global_contrastive: Optional[torch.FloatTensor] = None
mmm_image: Optional[torch.FloatTensor] = None
mmm_text: Optional[torch.FloatTensor] = None
def all_none(self) -> bool:
all_none = True
for v in self.values():
if v is not None:
all_none = False
break
return all_none
@dataclass
@auto_docstring(
custom_intro="""
Output from FlavaForPreTraining containing embeddings, and outputs from individual encoders.
Note that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a
transformer. If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and
`text_projection` layers on `image_embeddings` and `text_embeddings` respectively.
"""
)
| FlavaLosses |
python | PrefectHQ__prefect | src/integrations/prefect-shell/tests/test_commands_windows.py | {
"start": 5953,
"end": 8471
} | class ____:
async def execute(self, op, method):
if method == "run":
return await op.run()
elif method == "trigger":
proc = await op.trigger()
await proc.wait_for_completion()
return await proc.fetch_result()
def test_echo(self):
op = ShellOperation(commands=["echo Hello"])
assert op.run() == ["Hello"]
@pytest.mark.parametrize("method", ["run", "trigger"])
async def test_error(self, method):
op = ShellOperation(commands=["throw"])
with pytest.raises(RuntimeError, match="return code"):
await self.execute(op, method)
@pytest.mark.skipif(sys.version >= "3.12", reason="Fails on Python 3.12")
@pytest.mark.parametrize("method", ["run", "trigger"])
async def test_output(self, prefect_task_runs_caplog, method):
op = ShellOperation(commands=["echo 'testing'"])
assert await self.execute(op, method) == ["testing"]
records = prefect_task_runs_caplog.records
assert len(records) == 3
assert "triggered with 1 commands running" in records[0].message
assert "testing" in records[1].message
assert "completed with return code 0" in records[2].message
@pytest.mark.parametrize("method", ["run", "trigger"])
async def test_current_env(self, method):
op = ShellOperation(commands=["echo $env:USERPROFILE"])
assert await self.execute(op, method) == [os.environ["USERPROFILE"]]
@pytest.mark.parametrize("method", ["run", "trigger"])
async def test_updated_env(self, method):
op = ShellOperation(
commands=["echo $env:TEST_VAR"], env={"TEST_VAR": "test value"}
)
assert await self.execute(op, method) == ["test value"]
@pytest.mark.parametrize("method", ["run", "trigger"])
async def test_cwd(self, method):
op = ShellOperation(commands=["Get-Location"], working_dir=Path.home())
assert os.fspath(Path.home()) in (await self.execute(op, method))
async def test_context_manager(self):
async with ShellOperation(commands=["echo 'testing'"]) as op:
proc = await op.trigger()
await proc.wait_for_completion()
await proc.fetch_result() == ["testing"]
def test_async_context_manager(self):
with ShellOperation(commands=["echo 'testing'"]) as op:
proc = op.trigger()
proc.wait_for_completion()
proc.fetch_result() == ["testing", ""]
| TestShellOperation |
python | anthropics__anthropic-sdk-python | src/anthropic/types/message_create_params.py | {
"start": 993,
"end": 10398
} | class ____(TypedDict, total=False):
max_tokens: Required[int]
"""The maximum number of tokens to generate before stopping.
Note that our models may stop _before_ reaching this maximum. This parameter
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
[models](https://docs.claude.com/en/docs/models-overview) for details.
"""
messages: Required[Iterable[MessageParam]]
"""Input messages.
Our models are trained to operate on alternating `user` and `assistant`
conversational turns. When creating a new `Message`, you specify the prior
conversational turns with the `messages` parameter, and the model then generates
the next `Message` in the conversation. Consecutive `user` or `assistant` turns
in your request will be combined into a single turn.
Each input message must be an object with a `role` and `content`. You can
specify a single `user`-role message, or you can include multiple `user` and
`assistant` messages.
If the final message uses the `assistant` role, the response content will
continue immediately from the content in that message. This can be used to
constrain part of the model's response.
Example with a single `user` message:
```json
[{ "role": "user", "content": "Hello, Claude" }]
```
Example with multiple conversational turns:
```json
[
{ "role": "user", "content": "Hello there." },
{ "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
{ "role": "user", "content": "Can you explain LLMs in plain English?" }
]
```
Example with a partially-filled response from Claude:
```json
[
{
"role": "user",
"content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
},
{ "role": "assistant", "content": "The best answer is (" }
]
```
Each input message `content` may be either a single `string` or an array of
content blocks, where each block has a specific `type`. Using a `string` for
`content` is shorthand for an array of one content block of type `"text"`. The
following input messages are equivalent:
```json
{ "role": "user", "content": "Hello, Claude" }
```
```json
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
See [input examples](https://docs.claude.com/en/api/messages-examples).
Note that if you want to include a
[system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
top-level `system` parameter — there is no `"system"` role for input messages in
the Messages API.
There is a limit of 100,000 messages in a single request.
"""
model: Required[ModelParam]
"""
The model that will complete your prompt.\n\nSee
[models](https://docs.anthropic.com/en/docs/models-overview) for additional
details and options.
"""
metadata: MetadataParam
"""An object describing metadata about the request."""
service_tier: Literal["auto", "standard_only"]
"""
Determines whether to use priority capacity (if available) or standard capacity
for this request.
Anthropic offers different levels of service for your API requests. See
[service-tiers](https://docs.claude.com/en/api/service-tiers) for details.
"""
stop_sequences: SequenceNotStr[str]
"""Custom text sequences that will cause the model to stop generating.
Our models will normally stop when they have naturally completed their turn,
which will result in a response `stop_reason` of `"end_turn"`.
If you want the model to stop generating when it encounters custom strings of
text, you can use the `stop_sequences` parameter. If the model encounters one of
the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
and the response `stop_sequence` value will contain the matched stop sequence.
"""
system: Union[str, Iterable[TextBlockParam]]
"""System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
[guide to system prompts](https://docs.claude.com/en/docs/system-prompts).
"""
temperature: float
"""Amount of randomness injected into the response.
Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
for analytical / multiple choice, and closer to `1.0` for creative and
generative tasks.
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
"""
thinking: ThinkingConfigParam
"""Configuration for enabling Claude's extended thinking.
When enabled, responses include `thinking` content blocks showing Claude's
thinking process before the final answer. Requires a minimum budget of 1,024
tokens and counts towards your `max_tokens` limit.
See
[extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
for details.
"""
tool_choice: ToolChoiceParam
"""How the model should use the provided tools.
The model can use a specific tool, any available tool, decide by itself, or not
use tools at all.
"""
tools: Iterable[ToolUnionParam]
"""Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
content blocks that represent the model's use of those tools. You can then run
those tools using the tool input generated by the model and then optionally
return results back to the model using `tool_result` content blocks.
There are two types of tools: **client tools** and **server tools**. The
behavior described below applies to client tools. For
[server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
see their individual documentation as each has its own behavior (e.g., the
[web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
Each tool definition includes:
- `name`: Name of the tool.
- `description`: Optional, but strongly-recommended description of the tool.
- `input_schema`: [JSON schema](https://json-schema.org/draft/2020-12) for the
tool `input` shape that the model will produce in `tool_use` output content
blocks.
For example, if you defined `tools` as:
```json
[
{
"name": "get_stock_price",
"description": "Get the current stock price for a given ticker symbol.",
"input_schema": {
"type": "object",
"properties": {
"ticker": {
"type": "string",
"description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
}
},
"required": ["ticker"]
}
}
]
```
And then asked the model "What's the S&P 500 at today?", the model might produce
`tool_use` content blocks in the response like this:
```json
[
{
"type": "tool_use",
"id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"name": "get_stock_price",
"input": { "ticker": "^GSPC" }
}
]
```
You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
input, and return the following back to the model in a subsequent `user`
message:
```json
[
{
"type": "tool_result",
"tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"content": "259.75 USD"
}
]
```
Tools can be used for workflows that include running client-side tools and
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.
"""
top_k: int
"""Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
[Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
Recommended for advanced use cases only. You usually only need to use
`temperature`.
"""
top_p: float
"""Use nucleus sampling.
In nucleus sampling, we compute the cumulative distribution over all the options
for each subsequent token in decreasing probability order and cut it off once it
reaches a particular probability specified by `top_p`. You should either alter
`temperature` or `top_p`, but not both.
Recommended for advanced use cases only. You usually only need to use
`temperature`.
"""
Metadata: TypeAlias = MetadataParam
"""This is deprecated, `MetadataParam` should be used instead"""
ToolChoice: TypeAlias = ToolChoiceParam
"""This is deprecated, `ToolChoiceParam` should be used instead"""
ToolChoiceToolChoiceAuto: TypeAlias = ToolChoiceAutoParam
"""This is deprecated, `ToolChoiceAutoParam` should be used instead"""
ToolChoiceToolChoiceAny: TypeAlias = ToolChoiceAnyParam
"""This is deprecated, `ToolChoiceAnyParam` should be used instead"""
ToolChoiceToolChoiceTool: TypeAlias = ToolChoiceToolParam
"""This is deprecated, `ToolChoiceToolParam` should be used instead"""
| MessageCreateParamsBase |
python | Textualize__textual | docs/examples/styles/link_style_hover.py | {
"start": 64,
"end": 763
} | class ____(App):
CSS_PATH = "link_style_hover.tcss"
def compose(self):
yield Label(
"Visit the [link='https://textualize.io']Textualize[/link] website.",
id="lbl1", # (1)!
)
yield Label(
"Click [@click=app.bell]here[/] for the bell sound.",
id="lbl2", # (2)!
)
yield Label(
"You can also click [@click=app.bell]here[/] for the bell sound.",
id="lbl3", # (3)!
)
yield Label(
"[@click=app.quit]Exit this application.[/]",
id="lbl4", # (4)!
)
if __name__ == "__main__":
app = LinkHoverStyleApp()
app.run()
| LinkHoverStyleApp |
python | walkccc__LeetCode | solutions/20. Valid Parentheses/20.py | {
"start": 0,
"end": 309
} | class ____:
def isValid(self, s: str) -> bool:
stack = []
for c in s:
if c == '(':
stack.append(')')
elif c == '{':
stack.append('}')
elif c == '[':
stack.append(']')
elif not stack or stack.pop() != c:
return False
return not stack
| Solution |
python | pytorch__pytorch | test/distributed/test_c10d_ucc.py | {
"start": 35071,
"end": 37239
} | class ____(test_c10d_common.AbstractCommTest, MultiProcessTestCase):
@property
def device(self):
return "cpu"
def setUp(self):
super().setUp()
self._spawn_processes()
def tearDown(self):
super().tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
@requires_ucc()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_default_pg_ucc(self):
self._test_sequence_num_set_default_pg(backend="ucc")
@requires_ucc()
@skip_if_lt_x_gpu(2)
def test_sequence_num_set_ucc_new_group(self):
self._test_sequence_num_set_new_group(backend="ucc")
@skip_if_lt_x_gpu(2)
@requires_ucc()
def test_sequence_num_incremented_ucc_default(self):
self._test_sequence_num_incremented_default_group("ucc")
@skip_if_lt_x_gpu(4)
@requires_ucc()
def test_sequence_num_incremented_ucc_subgroup(self):
if self.world_size < 4:
return skip_but_pass_in_sandcastle("Test requires world_size of at least 4")
self._test_sequence_num_incremented_subgroup("ucc")
@skip_but_pass_in_sandcastle("Fails on M60")
@requires_ucc()
def test_ucc_barrier_device_ids(self):
store = c10d.FileStore(self.file_name, self.world_size)
c10d.init_process_group(
backend="ucc", rank=self.rank, world_size=self.world_size, store=store
)
with self.assertRaisesRegex(RuntimeError, "device_ids not supported"):
c10d.barrier(device_ids=[self.rank])
@skip_but_pass_in_sandcastle("Fails on M60")
@skip_if_lt_x_gpu(2)
@requires_ucc()
def test_ucc_warn_not_in_group(self):
self._test_warn_not_in_group(backend="ucc")
@skip_if_lt_x_gpu(2)
@requires_ucc()
def test_ucc_rank_membership(self):
self._test_rank_membership(backend="ucc")
@skip_if_lt_x_gpu(2)
@requires_ucc()
def test_tensor_dtype_mismatch(self):
self._test_tensor_dtype_mismatch(backend="ucc")
@skip_if_lt_x_gpu(2)
@requires_ucc()
def test_tensor_dtype_complex(self):
self._test_tensor_dtype_complex(backend="ucc")
| CommTest |
python | Textualize__textual | docs/examples/guide/actions/actions04.py | {
"start": 252,
"end": 683
} | class ____(App):
BINDINGS = [
("r", "set_background('red')", "Red"),
("g", "set_background('green')", "Green"),
("b", "set_background('blue')", "Blue"),
]
def compose(self) -> ComposeResult:
yield Static(TEXT)
def action_set_background(self, color: str) -> None:
self.screen.styles.background = color
if __name__ == "__main__":
app = ActionsApp()
app.run()
| ActionsApp |
python | getsentry__sentry | src/sentry/testutils/cases.py | {
"start": 39201,
"end": 47743
} | class ____(BaseTestCase):
"""
Mixin for enabling test case classes to talk to snuba
Useful when you are working on acceptance tests or integration
tests that require snuba.
"""
# We need Django to flush all databases.
databases: set[str] | str = "__all__"
def setUp(self):
super().setUp()
self.init_snuba()
@pytest.fixture(autouse=True)
def initialize(self, reset_snuba, call_snuba):
self.call_snuba = call_snuba
def create_project(self, **kwargs) -> Project:
if "flags" not in kwargs:
# We insert events directly into snuba in tests, so we need to set has_transactions to True so the
# application knows that events have been sent
kwargs["flags"] = Project.flags.has_transactions
return super().create_project(**kwargs)
def init_snuba(self):
self.snuba_eventstream = SnubaEventStream()
self.snuba_tagstore = SnubaTagStorage()
def store_event(self, *args, **kwargs):
"""
Simulates storing an event for testing.
To set event title:
- use "message": "{title}" field for errors
- use "transaction": "{title}" field for transactions
More info on event payloads: https://develop.sentry.dev/sdk/event-payloads/
"""
with mock.patch("sentry.eventstream.insert", self.snuba_eventstream.insert):
stored_event = Factories.store_event(*args, **kwargs)
# Error groups
stored_group = stored_event.group
if stored_group is not None:
self.store_group(stored_group)
# Performance groups
stored_groups = stored_event.groups
if stored_groups is not None:
for group in stored_groups:
self.store_group(group)
return stored_event
def wait_for_event_count(self, project_id, total, attempts=2):
"""
Wait until the event count reaches the provided value or until attempts is reached.
Useful when you're storing several events and need to ensure that snuba/clickhouse
state has settled.
"""
# Verify that events have settled in snuba's storage.
# While snuba is synchronous, clickhouse isn't entirely synchronous.
attempt = 0
snuba_filter = eventstore.Filter(project_ids=[project_id])
last_events_seen = 0
while attempt < attempts:
events = eventstore.backend.get_events(
snuba_filter, referrer="test.wait_for_event_count"
)
last_events_seen = len(events)
if len(events) >= total:
break
attempt += 1
time.sleep(0.05)
if attempt == attempts:
assert (
False
), f"Could not ensure that {total} event(s) were persisted within {attempt} attempt(s). Event count is instead currently {last_events_seen}."
def build_session(self, **kwargs):
session = {
"session_id": str(uuid4()),
"distinct_id": str(uuid4()),
"status": "ok",
"seq": 0,
"retention_days": 90,
"duration": 60.0,
"errors": 0,
"started": time.time() // 60 * 60,
"received": time.time(),
}
# Support both passing the values for these field directly, and the full objects
translators = [
("release", "version", "release"),
("environment", "name", "environment"),
("project_id", "id", "project"),
("org_id", "id", "organization"),
]
for key, attr, default_attr in translators:
if key not in kwargs:
kwargs[key] = getattr(self, default_attr)
val = kwargs[key]
kwargs[key] = getattr(val, attr, val)
session.update(kwargs)
return session
def store_group(self, group):
data = [self.__wrap_group(group)]
assert (
_snuba_pool.urlopen(
"POST",
"/tests/entities/groupedmessage/insert",
body=json.dumps(data),
headers={},
).status
== 200
)
def store_span(self, span, is_eap=False):
self.store_spans([span], is_eap=is_eap)
def store_spans(self, spans, is_eap=False):
if is_eap:
files = {}
for i, span in enumerate(spans):
trace_item = span_to_trace_item(span)
files[f"item_{i}"] = trace_item.SerializeToString()
assert (
requests.post(
settings.SENTRY_SNUBA + EAP_ITEMS_INSERT_ENDPOINT,
files=files,
).status_code
== 200
)
else:
assert (
requests.post(
settings.SENTRY_SNUBA + "/tests/entities/spans/insert",
data=json.dumps(spans),
).status_code
== 200
)
def store_ourlogs(self, ourlogs):
files = {f"log_{i}": log.SerializeToString() for i, log in enumerate(ourlogs)}
response = requests.post(
settings.SENTRY_SNUBA + EAP_ITEMS_INSERT_ENDPOINT,
files=files,
)
assert response.status_code == 200
def store_trace_metrics(self, trace_metrics):
files = {
f"trace_metric_{i}": trace_metric.SerializeToString()
for i, trace_metric in enumerate(trace_metrics)
}
response = requests.post(
settings.SENTRY_SNUBA + EAP_ITEMS_INSERT_ENDPOINT,
files=files,
)
assert response.status_code == 200
def store_profile_functions(self, profile_functions):
files = {
f"profile_functions_{i}": profile_function.SerializeToString()
for i, profile_function in enumerate(profile_functions)
}
response = requests.post(
settings.SENTRY_SNUBA + EAP_ITEMS_INSERT_ENDPOINT,
files=files,
)
assert response.status_code == 200
def store_issues(self, issues):
assert (
requests.post(
settings.SENTRY_SNUBA + "/tests/entities/search_issues/insert",
data=json.dumps(issues),
).status_code
== 200
)
def to_snuba_time_format(self, datetime_value):
date_format = "%Y-%m-%d %H:%M:%S%z"
return datetime_value.strftime(date_format)
def __wrap_group(self, group):
return {
"event": "change",
"kind": "insert",
"table": "sentry_groupedmessage",
"columnnames": [
"id",
"logger",
"level",
"message",
"status",
"times_seen",
"last_seen",
"first_seen",
"data",
"project_id",
"time_spent_total",
"time_spent_count",
"resolved_at",
"active_at",
"is_public",
"platform",
"num_comments",
"first_release_id",
"short_id",
],
"columnvalues": [
group.id,
group.logger,
group.level,
group.message,
group.status,
group.times_seen,
self.to_snuba_time_format(group.last_seen),
self.to_snuba_time_format(group.first_seen),
group.data,
group.project.id,
group.time_spent_total,
group.time_spent_count,
group.resolved_at,
self.to_snuba_time_format(group.active_at),
group.is_public,
group.platform,
group.num_comments,
group.first_release.id if group.first_release else None,
group.short_id,
],
}
def snuba_insert(self, events):
"Write a (wrapped) event (or events) to Snuba."
if not isinstance(events, list):
events = [events]
assert (
requests.post(
settings.SENTRY_SNUBA + "/tests/entities/events/insert",
data=json.dumps(events),
).status_code
== 200
)
| SnubaTestCase |
python | huggingface__transformers | src/transformers/models/donut/image_processing_donut_fast.py | {
"start": 1273,
"end": 9522
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 2560, "width": 1920}
do_resize = True
do_rescale = True
do_normalize = True
do_thumbnail = True
do_align_long_axis = False
do_pad = True
valid_kwargs = DonutImageProcessorKwargs
def __init__(self, **kwargs: Unpack[DonutImageProcessorKwargs]):
size = kwargs.pop("size", None)
if isinstance(size, (tuple, list)):
size = size[::-1]
kwargs["size"] = size
super().__init__(**kwargs)
@auto_docstring
def preprocess(self, images: ImageInput, **kwargs: Unpack[DonutImageProcessorKwargs]) -> BatchFeature:
if "size" in kwargs:
size = kwargs.pop("size")
if isinstance(size, (tuple, list)):
size = size[::-1]
kwargs["size"] = size
return super().preprocess(images, **kwargs)
def align_long_axis(
self,
image: "torch.Tensor",
size: SizeDict,
) -> "torch.Tensor":
"""
Align the long axis of the image to the longest axis of the specified size.
Args:
image (`torch.Tensor`):
The image to be aligned.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to align the long axis to.
Returns:
`torch.Tensor`: The aligned image.
"""
input_height, input_width = image.shape[-2:]
output_height, output_width = size.height, size.width
if (output_width < output_height and input_width > input_height) or (
output_width > output_height and input_width < input_height
):
height_dim, width_dim = image.dim() - 2, image.dim() - 1
image = torch.rot90(image, 3, dims=[height_dim, width_dim])
return image
def pad_image(
self,
image: "torch.Tensor",
size: SizeDict,
random_padding: bool = False,
) -> "torch.Tensor":
"""
Pad the image to the specified size.
Args:
image (`torch.Tensor`):
The image to be padded.
size (`dict[str, int]`):
The size `{"height": h, "width": w}` to pad the image to.
random_padding (`bool`, *optional*, defaults to `False`):
Whether to use random padding or not.
data_format (`str` or `ChannelDimension`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
output_height, output_width = size.height, size.width
input_height, input_width = image.shape[-2:]
delta_width = output_width - input_width
delta_height = output_height - input_height
if random_padding:
pad_top = torch.random.randint(low=0, high=delta_height + 1)
pad_left = torch.random.randint(low=0, high=delta_width + 1)
else:
pad_top = delta_height // 2
pad_left = delta_width // 2
pad_bottom = delta_height - pad_top
pad_right = delta_width - pad_left
padding = (pad_left, pad_top, pad_right, pad_bottom)
return F.pad(image, padding)
def pad(self, *args, **kwargs):
    """Deprecated alias for `pad_image`; forwards all arguments unchanged."""
    logger.info("pad is deprecated and will be removed in version 4.27. Please use pad_image instead.")
    return self.pad_image(*args, **kwargs)
def thumbnail(
    self,
    image: "torch.Tensor",
    size: SizeDict,
) -> "torch.Tensor":
    """
    Shrink the image so that no dimension exceeds the corresponding dimension
    of `size`, preserving the aspect ratio.

    Args:
        image (`torch.Tensor`):
            The image to be resized, laid out as `(..., height, width)`.
        size (`dict[str, int]`):
            The maximum size `{"height": h, "width": w}` of the thumbnail.

    Returns:
        `torch.Tensor`: The bicubically resized image, or the input unchanged
        when it already fits within `size`.
    """
    input_height, input_width = image.shape[-2:]

    # Never upscale: clamp each target dimension to the input's dimension.
    height = min(input_height, size.height)
    width = min(input_width, size.width)

    if (height, width) == (input_height, input_width):
        return image

    # Preserve the aspect ratio by deriving the shorter side from whichever
    # input axis is the longer one.
    if input_height > input_width:
        width = int(input_width * height / input_height)
    elif input_width > input_height:
        height = int(input_height * width / input_width)

    return self.resize(
        image,
        size=SizeDict(width=width, height=height),
        interpolation=F.InterpolationMode.BICUBIC,
    )
def _preprocess(
    self,
    images: list["torch.Tensor"],
    do_resize: bool,
    do_thumbnail: bool,
    do_align_long_axis: bool,
    do_pad: bool,
    size: SizeDict,
    interpolation: Optional["F.InterpolationMode"],
    do_center_crop: bool,
    crop_size: SizeDict,
    do_rescale: bool,
    rescale_factor: float,
    do_normalize: bool,
    image_mean: Optional[Union[float, list[float]]],
    image_std: Optional[Union[float, list[float]]],
    disable_grouping: Optional[bool],
    return_tensors: Optional[Union[str, TensorType]],
    **kwargs,
) -> BatchFeature:
    """
    Run the Donut preprocessing pipeline: optional long-axis alignment,
    shortest-edge resize, thumbnail, padding, center crop, and a fused
    rescale/normalize pass. Images are grouped by shape so each group can be
    processed as one batched tensor.

    Returns:
        `BatchFeature`: Holds `pixel_values`, stacked into a single tensor
        when `return_tensors` is set.
    """
    # Stage 1: geometric transforms, batched per input shape for efficiency.
    batches, batch_index = group_images_by_shape(images, disable_grouping=disable_grouping)
    geometric_out = {}
    for shape, batch in batches.items():
        if do_align_long_axis:
            batch = self.align_long_axis(image=batch, size=size)
        if do_resize:
            # Donut resizes on the shortest edge of the target size.
            shortest_edge = min(size.height, size.width)
            batch = self.resize(
                image=batch, size=SizeDict(shortest_edge=shortest_edge), interpolation=interpolation
            )
        if do_thumbnail:
            batch = self.thumbnail(image=batch, size=size)
        if do_pad:
            batch = self.pad_image(image=batch, size=size, random_padding=False)
        geometric_out[shape] = batch
    resized_images = reorder_images(geometric_out, batch_index)

    # Stage 2: re-group, since do_resize may be False or resizing may have
    # produced images with differing sizes.
    batches, batch_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
    pixel_out = {}
    for shape, batch in batches.items():
        if do_center_crop:
            batch = self.center_crop(batch, crop_size)
        # Fused rescale and normalize.
        batch = self.rescale_and_normalize(
            batch, do_rescale, rescale_factor, do_normalize, image_mean, image_std
        )
        pixel_out[shape] = batch
    processed_images = reorder_images(pixel_out, batch_index)

    if return_tensors:
        processed_images = torch.stack(processed_images, dim=0)
    return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
# Public API of this module.
__all__ = ["DonutImageProcessorFast"]
| DonutImageProcessorFast |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-commercetools/source_commercetools/components.py | {
"start": 346,
"end": 1576
} | class ____(DeclarativeOauth2Authenticator):
@backoff.on_exception(
backoff.expo,
DefaultBackoffException,
on_backoff=lambda details: logger.info(
f"Caught retryable error after {details['tries']} tries. Waiting {details['wait']} seconds then retrying..."
),
max_time=300,
)
def _get_refresh_access_token_response(self):
region = self.config["region"]
project_key = self.config["project_key"]
host = self.config["host"]
url = f"https://auth.{region}.{host}.commercetools.com/oauth/token?grant_type=client_credentials&scope=manage_project:{project_key}"
try:
response = requests.post(url, auth=(self.config["client_id"], self.config["client_secret"]))
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as e:
if e.response.status_code == 429 or e.response.status_code >= 500:
raise DefaultBackoffException(request=e.response.request, response=e.response)
raise
except Exception as e:
raise Exception(f"Error while refreshing access token: {e}") from e
| CommerceToolsOauth2Authenticator |
python | django__django | tests/staticfiles_tests/cases.py | {
"start": 1578,
"end": 1690
} | class ____(BaseStaticFilesMixin, SimpleTestCase):
pass
@override_settings(**TEST_SETTINGS)
| StaticFilesTestCase |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/base_test.py | {
"start": 4742,
"end": 6267
} | class ____(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing two segments."""
n = inp
for i in range(2):
c = constant_op.constant(float(i), name="c%d" % i)
n = math_ops.add(n, c, name="add%d" % i)
n = math_ops.mul(n, n, name="mul%d" % i)
n = self.trt_incompatible_op(n, name="incompatible")
c = constant_op.constant(2.0, name="c2")
n = math_ops.add(n, c, name="add2")
n = math_ops.mul(n, n, name="mul2")
c = constant_op.constant(3.0, name="c3")
n = math_ops.add(n, c, name="add3")
n = math_ops.mul(n, n, name="mul3")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_000": ["c0", "c1", "add0", "add1", "mul0", "mul1"],
"TRTEngineOp_001": ["c2", "c3", "add2", "add3", "mul2", "mul3"]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
# Disable the test in fp16 mode since multiple matmul and add ops together
# can cause overflow.
return (
(run_params.precision_mode != "FP16") and
not (trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.use_calibration)), "test FP32 and non-calibration"
| SimpleMultiEnginesTest2 |
python | pyca__cryptography | src/cryptography/hazmat/primitives/ciphers/algorithms.py | {
"start": 1031,
"end": 1386
} | class ____(BlockCipherAlgorithm):
name = "AES"
block_size = 128
# 512 added to support AES-256-XTS, which uses 512-bit keys
key_sizes = frozenset([128, 192, 256, 512])
def __init__(self, key: utils.Buffer):
self.key = _verify_key_size(self, key)
@property
def key_size(self) -> int:
return len(self.key) * 8
| AES |
python | nedbat__coveragepy | tests/test_arcs.py | {
"start": 47133,
"end": 50341
} | class ____(CoverageTest):
"""Tests of if statements being optimized away."""
def test_optimized_away_if_0(self) -> None:
self.check_coverage(
"""\
a = 1
if len([2]):
c = 3
if 0:
if len([5]):
d = 6
else:
e = 8
f = 9
""",
lines=[1, 2, 3, 4, 8, 9],
branchz="23 24",
branchz_missing="24",
)
def test_optimized_away_if_1(self) -> None:
self.check_coverage(
"""\
a = 1
if len([2]):
c = 3
if 1:
if len([5]):
d = 6
else:
e = 8
f = 9
""",
lines=[1, 2, 3, 4, 5, 6, 9],
branchz="23 24 56 59",
branchz_missing="24 59",
)
def test_optimized_away_if_1_no_else(self) -> None:
self.check_coverage(
"""\
a = 1
if 1:
b = 3
c = 4
d = 5
""",
lines=[1, 2, 3, 4, 5],
branchz="",
branchz_missing="",
)
def test_optimized_if_nested(self) -> None:
self.check_coverage(
"""\
a = 1
if 0:
if 0:
b = 4
else:
c = 6
else:
if 0:
d = 9
else:
if 0: e = 11
f = 12
if 0: g = 13
h = 14
i = 15
""",
lines=[1, 2, 8, 11, 12, 13, 14, 15],
branchz="",
branchz_missing="",
)
def test_dunder_debug(self) -> None:
# Since some of our tests use __debug__, let's make sure it is true as
# we expect
assert __debug__
# Check that executed code has __debug__
self.check_coverage(
"""\
assert __debug__, "assert __debug__"
""",
)
# Check that if it didn't have debug, it would let us know.
with pytest.raises(AssertionError):
self.check_coverage(
"""\
assert not __debug__, "assert not __debug__"
""",
)
def test_if_debug(self) -> None:
self.check_coverage(
"""\
for value in [True, False]:
if value:
if __debug__:
x = 4
else:
x = 6
""",
branchz="12 1. 23 26",
branchz_missing="",
)
def test_if_not_debug(self) -> None:
self.check_coverage(
"""\
lines = set()
for value in [True, False]:
if value:
if not __debug__:
lines.add(5)
else:
lines.add(7)
assert lines == {7}
""",
branchz="23 28 34 37",
)
| OptimizedIfTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.