language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ansible__ansible | test/lib/ansible_test/_internal/commands/sanity/pep8.py | {
"start": 619,
"end": 3126
} | class ____(SanitySingleVersion):
"""Sanity test for PEP 8 style guidelines using pycodestyle."""
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return 'A100'
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
current_ignore_file = os.path.join(SANITY_ROOT, 'pep8', 'current-ignore.txt')
current_ignore = sorted(read_lines_without_comments(current_ignore_file, remove_blank_lines=True))
settings = self.load_processor(args)
paths = [target.path for target in targets.include]
cmd = [
python.path,
'-m', 'pycodestyle',
'--max-line-length', '160',
'--config', '/dev/null',
'--ignore', ','.join(sorted(current_ignore)),
] + paths # fmt: skip
if paths:
try:
stdout, stderr = run_command(args, cmd, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr:
raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
else:
stdout = None
if args.explain:
return SanitySuccess(self.name)
if stdout:
pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
results = parse_to_list_of_dict(pattern, stdout)
else:
results = []
messages = [SanityMessage(
message=r['message'],
path=r['path'],
line=int(r['line']),
column=int(r['column']),
level='warning' if r['code'].startswith('W') else 'error',
code=r['code'],
) for r in results]
errors = settings.process_errors(messages, paths)
if errors:
return SanityFailure(self.name, messages=errors)
return SanitySuccess(self.name)
| Pep8Test |
python | pola-rs__polars | py-polars/src/polars/exceptions.py | {
"start": 5500,
"end": 5649
} | class ____(PolarsWarning):
"""Warning issued when a custom ufunc is handled differently than numpy ufunc would.""" # noqa: W505
| CustomUFuncWarning |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 12603,
"end": 12964
} | class ____(models.Model):
cafe = models.IntegerField(verbose_name="café")
parent_cafe = models.ForeignKey(
"self",
related_name="children",
on_delete=models.CASCADE,
verbose_name="café latte",
)
class Meta:
app_label = "django_extensions"
verbose_name = "café unicode model"
| UnicodeVerboseNameModel |
python | google__pytype | pytype/tests/test_pyi1.py | {
"start": 109,
"end": 29637
} | class ____(test_base.BaseTest):
"""Tests for PYI."""
def test_module_parameter(self):
"""This test that types.ModuleType works."""
with test_utils.Tempdir() as d:
d.create_file(
"mod.pyi",
"""
import types
def f(x: types.ModuleType = ...) -> None: ...
""",
)
self.Check(
"""
import os
import mod
mod.f(os)
""",
pythonpath=[d.path],
)
def test_optional(self):
with test_utils.Tempdir() as d:
d.create_file(
"mod.pyi",
"""
def f(x: int = ...) -> None: ...
""",
)
ty = self.Infer(
"""
import mod
def f():
return mod.f()
def g():
return mod.f(3)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import mod
def f() -> NoneType: ...
def g() -> NoneType: ...
""",
)
def test_solve(self):
with test_utils.Tempdir() as d:
d.create_file(
"mod.pyi",
"""
def f(node: int, *args, **kwargs) -> str: ...
""",
)
ty = self.Infer(
"""
import mod
def g(x):
return mod.f(x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import mod
def g(x) -> str: ...
""",
)
def test_typing(self):
with test_utils.Tempdir() as d:
d.create_file(
"mod.pyi",
"""
from typing import Any, IO, List, Optional
def split(s: Optional[int]) -> List[str]: ...
""",
)
ty = self.Infer(
"""
import mod
def g(x):
return mod.split(x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import mod
from typing import List
def g(x) -> List[str]: ...
""",
)
def test_classes(self):
with test_utils.Tempdir() as d:
d.create_file(
"classes.pyi",
"""
class A:
def foo(self) -> A: ...
class B(A):
pass
""",
)
ty = self.Infer(
"""
import classes
x = classes.B().foo()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import classes
x = ... # type: classes.A
""",
)
def test_empty_module(self):
with test_utils.Tempdir() as d:
d.create_file(
"vague.pyi",
"""
from typing import Any
def __getattr__(name) -> Any: ...
""",
)
ty = self.Infer(
"""
import vague
x = vague.foo + vague.bar
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import vague
from typing import Any
x = ... # type: Any
""",
)
def test_decorators(self):
with test_utils.Tempdir() as d:
d.create_file(
"decorated.pyi",
"""
class A:
@staticmethod
def u(a, b) -> int: ...
@classmethod
def v(cls, a, b) -> int: ...
def w(self, a, b) -> int: ...
""",
)
ty = self.Infer(
"""
import decorated
u = decorated.A.u(1, 2)
v = decorated.A.v(1, 2)
a = decorated.A()
x = a.u(1, 2)
y = a.v(1, 2)
z = a.w(1, 2)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import decorated
a = ... # type: decorated.A
u = ... # type: int
v = ... # type: int
x = ... # type: int
y = ... # type: int
z = ... # type: int
""",
)
def test_pass_pyi_classmethod(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
class A:
@classmethod
def v(cls) -> float: ...
def w(self, x: classmethod) -> int: ...
""",
)
ty = self.Infer(
"""
import a
u = a.A().w(a.A.v)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
u = ... # type: int
""",
)
def test_optional_parameters(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
def parse(source, filename = ..., mode = ..., *args, **kwargs) -> int: ...
""",
)
ty = self.Infer(
"""
import a
u = a.parse("True")
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
u = ... # type: int
""",
)
def test_optimize(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import Any
class Bar(dict[Any, int]): ...
""",
)
ty = self.Infer(
"""
import a
def f(foo, bar):
return __any_object__[1]
def g():
out = f('foo', 'bar')
out = out.split()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
from typing import Any
def f(foo, bar) -> Any: ...
def g() -> NoneType: ...
""",
)
def test_iterable(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import Iterable
def f(l: Iterable[int]) -> int: ...
""",
)
ty = self.Infer(
"""
import a
u = a.f([1, 2, 3])
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
u = ... # type: int
""",
)
def test_object(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
def make_object() -> object: ...
""",
)
ty = self.Infer(
"""
import a
def f(x=None):
x = a.make_object()
z = x - __any_object__ # type: ignore
z + __any_object__
return True
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
def f(x=...) -> bool: ...
""",
)
def test_callable(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
from typing import Callable
def process_function(func: Callable[..., Any]) -> None: ...
""",
)
ty = self.Infer(
"""
import foo
def bar():
pass
x = foo.process_function(bar)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
def bar() -> None: ...
x: None
""",
)
def test_hex(self):
ty = self.Infer("""
x = hex(4)
""")
self.assertTypesMatchPytd(
ty,
"""
x = ... # type: str
""",
)
def test_base_class(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Generic, TypeVar
S = TypeVar('S')
T = TypeVar('T')
class A(Generic[S]):
def bar(self, s: S) -> S: ...
class B(Generic[T], A[T]): ...
class C(A[int]): ...
class D:
def baz(self) -> int: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x):
return x.bar("foo")
def g(x):
return x.bar(3)
def h(x):
return x.baz()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
def f(x) -> Any: ...
def g(x) -> Any: ...
def h(x) -> Any: ...
""",
)
def test_old_style_class_object_match(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
def f(x) -> Any: ...
class Foo: pass
""",
)
ty = self.Infer(
"""
import foo
def g():
return foo.f(foo.Foo())
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any
def g() -> Any: ...
""",
)
def test_identity(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import TypeVar
T = TypeVar("T")
def f(x: T) -> T: ...
""",
)
ty = self.Infer(
"""
import foo
x = foo.f(3)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
x = ... # type: int
""",
)
def test_import_function_template(self):
with test_utils.Tempdir() as d1:
d1.create_file(
"foo.pyi",
"""
from typing import TypeVar
T = TypeVar("T")
def f(x: T) -> T: ...
""",
)
with test_utils.Tempdir() as d2:
d2.create_file(
"bar.pyi",
"""
import foo
f = foo.f
""",
)
ty = self.Infer(
"""
import bar
x = bar.f("")
""",
pythonpath=[d1.path, d2.path],
)
self.assertTypesMatchPytd(
ty,
"""
import bar
x = ... # type: str
""",
)
def test_multiple_getattr(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
def __getattr__(name) -> Any: ...
""",
)
ty, errors = self.InferWithErrors(
"""
from foo import *
from bar import * # Nonsense import generates a top-level __getattr__ # import-error[e]
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
def __getattr__(name) -> Any: ...
""",
)
self.assertErrorRegexes(errors, {"e": r"bar"})
def test_pyi_list_item(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
lst = ... # type: list
def f(x: int) -> str: ...
""",
)
ty = self.Infer(
"""
import a
x = a.f(a.lst[0])
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
x = ... # type: str
""",
)
def test_keyword_only_args(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import Any
def foo(x: str, *y: Any, z: complex = ...) -> int: ...
""",
)
ty = self.Infer(
"""
import a
x = a.foo("foo %d %d", 3, 3)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
x = ... # type: int
""",
)
def test_posarg(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import TypeVar
T = TypeVar("T")
def get_pos(x: T, *args: int, z: int, **kws: int) -> T: ...
""",
)
ty = self.Infer(
"""
import a
v = a.get_pos("foo", 3, 4, z=5)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
v = ... # type: str
""",
)
def test_kwonly_arg(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import TypeVar
T = TypeVar("T")
def get_kwonly(x: int, *args: int, z: T, **kwargs: int) -> T: ...
""",
)
ty = self.Infer(
"""
import a
v = a.get_kwonly(3, 4, z=5j)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
v = ... # type: complex
""",
)
def test_starargs(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Dict, TypeVar
K = TypeVar("K")
V = TypeVar("V")
def foo(a: K, *b, c: V, **d) -> Dict[K, V]: ...
""",
)
ty, errors = self.InferWithErrors(
"""
import foo
a = foo.foo(*tuple(), **dict()) # missing-parameter[e1]
b = foo.foo(*(1,), **{"c": 3j})
c = foo.foo(*(1,)) # missing-parameter[e2]
d = foo.foo(*(), **{"d": 3j}) # missing-parameter[e3]
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Any, Dict
a = ... # type: Any
b = ... # type: Dict[int, complex]
c = ... # type: Any
d = ... # type: Any
""",
)
self.assertErrorRegexes(
errors, {"e1": r"\ba\b", "e2": r"\bc\b", "e3": r"\ba\b"}
)
def test_union_with_superclass(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
class A1(): pass
class A2(A1): pass
class A3(A2): pass
""",
)
ty = self.Infer(
"""
import a
def f(x):
# Constrain the type of x so it doesn't pull everything into our pytd
x = x + 16
if x:
return a.A1()
else:
return a.A3()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
def f(x) -> a.A1: ...
""",
)
def test_builtins_module(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
import builtins
x = ... # type: builtins.int
""",
)
ty = self.Infer(
"""
import a
x = a.x
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
x = ... # type: int
""",
)
def test_frozenset(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import Any, FrozenSet, Set
x = ... # type: FrozenSet[str]
y = ... # type: Set[Any]
""",
)
ty = self.Infer(
"""
import a
x = a.x - a.x
y = a.x - a.y
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import FrozenSet
import a
x = ... # type: FrozenSet[str]
y = ... # type: FrozenSet[str]
""",
)
def test_raises(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(raises): ...
""",
)
self.Check("import foo", pythonpath=[d.path])
def test_typevar_conflict(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import List, Sequence
class A(List[int], Sequence[str]): ...
""",
)
ty, _ = self.InferWithErrors(
"""
import foo # pyi-error
x = [] + foo.A()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
foo = ... # type: Any
x = ... # type: list
""",
)
def test_same_typevar_name(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Generic, TypeVar
T = TypeVar("T")
class MySupportsAbs(Generic[T]): ...
class MyContextManager(Generic[T]):
def __enter__(self) -> T: ...
class Foo(MySupportsAbs[float], MyContextManager[Foo]): ...
""",
)
ty = self.Infer(
"""
import foo
v = foo.Foo().__enter__()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
v = ... # type: foo.Foo
""",
)
def test_type_param_in_mutation(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Generic, TypeVar
T = TypeVar("T")
T2 = TypeVar("T2")
class Bar(Generic[T]):
def bar(self, x:T2):
self = Bar[T2]
""",
)
ty = self.Infer(
"""
import foo
x = foo.Bar()
x.bar(10)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
x = ... # type: foo.Bar[int]
""",
)
def test_bad_type_param_in_mutation(self):
with test_utils.Tempdir() as d:
# T2 is not in scope when used in the mutation in Bar.bar()
d.create_file(
"foo.pyi",
"""
from typing import Generic, TypeVar
T = TypeVar("T")
T2 = TypeVar("T2")
class Bar(Generic[T]):
def quux(self, x: T2): ...
def bar(self):
self = Bar[T2]
""",
)
# We should get an error at import time rather than at use time here.
_, errors = self.InferWithErrors(
"""
import foo # pyi-error[e]
x = foo.Bar()
x.bar()
""",
pythonpath=[d.path],
)
self.assertErrorRegexes(errors, {"e": r"T2"})
def test_star_import(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
x = ... # type: int
T = TypeVar("T")
class A: ...
def f(x: T) -> T: ...
B = A
""",
)
d.create_file(
"bar.pyi",
"""
from foo import *
""",
)
self.Check(
"""
import bar
bar.x
bar.T
bar.A
bar.f
bar.B
""",
pythonpath=[d.path],
)
def test_star_import_value(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
T = TypeVar("T")
def f(x: T) -> T: ...
class Foo: pass
""",
)
d.create_file(
"bar.pyi",
"""
from foo import *
""",
)
ty = self.Infer(
"""
import bar
v1 = bar.Foo()
v2 = bar.f("")
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import bar
v1 = ... # type: foo.Foo
v2 = ... # type: str
""",
)
def test_star_import_getattr(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
def __getattr__(name) -> Any: ...
""",
)
d.create_file(
"bar.pyi",
"""
from foo import *
""",
)
self.Check(
"""
import bar
bar.rumpelstiltskin
""",
pythonpath=[d.path],
)
def test_alias(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(x: Foo): ...
g = f
class Foo: ...
""",
)
self.Check("import foo", pythonpath=[d.path])
def test_custom_binary_operator(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class Foo:
def __sub__(self, other) -> str: ...
class Bar(Foo):
def __rsub__(self, other) -> int: ...
""",
)
self.Check(
"""
import foo
(foo.Foo() - foo.Bar()).real
""",
pythonpath=[d.path],
)
def test_parameterized_any(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
x = ... # type: Any
y = ... # type: x[Any]
""",
)
self.Check(
"""
import foo
""",
pythonpath=[d.path],
)
def test_parameterized_external_any(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
x = ... # type: Any
""",
)
d.create_file(
"bar.pyi",
"""
import foo
from typing import Any
x = ... # type: foo.x[Any]
""",
)
self.Check(
"""
import bar
""",
pythonpath=[d.path],
)
def test_parameterized_alias(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
x = ... # type: Any
""",
)
d.create_file(
"bar.pyi",
"""
import foo
from typing import Any
x = foo.x[Any]
""",
)
self.Check(
"""
import bar
""",
pythonpath=[d.path],
)
def test_anything_constant(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
Foo = ... # type: Any
""",
)
d.create_file(
"bar.pyi",
"""
import foo
def f(x: foo.Foo) -> None: ...
""",
)
self.Check(
"""
import bar
bar.f(42)
""",
pythonpath=[d.path],
)
def test_alias_staticmethod(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class A:
@staticmethod
def t(a: str) -> None: ...
""",
)
ty = self.Infer(
"""
import foo
ta = foo.A.t
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Callable
ta = ... # type: Callable[[str], None]
""",
)
def test_alias_constant(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class Foo:
const = ... # type: int
Const = Foo.const
""",
)
ty = self.Infer(
"""
import foo
Const = foo.Const
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
Const = ... # type: int
""",
)
def test_alias_method(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class Foo:
def f(self) -> int: ...
Func = Foo.f
""",
)
ty = self.Infer(
"""
import foo
Func = foo.Func
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
def Func(self) -> int: ...
""",
)
def test_alias_aliases(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class Foo:
a1 = const
a2 = f
const = ... # type: int
def f(self) -> int: ...
Const = Foo.a1
Func = Foo.a2
""",
)
ty = self.Infer(
"""
import foo
Const = foo.Const
Func = foo.Func
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
Const = ... # type: int
def Func(self) -> int: ...
""",
)
def test_generic_inheritance(self):
with test_utils.Tempdir() as d:
# Inspired by typeshed/stdlib/2/UserString.pyi
d.create_file(
"foo.pyi",
"""
from typing import Sequence, MutableSequence, TypeVar
TFoo = TypeVar("TFoo", bound=Foo)
class Foo(Sequence[Foo]):
def __getitem__(self: TFoo, i: int) -> TFoo: ...
class Bar(Foo, MutableSequence[Bar]): ...
""",
)
ty = self.Infer(
"""
import foo
v = foo.Bar()[0]
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
v = ... # type: foo.Bar
""",
)
def test_dot_import(self):
with test_utils.Tempdir() as d:
d.create_file("foo/a.pyi", "class A: ...")
d.create_file(
"foo/b.pyi",
"""
from . import a
X = a.A
""",
)
ty = self.Infer(
"""
from foo import b
a = b.X()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from foo import b
a = ... # type: foo.a.A
""",
)
def test_dot_dot_import(self):
with test_utils.Tempdir() as d:
d.create_file("foo/a.pyi", "class A: ...")
d.create_file(
"foo/bar/b.pyi",
"""
from .. import a
X = a.A
""",
)
ty = self.Infer(
"""
from foo.bar import b
a = b.X()
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from foo.bar import b
a = ... # type: foo.a.A
""",
)
def test_typing_alias(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
import typing as _typing
def f(x: _typing.Tuple[str, str]) -> None: ...
""",
)
self.Check("import foo", pythonpath=[d.path])
def test_parameterize_builtin_tuple(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(x: tuple[int]) -> tuple[int, int]: ...
""",
)
ty, _ = self.InferWithErrors(
"""
import foo
foo.f((0, 0)) # wrong-arg-types
x = foo.f((0,))
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Tuple
x: Tuple[int, int]
""",
)
def test_implicit_mutation(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Generic, TypeVar
T = TypeVar('T')
class Foo(Generic[T]):
def __init__(self, x: T) -> None: ...
""",
)
ty = self.Infer(
"""
import foo
x = foo.Foo(x=0)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
x: foo.Foo[int]
""",
)
def test_import_typevar_for_property(self):
with test_utils.Tempdir() as d:
d.create_file(
"_typeshed.pyi",
"""
from typing import TypeVar
Self = TypeVar('Self')
""",
)
d.create_file(
"foo.pyi",
"""
from _typeshed import Self
class Foo:
@property
def foo(self: Self) -> Self: ...
""",
)
self.Check(
"""
import foo
assert_type(foo.Foo().foo, foo.Foo)
""",
pythonpath=[d.path],
)
def test_bad_annotation(self):
with test_utils.Tempdir() as d:
d.create_file(
"bad.pyi",
"""
def f() -> None: ...
class X:
x: f
""",
)
self.CheckWithErrors(
"""
import bad # pyi-error
""",
pythonpath=[d.path],
)
def test_bad_external_type(self):
with self.DepTree([
("dep_func.pyi", "def NotAClass(): ...\n"),
(
"dep.pyi",
"""
import dep_func
def Bad() -> dep_func.NotAClass: ...
""",
),
]):
self.CheckWithErrors("""
import dep # pyi-error
""")
def test_nonexistent_import(self):
with test_utils.Tempdir() as d:
d.create_file(
"bad.pyi",
"""
import nonexistent
x = nonexistent.x
""",
)
err = self.CheckWithErrors(
"""
import bad # pyi-error[e]
""",
pythonpath=[d.path],
)
self.assertErrorSequences(
err,
{
"e": [
"Couldn't import pyi",
"nonexistent",
"referenced from",
"bad",
]
},
)
if __name__ == "__main__":
test_base.main()
| PYITest |
python | keras-team__keras | keras/src/layers/pooling/average_pooling3d.py | {
"start": 185,
"end": 3238
} | class ____(BasePooling):
"""Average pooling operation for 3D data (spatial or spatio-temporal).
Downsamples the input along its spatial dimensions (depth, height, and
width) by taking the average value over an input window (of size defined by
`pool_size`) for each channel of the input. The window is shifted by
`strides` along each dimension.
Args:
pool_size: int or tuple of 3 integers, factors by which to downscale
(dim1, dim2, dim3). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 3 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)` while
`"channels_first"` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras
config file at `~/.keras/keras.json`. If you never set it, then it
will be `"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `data_format="channels_last"`:
5D tensor with shape:
`(batch_size, pooled_dim1, pooled_dim2, pooled_dim3, channels)`
- If `data_format="channels_first"`:
5D tensor with shape:
`(batch_size, channels, pooled_dim1, pooled_dim2, pooled_dim3)`
Example:
```python
depth = 30
height = 30
width = 30
channels = 3
inputs = keras.layers.Input(shape=(depth, height, width, channels))
layer = keras.layers.AveragePooling3D(pool_size=3)
outputs = layer(inputs) # Shape: (batch_size, 10, 10, 10, 3)
```
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs,
):
super().__init__(
pool_size,
strides,
pool_dimensions=3,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
| AveragePooling3D |
python | numba__numba | numba/tests/test_cli.py | {
"start": 1165,
"end": 3934
} | class ____(TestCase):
def test_as_module_exit_code(self):
cmdline = [sys.executable, "-m", "numba"]
with self.assertRaises(AssertionError) as raises:
run_cmd(cmdline)
self.assertIn("process failed with code 1", str(raises.exception))
def test_sysinfo_from_module(self):
cmdline = [sys.executable, "-m", "numba", "-s"]
o, _ = run_cmd(cmdline)
self.assertIn("System info", o)
def test_json_sysinfo_from_module(self):
with TemporaryDirectory() as d:
path = os.path.join(d, "test_json_sysinfo.json")
cmdline = [sys.executable, "-m", "numba", "--sys-json", path]
run_cmd(cmdline)
with self.subTest(msg=f"{path} exists"):
self.assertTrue(os.path.exists(path))
with self.subTest(msg="json load"):
with open(path, 'r') as f:
info = json.load(f)
safe_contents = {
int: (
nsi._cpu_count,
),
float: (
nsi._runtime,
),
str: (
nsi._start,
nsi._start_utc,
nsi._machine,
nsi._cpu_name,
nsi._platform_name,
nsi._os_name,
nsi._os_version,
nsi._python_comp,
nsi._python_impl,
nsi._python_version,
nsi._llvm_version,
),
bool: (
nsi._cu_dev_init,
nsi._svml_state,
nsi._svml_loaded,
nsi._svml_operational,
nsi._llvm_svml_patched,
nsi._tbb_thread,
nsi._openmp_thread,
nsi._wkq_thread,
),
list: (
nsi._errors,
nsi._warnings,
),
dict: (
nsi._numba_env_vars,
),
}
for t, keys in safe_contents.items():
for k in keys:
with self.subTest(k=k):
self.assertIsInstance(info[k], t)
@needs_gdb
def test_gdb_status_from_module(self):
# Check that the `python -m numba -g` works ok
cmdline = [sys.executable, "-m", "numba", "-g"]
o, _ = run_cmd(cmdline)
self.assertIn("GDB info", o)
# It's not known a priori whether the extension is supported, this just
# checks that the last logical item in the output is printed.
self.assertIn("Numba printing extension support", o)
| TestCLI |
python | neetcode-gh__leetcode | python/0704-binary-search.py | {
"start": 0,
"end": 383
} | class ____:
def search(self, nums: List[int], target: int) -> int:
l, r = 0, len(nums) - 1
while l <= r:
m = l + ((r - l) // 2) # (l + r) // 2 can lead to overflow
if nums[m] > target:
r = m - 1
elif nums[m] < target:
l = m + 1
else:
return m
return -1
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass2.py | {
"start": 157,
"end": 375
} | class ____(Enum):
apple = 1
orange = 2
pear = 3
def requires_fruit_mapping(a: Mapping[str, Fruit]):
pass
requires_fruit_mapping(Fruit.__members__)
aaa = len(Fruit)
for i in Fruit:
print(i)
| Fruit |
python | bokeh__bokeh | tests/unit/bokeh/test_objects.py | {
"start": 16253,
"end": 21558
} | class ____(TestContainerMutation):
def test_whether_included_in_props_with_values(self) -> None:
obj = HasStringDictProp()
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# simply reading the property creates a new wrapper, so be
# sure that doesn't count as replacing the default
foo = obj.foo # noqa: F841
assert 'foo' not in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
# but changing the dict should count as replacing the default
obj.foo['bar'] = 42
assert 'foo' in obj.properties_with_values(include_defaults=False)
assert 'foo' in obj.properties_with_values(include_defaults=True)
def test_assignment_maintains_owners(self) -> None:
obj = HasStringDictProp()
old_dict = obj.foo
assert isinstance(old_dict, PropertyValueDict)
assert 1 == len(old_dict._owners)
obj.foo = dict(a=1)
new_dict = obj.foo
assert isinstance(new_dict, PropertyValueDict)
assert old_dict is not new_dict
assert 0 == len(old_dict._owners)
assert 1 == len(new_dict._owners)
def test_dict_delitem_string(self) -> None:
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
del x['b']
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, c=3))
def test_dict_delitem_int(self) -> None:
obj = HasIntDictProp(foo={ 1 : "a", 2 : "b", 3 : "c" })
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
del x[1]
self._check_mutation(obj, 'foo', mutate,
{ 1 : "a", 2 : "b", 3 : "c" },
{ 2 : "b", 3 : "c" })
def test_dict_setitem_string(self) -> None:
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x['b'] = 42
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=42, c=3))
def test_dict_setitem_int(self) -> None:
obj = HasIntDictProp(foo={ 1 : "a", 2 : "b", 3 : "c" })
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x[2] = "bar"
self._check_mutation(obj, 'foo', mutate,
{ 1 : "a", 2 : "b", 3 : "c" },
{ 1 : "a", 2 : "bar", 3 : "c" })
def test_dict_clear(self) -> None:
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x.clear()
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict())
def test_dict_pop(self) -> None:
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x.pop('b')
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, c=3))
def test_dict_pop_default_works(self) -> None:
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
assert 42 == obj.foo.pop('z', 42)
def test_dict_popitem_works(self) -> None:
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
i = obj.foo.popitem()
assert i == ('a', 1) or i == ('b', 2) or i == ('c', 3)
# we don't _check_mutation since the end value is nondeterministic
def test_dict_setdefault(self) -> None:
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
b = x.setdefault('b', 43)
assert 2 == b
z = x.setdefault('z', 44)
assert 44 == z
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=2, c=3, z=44))
def test_dict_update(self) -> None:
obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
assert isinstance(obj.foo, PropertyValueDict)
def mutate(x):
x.update(dict(b=7, c=8))
self._check_mutation(obj, 'foo', mutate,
dict(a=1, b=2, c=3),
dict(a=1, b=7, c=8))
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| TestDictMutation |
python | huggingface__transformers | utils/notification_service.py | {
"start": 4093,
"end": 69242
class ____:
    """Aggregates CI test results and renders/posts them as Slack messages.

    ``model_results`` maps model-suite job names to result dicts;
    ``additional_results`` does the same for the non-model suites. Judging by
    the sums below, each result dict carries "success" / "error" counts and a
    "failed" mapping whose values hold per-device counts keyed by
    "single" / "multi" / "unclassified" — TODO(review): confirm against the
    code that produces these dicts.
    """

    def __init__(
        self,
        title: str,
        ci_title: str,
        model_results: dict,
        additional_results: dict,
        selected_warnings: list | None = None,
        prev_ci_artifacts=None,
        other_ci_artifacts=None,
    ):
        self.title = title
        self.ci_title = ci_title

        # Failures and success of the modeling tests
        self.n_model_success = sum(r["success"] for r in model_results.values())
        self.n_model_single_gpu_failures = sum(dicts_to_sum(r["failed"])["single"] for r in model_results.values())
        self.n_model_multi_gpu_failures = sum(dicts_to_sum(r["failed"])["multi"] for r in model_results.values())

        # Some suites do not have a distinction between single and multi GPU.
        self.n_model_unknown_failures = sum(dicts_to_sum(r["failed"])["unclassified"] for r in model_results.values())
        self.n_model_failures = (
            self.n_model_single_gpu_failures + self.n_model_multi_gpu_failures + self.n_model_unknown_failures
        )
        # Jobs that produced no test output files at all (e.g. crashed before pytest ran).
        self.n_model_jobs_errored_out = sum(r["error"] for r in model_results.values())

        # Failures and success of the additional tests
        self.n_additional_success = sum(r["success"] for r in additional_results.values())
        self.n_additional_jobs_errored_out = sum(r["error"] for r in additional_results.values())

        if len(additional_results) > 0:
            # `dicts_to_sum` uses `dicts_to_sum` which requires a non empty dictionary. Let's just add an empty entry.
            all_additional_failures = dicts_to_sum([r["failed"] for r in additional_results.values()])
            self.n_additional_single_gpu_failures = all_additional_failures["single"]
            self.n_additional_multi_gpu_failures = all_additional_failures["multi"]
            self.n_additional_unknown_gpu_failures = all_additional_failures["unclassified"]
        else:
            self.n_additional_single_gpu_failures = 0
            self.n_additional_multi_gpu_failures = 0
            self.n_additional_unknown_gpu_failures = 0

        self.n_additional_failures = (
            self.n_additional_single_gpu_failures
            + self.n_additional_multi_gpu_failures
            + self.n_additional_unknown_gpu_failures
        )

        # Results
        self.n_failures = self.n_model_failures + self.n_additional_failures
        self.n_success = self.n_model_success + self.n_additional_success
        self.n_tests = self.n_failures + self.n_success
        self.n_jobs_errored_out = self.n_model_jobs_errored_out + self.n_additional_jobs_errored_out

        self.model_results = model_results
        self.additional_results = additional_results

        # Set by `post()`; threaded replies are attached to this timestamp.
        self.thread_ts = None

        if selected_warnings is None:
            selected_warnings = []
        self.selected_warnings = selected_warnings

        # Artifacts from previous/other runs, used to compute "new failures" diffs.
        self.prev_ci_artifacts = prev_ci_artifacts
        self.other_ci_artifacts = other_ci_artifacts
@property
def time(self) -> str:
all_results = [*self.model_results.values(), *self.additional_results.values()]
time_spent = []
for r in all_results:
if len(r["time_spent"]):
time_spent.extend(r["time_spent"])
total_secs = sum(time_spent)
hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
def header(self) -> dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def ci_title_section(self) -> dict:
return {"type": "section", "text": {"type": "mrkdwn", "text": self.ci_title}}
@property
def no_failures(self) -> dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"[SUCCESS] There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def failures(self) -> dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\n"
f"[ERROR] There were {self.n_jobs_errored_out} jobs errored out (not producing test output files).\n"
f"The suite ran in {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def warnings(self) -> dict:
# If something goes wrong, let's avoid the CI report failing to be sent.
button_text = "Check warnings (Link not found)"
# Use the workflow run link
job_link = f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}"
for job in github_actions_jobs:
if "Extract warnings in CI artifacts" in job["name"] and job["conclusion"] == "success":
button_text = "Check warnings"
# Use the actual job link
job_link = job["html_url"]
break
huggingface_hub_warnings = [x for x in self.selected_warnings if "huggingface_hub" in x]
text = f"There are {len(self.selected_warnings)} warnings being selected."
text += f"\n{len(huggingface_hub_warnings)} of them are from `huggingface_hub`."
return {
"type": "section",
"text": {
"type": "plain_text",
"text": text,
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": button_text, "emoji": True},
"url": job_link,
},
}
@staticmethod
def get_device_report(report, rjust=6):
if "single" in report and "multi" in report:
return f"{str(report['single']).rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "
elif "single" in report:
return f"{str(report['single']).rjust(rjust)} | {'0'.rjust(rjust)} | "
elif "multi" in report:
return f"{'0'.rjust(rjust)} | {str(report['multi']).rjust(rjust)} | "
@property
def category_failures(self) -> dict:
if job_name != "run_models_gpu":
category_failures_report = ""
return {"type": "section", "text": {"type": "mrkdwn", "text": category_failures_report}}
model_failures = [v["failed"] for v in self.model_results.values()]
category_failures = {}
for model_failure in model_failures:
for key, value in model_failure.items():
if key not in category_failures:
category_failures[key] = dict(value)
else:
category_failures[key]["unclassified"] += value["unclassified"]
category_failures[key]["single"] += value["single"]
category_failures[key]["multi"] += value["multi"]
individual_reports = []
for key, value in category_failures.items():
device_report = self.get_device_report(value)
if sum(value.values()):
if device_report:
individual_reports.append(f"{device_report}{key}")
else:
individual_reports.append(key)
header = "Single | Multi | Category\n"
category_failures_report = prepare_reports(
title="The following categories had failures", header=header, reports=individual_reports
)
return {"type": "section", "text": {"type": "mrkdwn", "text": category_failures_report}}
def compute_diff_for_failure_reports(self, curr_failure_report, prev_failure_report): # noqa
# Remove the leading and training parts that don't contain failure count information.
model_failures = curr_failure_report.split("\n")[3:-2]
prev_model_failures = prev_failure_report.split("\n")[3:-2]
entries_changed = set(model_failures).difference(prev_model_failures)
prev_map = {}
for f in prev_model_failures:
items = [x.strip() for x in f.split("| ")]
prev_map[items[-1]] = [int(x) for x in items[:-1]]
curr_map = {}
for f in entries_changed:
items = [x.strip() for x in f.split("| ")]
curr_map[items[-1]] = [int(x) for x in items[:-1]]
diff_map = {}
for k, v in curr_map.items():
if k not in prev_map:
diff_map[k] = v
else:
diff = [x - y for x, y in zip(v, prev_map[k])]
if max(diff) > 0:
diff_map[k] = diff
entries_changed = []
for model_name, diff_values in diff_map.items():
diff = [str(x) for x in diff_values]
diff = [f"+{x}" if (x != "0" and not x.startswith("-")) else x for x in diff]
diff = [x.rjust(9) for x in diff]
device_report = " | ".join(diff) + " | "
report = f"{device_report}{model_name}"
entries_changed.append(report)
entries_changed = sorted(entries_changed, key=lambda s: s.split("| ")[-1])
return entries_changed
    @property
    def model_failures(self) -> list[dict]:
        """Slack section blocks with the per-model and per-module failure tables.

        Also writes the full (untruncated) tables into ``ci_results_{job_name}/``
        on disk and, when artifacts of a previous run are available, appends a
        "Changed model modules failures" diff section.
        Relies on module globals `job_name`, `job_to_test_map`,
        `test_to_result_name` and `NON_MODEL_TEST_MODULES`.
        NOTE(review): `self.prev_ci_artifacts` is indexed as a mapping here but
        unpacked as a 2-tuple in `payload` — confirm which shape callers pass.
        """

        # Obtain per-model failures
        def per_model_sum(model_category_dict):
            return dicts_to_sum(model_category_dict["failed"].values())

        failures = {}
        non_model_failures = {
            k: per_model_sum(v) for k, v in self.model_results.items() if sum(per_model_sum(v).values())
        }

        for k, v in self.model_results.items():
            # The keys in `model_results` may contain things like `models_vit` or `quantization_autoawq`
            # Remove the prefix to make the report cleaner.
            k = k.replace("models_", "").replace("quantization_", "")
            if k in NON_MODEL_TEST_MODULES:
                continue

            if sum(per_model_sum(v).values()):
                dict_failed = dict(v["failed"])

                # Model job has a special form for reporting
                if job_name == "run_models_gpu":
                    pytorch_specific_failures = dict_failed.pop("PyTorch")
                    other_failures = dicts_to_sum(dict_failed.values())

                    failures[k] = {
                        "PyTorch": pytorch_specific_failures,
                        "other": other_failures,
                    }
                else:
                    test_name = job_to_test_map[job_name]
                    specific_failures = dict_failed.pop(test_name)
                    failures[k] = {
                        test_name: specific_failures,
                    }

        model_reports = []
        other_module_reports = []

        # Modules that are not models (tokenization, pipelines, ...) get their own table.
        for key, value in non_model_failures.items():
            key = key.replace("models_", "").replace("quantization_", "")

            if key in NON_MODEL_TEST_MODULES:
                device_report = self.get_device_report(value)

                if sum(value.values()):
                    if device_report:
                        report = f"{device_report}{key}"
                    else:
                        report = key

                    other_module_reports.append(report)

        for key, value in failures.items():
            # Model job has a special form for reporting
            if job_name == "run_models_gpu":
                device_report_values = [
                    value["PyTorch"]["single"],
                    value["PyTorch"]["multi"],
                    sum(value["other"].values()),
                ]
            else:
                test_name = job_to_test_map[job_name]
                device_report_values = [
                    value[test_name]["single"],
                    value[test_name]["multi"],
                ]

            if sum(device_report_values):
                # This is related to `model_header` below
                rjust_width = 9 if job_name == "run_models_gpu" else 6
                device_report = " | ".join([str(x).rjust(rjust_width) for x in device_report_values]) + " | "
                report = f"{device_report}{key}"
                model_reports.append(report)

        # (Possibly truncated) reports for the current workflow run - to be sent to Slack channels
        if job_name == "run_models_gpu":
            model_header = "Single PT | Multi PT | Other | Category\n"
        else:
            model_header = "Single | Multi | Category\n"

        # Used when calling `prepare_reports` below to prepare the `title` argument
        label = test_to_result_name[job_to_test_map[job_name]]

        sorted_model_reports = sorted(model_reports, key=lambda s: s.split("| ")[-1])
        model_failures_report = prepare_reports(
            title=f"These following {label} modules had failures", header=model_header, reports=sorted_model_reports
        )

        module_header = "Single | Multi | Category\n"
        sorted_module_reports = sorted(other_module_reports, key=lambda s: s.split("| ")[-1])
        module_failures_report = prepare_reports(
            title=f"The following {label} modules had failures", header=module_header, reports=sorted_module_reports
        )

        # To be sent to Slack channels
        model_failure_sections = [{"type": "section", "text": {"type": "mrkdwn", "text": model_failures_report}}]
        model_failure_sections.append({"type": "section", "text": {"type": "mrkdwn", "text": module_failures_report}})

        # Save the complete (i.e. no truncation) failure tables (of the current workflow run)
        # (to be uploaded as artifacts)
        model_failures_report = prepare_reports(
            title=f"These following {label} modules had failures",
            header=model_header,
            reports=sorted_model_reports,
            to_truncate=False,
        )
        file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/model_failures_report.txt")
        with open(file_path, "w", encoding="UTF-8") as fp:
            fp.write(model_failures_report)

        module_failures_report = prepare_reports(
            title=f"The following {label} modules had failures",
            header=module_header,
            reports=sorted_module_reports,
            to_truncate=False,
        )
        file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/module_failures_report.txt")
        with open(file_path, "w", encoding="UTF-8") as fp:
            fp.write(module_failures_report)

        if self.prev_ci_artifacts is not None:
            # if the last run produces artifact named `ci_results_{job_name}`
            if (
                f"ci_results_{job_name}" in self.prev_ci_artifacts
                and "model_failures_report.txt" in self.prev_ci_artifacts[f"ci_results_{job_name}"]
            ):
                # Compute the difference of the previous/current (model failure) table
                prev_model_failures = self.prev_ci_artifacts[f"ci_results_{job_name}"]["model_failures_report.txt"]
                entries_changed = self.compute_diff_for_failure_reports(model_failures_report, prev_model_failures)
                if len(entries_changed) > 0:
                    # Save the complete difference
                    diff_report = prepare_reports(
                        title="Changed model modules failures",
                        header=model_header,
                        reports=entries_changed,
                        to_truncate=False,
                    )
                    file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/changed_model_failures_report.txt")
                    with open(file_path, "w", encoding="UTF-8") as fp:
                        fp.write(diff_report)

                    # To be sent to Slack channels
                    diff_report = prepare_reports(
                        title="*Changed model modules failures*",
                        header=model_header,
                        reports=entries_changed,
                    )
                    model_failure_sections.append(
                        {"type": "section", "text": {"type": "mrkdwn", "text": diff_report}},
                    )

        return model_failure_sections
@property
def additional_failures(self) -> dict:
failures = {k: v["failed"] for k, v in self.additional_results.items()}
errors = {k: v["error"] for k, v in self.additional_results.items()}
individual_reports = []
for key, value in failures.items():
device_report = self.get_device_report(value)
if sum(value.values()) or errors[key]:
report = f"{key}"
if errors[key]:
report = f"[Errored out] {report}"
if device_report:
report = f"{device_report}{report}"
individual_reports.append(report)
header = "Single | Multi | Category\n"
failures_report = prepare_reports(
title="The following non-modeling tests had failures", header=header, reports=individual_reports
)
return {"type": "section", "text": {"type": "mrkdwn", "text": failures_report}}
    @property
    def payload(self) -> str:
        """Assemble the full Slack message as a JSON-encoded list of blocks.

        Besides building the blocks, this computes "new failures" relative to
        the previous and other CI runs, writes them to disk and uploads them to
        the Hub report dataset. Relies on module globals `job_name`, `api`,
        `report_repo_id`, `report_repo_folder`, `is_scheduled_ci_run` and
        `diff_file_url`.
        NOTE(review): `self.prev_ci_artifacts` is unpacked as a
        `(workflow_run_id, artifacts)` pair here, while `model_failures`
        indexes it directly as a mapping — confirm which shape callers pass.
        """
        blocks = [self.header]

        if self.ci_title:
            blocks.append(self.ci_title_section)

        if self.n_model_failures > 0 or self.n_additional_failures > 0 or self.n_jobs_errored_out > 0:
            blocks.append(self.failures)

        if self.n_model_failures > 0:
            # Empty-text blocks are skipped: Slack rejects empty section text.
            block = self.category_failures
            if block["text"]["text"]:
                blocks.append(block)

            for block in self.model_failures:
                if block["text"]["text"]:
                    blocks.append(block)

        if self.n_additional_failures > 0:
            blocks.append(self.additional_failures)

        if self.n_model_failures == 0 and self.n_additional_failures == 0:
            blocks.append(self.no_failures)

        if len(self.selected_warnings) > 0:
            blocks.append(self.warnings)

        new_failure_blocks = []
        # idx 0 is the immediately-previous run; the rest are "other" reference runs.
        for idx, (prev_workflow_run_id, prev_ci_artifacts) in enumerate(
            [self.prev_ci_artifacts] + self.other_ci_artifacts
        ):
            if idx == 0:
                # This is the truncated version to show on slack. For now.
                new_failure_blocks = self.get_new_model_failure_blocks(
                    prev_ci_artifacts=prev_ci_artifacts, with_header=False
                )

            # To save the list of new model failures and uploaed to hub repositories
            extra_blocks = self.get_new_model_failure_blocks(prev_ci_artifacts=prev_ci_artifacts, to_truncate=False)
            if extra_blocks:
                # NOTE(review): `filename` is computed but the literal file paths
                # below do not use it — looks inconsistent; confirm intent.
                filename = "new_failures"
                if idx > 0:
                    filename = f"(unknown)_against_{prev_workflow_run_id}"

                failure_text = extra_blocks[-1]["text"]["text"]
                file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/(unknown).txt")
                with open(file_path, "w", encoding="UTF-8") as fp:
                    fp.write(failure_text)

                # upload results to Hub dataset
                file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/(unknown).txt")
                _ = api.upload_file(
                    path_or_fileobj=file_path,
                    path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/(unknown).txt",
                    repo_id=report_repo_id,
                    repo_type="dataset",
                    token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
                )

                # extra processing to save to json format
                new_failed_tests = {}
                nb_new_failed_tests = 0
                for line in failure_text.split():
                    if "https://github.com/huggingface/transformers/actions/runs" in line:
                        # Capture (job url, device label) pairs from Slack-style links.
                        pattern = r"<(https://github.com/huggingface/transformers/actions/runs/.+?/job/.+?)\|(.+?)>"
                        items = re.findall(pattern, line)
                    elif "tests/" in line:
                        # TODO: Improve the condition here.
                        if "tests/models/" in line or (
                            "tests/quantization/" in line and job_name == "run_quantization_torch_gpu"
                        ):
                            model = line.split("/")[2]
                        else:
                            model = line.split("/")[1]
                        if model not in new_failed_tests:
                            new_failed_tests[model] = {"single-gpu": [], "multi-gpu": []}
                        for _, device in items:
                            new_failed_tests[model][f"{device}-gpu"].append(line)
                        nb_new_failed_tests += 1
                file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/(unknown).json")
                with open(file_path, "w", encoding="UTF-8") as fp:
                    json.dump(new_failed_tests, fp, ensure_ascii=False, indent=4)

                # upload results to Hub dataset
                file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/(unknown).json")
                commit_info = api.upload_file(
                    path_or_fileobj=file_path,
                    path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/(unknown).json",
                    repo_id=report_repo_id,
                    repo_type="dataset",
                    token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
                )

                new_failures_url = f"https://huggingface.co/datasets/{report_repo_id}/raw/{commit_info.oid}/{report_repo_folder}/ci_results_{job_name}/(unknown).json"
                if idx == 0:
                    block = {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            "text": f"*There are {nb_new_failed_tests} new failed tests*\n\n(compared to previous run: <https://github.com/huggingface/transformers/actions/runs/{prev_workflow_run_id}|{prev_workflow_run_id}>)",
                        },
                        "accessory": {
                            "type": "button",
                            "text": {"type": "plain_text", "text": "Check new failures"},
                            "url": new_failures_url,
                        },
                    }
                    blocks.append(block)
                else:
                    block = {
                        "type": "section",
                        "text": {
                            "type": "mrkdwn",
                            # TODO: We should NOT assume it's always Nvidia CI, but it's the case at this moment.
                            "text": f"*There are {nb_new_failed_tests} failed tests unique to this run*\n\n(compared to{' Nvidia CI ' if is_scheduled_ci_run else ' '}run: <https://github.com/huggingface/transformers/actions/runs/{prev_workflow_run_id}|{prev_workflow_run_id}>)",
                        },
                        "accessory": {
                            "type": "button",
                            "text": {"type": "plain_text", "text": "Check failures"},
                            "url": new_failures_url,
                        },
                    }
                    blocks.append(block)

        if diff_file_url is not None:
            block = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"*Test results diff*\n\n(compared to previous run: <https://github.com/huggingface/transformers/actions/runs/{prev_workflow_run_id}|{prev_workflow_run_id}>)",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check test result diff file"},
                    "url": diff_file_url,
                },
            }
            blocks.append(block)

        if len(new_failure_blocks) > 0:
            blocks.extend(new_failure_blocks)

        return json.dumps(blocks)
@staticmethod
def error_out(title, ci_title="", runner_not_available=False, runner_failed=False, setup_failed=False):
blocks = []
title_block = {"type": "header", "text": {"type": "plain_text", "text": title}}
blocks.append(title_block)
if ci_title:
ci_title_block = {"type": "section", "text": {"type": "mrkdwn", "text": ci_title}}
blocks.append(ci_title_block)
offline_runners = []
if runner_not_available:
text = "[FAIL] CI runners are not available! Tests are not run."
result = os.environ.get("OFFLINE_RUNNERS")
if result is not None:
offline_runners = json.loads(result)
elif runner_failed:
text = "[FAIL] CI runners have problems! Tests are not run."
elif setup_failed:
text = "[FAIL] Setup job failed. Tests are not run."
else:
text = "[FAIL] There was an issue running the tests."
error_block_1 = {
"type": "header",
"text": {
"type": "plain_text",
"text": text,
},
}
text = ""
if len(offline_runners) > 0:
text = "\n • " + "\n • ".join(offline_runners)
text = f"The following runners are offline:\n{text}\n\n"
text += "Let's fix it ASAP!"
error_block_2 = {
"type": "section",
"text": {
"type": "plain_text",
"text": text,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
blocks.extend([error_block_1, error_block_2])
payload = json.dumps(blocks)
print("Sending the following payload")
print(json.dumps({"blocks": blocks}))
client.chat_postMessage(
channel=SLACK_REPORT_CHANNEL_ID,
text=text,
blocks=payload,
)
def post(self):
payload = self.payload
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(payload)}))
text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
self.thread_ts = client.chat_postMessage(
channel=SLACK_REPORT_CHANNEL_ID,
blocks=payload,
text=text,
)
def get_reply_blocks(self, job_name, job_result, failures, device, text):
"""
failures: A list with elements of the form {"line": full test name, "trace": error trace}
"""
# `text` must be less than 3001 characters in Slack SDK
# keep some room for adding "[Truncated]" when necessary
MAX_ERROR_TEXT = 3000 - len("[Truncated]")
failure_text = ""
for idx, error in enumerate(failures):
new_text = failure_text + f"*{error['line']}*\n_{error['trace']}_\n\n"
if len(new_text) > MAX_ERROR_TEXT:
# `failure_text` here has length <= 3000
failure_text = failure_text + "[Truncated]"
break
# `failure_text` here has length <= MAX_ERROR_TEXT
failure_text = new_text
title = job_name
if device is not None:
title += f" ({device}-gpu)"
content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
# TODO: Make sure we always have a valid job link (or at least a way not to break the report sending)
# Currently we get the device from a job's artifact name.
# If a device is found, the job name should contain the device type, for example, `XXX (single-gpu)`.
# This could be done by adding `machine_type` in a job's `strategy`.
# (If `job_result["job_link"][device]` is `None`, we get an error: `... [ERROR] must provide a string ...`)
if job_result["job_link"] is not None and job_result["job_link"][device] is not None:
content["accessory"] = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_result["job_link"][device],
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failure_text}},
]
    def get_new_model_failure_blocks(self, prev_ci_artifacts, with_header=True, to_truncate=True):
        """Build Slack blocks listing failures absent from a previous run.

        Args:
            prev_ci_artifacts: Mapping ``artifact-folder name -> {filename: content}``
                from the run to compare against; ``[]`` is returned when None.
            with_header: Prepend a "New failures" header block.
            to_truncate: Cap the text at Slack's 3000-character limit.
        Relies on module globals `job_name`, `job_to_test_map` and
        `test_to_result_name`.
        """
        if prev_ci_artifacts is None:
            return []

        if len(self.model_results) > 0:
            target_results = self.model_results
        else:
            target_results = self.additional_results[job_to_test_map[job_name]]
            # Make the format uniform between `model_results` and `additional_results[XXX]`
            if "failures" in target_results:
                target_results = {job_name: target_results}

        sorted_dict = sorted(target_results.items(), key=lambda t: t[0])

        job = job_to_test_map[job_name]

        prev_model_results = {}
        # Only compare when the previous run produced the matching results artifact.
        if (
            f"ci_results_{job_name}" in prev_ci_artifacts
            and f"{test_to_result_name[job]}_results.json" in prev_ci_artifacts[f"ci_results_{job_name}"]
        ):
            prev_model_results = json.loads(
                prev_ci_artifacts[f"ci_results_{job_name}"][f"{test_to_result_name[job]}_results.json"]
            )
            # Make the format uniform between `model_results` and `additional_results[XXX]`
            if "failures" in prev_model_results:
                prev_model_results = {job_name: prev_model_results}

        # Map "test line" -> list of device labels (possibly Slack links) where it failed.
        all_failure_lines = {}
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                devices = sorted(job_result["failures"].keys(), reverse=True)
                for device in devices:
                    failures = job_result["failures"][device]
                    prev_error_lines = {}
                    if job in prev_model_results and device in prev_model_results[job]["failures"]:
                        prev_error_lines = {error["line"] for error in prev_model_results[job]["failures"][device]}

                    url = None
                    if job_result["job_link"] is not None and job_result["job_link"][device] is not None:
                        url = job_result["job_link"][device]

                    for idx, error in enumerate(failures):
                        # Skip failures that already existed in the previous run.
                        if error["line"] in prev_error_lines:
                            continue

                        new_text = f"{error['line']}\n\n"

                        if new_text not in all_failure_lines:
                            all_failure_lines[new_text] = []

                        all_failure_lines[new_text].append(f"<{url}|{device}>" if url is not None else device)

        # Leave room for the "[Truncated]" marker and the header text.
        MAX_ERROR_TEXT = 3000 - len("[Truncated]") - len("```New failures```\n\n")
        if not to_truncate:
            MAX_ERROR_TEXT = float("inf")
        failure_text = ""
        for line, devices in all_failure_lines.items():
            new_text = failure_text + f"{'|'.join(devices)} gpu\n{line}"
            if len(new_text) > MAX_ERROR_TEXT:
                # `failure_text` here has length <= 3000
                failure_text = failure_text + "[Truncated]"
                break
            # `failure_text` here has length <= MAX_ERROR_TEXT
            failure_text = new_text

        blocks = []
        if failure_text:
            if with_header:
                blocks.append(
                    {"type": "header", "text": {"type": "plain_text", "text": "New failures", "emoji": True}}
                )
            else:
                failure_text = f"{failure_text}"
            blocks.append({"type": "section", "text": {"type": "mrkdwn", "text": failure_text}})

        return blocks
def post_reply(self):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made.")
sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0])
for job, job_result in sorted_dict:
if len(job_result["failures"]):
for device, failures in job_result["failures"].items():
text = "\n".join(
sorted([f"*{k}*: {v[device]}" for k, v in job_result["failed"].items() if v[device]])
)
blocks = self.get_reply_blocks(job, job_result, failures, device, text=text)
print("Sending the following reply")
print(json.dumps({"blocks": blocks}))
client.chat_postMessage(
channel=SLACK_REPORT_CHANNEL_ID,
text=f"Results for {job}",
blocks=blocks,
thread_ts=self.thread_ts["ts"],
)
time.sleep(1)
for job, job_result in self.additional_results.items():
if len(job_result["failures"]):
for device, failures in job_result["failures"].items():
blocks = self.get_reply_blocks(
job,
job_result,
failures,
device,
text=f"Number of failures: {job_result['failed'][device]}",
)
print("Sending the following reply")
print(json.dumps({"blocks": blocks}))
client.chat_postMessage(
channel=SLACK_REPORT_CHANNEL_ID,
text=f"Results for {job}",
blocks=blocks,
thread_ts=self.thread_ts["ts"],
)
time.sleep(1)
def retrieve_artifact(artifact_path: str, gpu: str | None):
if gpu not in [None, "single", "multi"]:
raise ValueError(f"Invalid GPU for artifact. Passed GPU: `{gpu}`.")
_artifact = {}
if os.path.exists(artifact_path):
files = os.listdir(artifact_path)
for file in files:
try:
with open(os.path.join(artifact_path, file)) as f:
_artifact[file.split(".")[0]] = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(artifact_path, file)}.") from e
return _artifact
def retrieve_available_artifacts():
    """Scan the current directory for downloaded CI artifact folders.

    Folder names are normalized: an optional ``_postfix_...`` suffix is dropped
    and a leading ``single-gpu``/``multi-gpu`` prefix is recorded as a device
    flag instead of being kept in the artifact name.
    """

    class Artifact:
        def __init__(self, name: str, single_gpu: bool = False, multi_gpu: bool = False):
            self.name = name
            self.single_gpu = single_gpu
            self.multi_gpu = multi_gpu
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str, gpu: str | None = None):
            self.paths.append({"name": self.name, "path": path, "gpu": gpu})

    _available_artifacts: dict[str, Artifact] = {}

    for directory in filter(os.path.isdir, os.listdir()):
        # Strip any "_postfix_..." suffix from the folder name.
        artifact_name = directory.split("_postfix_")[0]

        if artifact_name.startswith("single-gpu"):
            artifact_name = artifact_name[len("single-gpu") + 1 :]
            artifact = _available_artifacts.setdefault(artifact_name, Artifact(artifact_name))
            artifact.single_gpu = True
            artifact.add_path(directory, gpu="single")
        elif artifact_name.startswith("multi-gpu"):
            artifact_name = artifact_name[len("multi-gpu") + 1 :]
            artifact = _available_artifacts.setdefault(artifact_name, Artifact(artifact_name))
            artifact.multi_gpu = True
            artifact.add_path(directory, gpu="multi")
        else:
            artifact = _available_artifacts.setdefault(artifact_name, Artifact(artifact_name))
            artifact.add_path(directory)

    return _available_artifacts
def prepare_reports(title, header, reports, to_truncate=True):
    """Render a titled, fenced failure table, truncating it to fit Slack limits.

    Rows are added one at a time; the last rendering that still fits wins, and
    "[Truncated]" is appended when rows had to be dropped.
    """
    report = ""

    # `text` must be less than 3001 characters in Slack SDK
    # keep some room for adding "[Truncated]" when necessary
    max_len = 3000 - len("[Truncated]") if to_truncate else float("inf")

    if len(reports) == 0:
        return report

    for count in range(1, len(reports) + 1):
        table = header + "\n".join(reports[:count])
        candidate = f"{title}:\n```\n{table}\n```\n"
        if len(candidate) > max_len:
            # `report` here has length <= 3000
            report += "[Truncated]"
            break
        report = candidate

    return report
def pop_default(l: list[Any], i: int, default: Any) -> Any:
try:
return l.pop(i)
except IndexError:
return default
if __name__ == "__main__":
api = HfApi()
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
SLACK_REPORT_CHANNEL_ID = os.environ["SLACK_REPORT_CHANNEL"]
# runner_status = os.environ.get("RUNNER_STATUS")
# runner_env_status = os.environ.get("RUNNER_ENV_STATUS")
setup_status = os.environ.get("SETUP_STATUS")
# runner_not_available = True if runner_status is not None and runner_status != "success" else False
# runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False
# Let's keep the lines regardig runners' status (we might be able to use them again in the future)
runner_not_available = False
runner_failed = False
# Some jobs don't depend (`needs`) on the job `setup`: in this case, the status of the job `setup` is `skipped`.
setup_failed = setup_status not in ["skipped", "success"]
org = "huggingface"
repo = "transformers"
repository_full_name = f"{org}/{repo}"
# This env. variable is set in workflow file (under the job `send_results`).
ci_event = os.environ["CI_EVENT"]
# To find the PR number in a commit title, for example, `Add AwesomeFormer model (#99999)`
pr_number_re = re.compile(r"\(#(\d+)\)$")
# Add Commit/PR title with a link for push CI
ci_title = os.environ.get("CI_TITLE", "")
ci_sha = os.environ.get("CI_SHA")
ci_url = None
if ci_sha:
ci_url = f"https://github.com/{repository_full_name}/commit/{ci_sha}"
if ci_title:
if ci_url is None:
raise ValueError(
"When a title is found (`ci_title`), it means a `push` event or a `workflow_run` even (triggered by "
"another `push` event), and the commit SHA has to be provided in order to create the URL to the "
"commit page."
)
ci_title = ci_title.strip().split("\n")[0].strip()
# Retrieve the PR title and author login to complete the report
commit_number = ci_url.split("/")[-1]
ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/commits/{commit_number}"
ci_details = requests.get(ci_detail_url).json()
ci_author = ci_details["author"]["login"]
merged_by = None
# Find the PR number (if any) and change the url to the actual PR page.
numbers = pr_number_re.findall(ci_title)
if len(numbers) > 0:
pr_number = numbers[0]
ci_detail_url = f"https://api.github.com/repos/{repository_full_name}/pulls/{pr_number}"
ci_details = requests.get(ci_detail_url).json()
ci_author = ci_details["user"]["login"]
ci_url = f"https://github.com/{repository_full_name}/pull/{pr_number}"
merged_by = ci_details["merged_by"]["login"]
if merged_by is None:
ci_title = f"<{ci_url}|{ci_title}>\nAuthor: GH_{ci_author}"
else:
ci_title = f"<{ci_url}|{ci_title}>\nAuthor: GH_{ci_author} | Merged by: GH_{merged_by}"
elif ci_sha:
ci_title = f"<{ci_url}|commit: {ci_sha}>"
else:
ci_title = ""
# `title` will be updated at the end before calling `Message()`.
title = f"[INFO] Results of {ci_event}"
if runner_not_available or runner_failed or setup_failed:
Message.error_out(title, ci_title, runner_not_available, runner_failed, setup_failed)
exit(0)
# sys.argv[0] is always `utils/notification_service.py`.
arguments = sys.argv[1:]
# In our usage in `.github/workflows/slack-report.yml`, we always pass an argument when calling this script.
# The argument could be an empty string `""` if a job doesn't depend on the job `setup`.
if arguments[0] == "":
job_matrix = []
else:
job_matrix_as_str = arguments[0]
try:
folder_slices = ast.literal_eval(job_matrix_as_str)
if len(folder_slices) > 0:
if isinstance(folder_slices[0], list):
# Need to change from elements like `models/bert` to `models_bert` (the ones used as artifact names).
job_matrix = [
x.replace("models/", "models_").replace("quantization/", "quantization_")
for folders in folder_slices
for x in folders
]
elif isinstance(folder_slices[0], str):
job_matrix = [
x.replace("models/", "models_").replace("quantization/", "quantization_")
for x in folder_slices
]
except Exception:
Message.error_out(title, ci_title)
raise ValueError("Errored out.")
github_actions_jobs = get_jobs(
workflow_run_id=os.environ["GITHUB_RUN_ID"], token=os.environ["ACCESS_REPO_INFO_TOKEN"]
)
github_actions_job_links = {job["name"]: job["html_url"] for job in github_actions_jobs}
artifact_name_to_job_map = {}
for job in github_actions_jobs:
for step in job["steps"]:
if step["name"].startswith("Test suite reports artifacts: "):
artifact_name = step["name"][len("Test suite reports artifacts: ") :]
artifact_name_to_job_map[artifact_name] = job
break
available_artifacts = retrieve_available_artifacts()
test_categories = [
"PyTorch",
"Tokenizers",
"Pipelines",
"Trainer",
"ONNX",
"Auto",
"Quantization",
"Unclassified",
]
job_name = os.getenv("CI_TEST_JOB")
report_name_prefix = job_name
# This dict will contain all the information relative to each model:
# - Failures: the total, as well as the number of failures per-category defined above
# - Success: total
# - Time spent: as a comma-separated list of elapsed time
# - Failures: as a line-break separated list of errors
matrix_job_results = {
matrix_name: {
"failed": {m: {"unclassified": 0, "single": 0, "multi": 0} for m in test_categories},
"errors": 0,
"success": 0,
"skipped": 0,
"time_spent": [],
"error": False,
"failures": {},
"job_link": {},
"captured_info": {},
}
for matrix_name in job_matrix
if f"{report_name_prefix}_{matrix_name}_test_reports" in available_artifacts
}
matrix_job_results_extra = {
matrix_name: {
"captured_info": {},
}
for matrix_name in job_matrix
if f"{report_name_prefix}_{matrix_name}_test_reports" in available_artifacts
}
unclassified_model_failures = []
for matrix_name in matrix_job_results:
for artifact_path_dict in available_artifacts[f"{report_name_prefix}_{matrix_name}_test_reports"].paths:
path = artifact_path_dict["path"]
artifact_gpu = artifact_path_dict["gpu"]
if path not in artifact_name_to_job_map:
# Mismatch between available artifacts and reported jobs on github. It happens.
continue
artifact = retrieve_artifact(path, artifact_gpu)
if "summary_short" not in artifact:
# The process might be killed (for example, CPU OOM), or the job is canceled for some reason), etc.
matrix_job_results[matrix_name]["error"] = True
if "stats" in artifact:
# Link to the GitHub Action job
job = artifact_name_to_job_map[path]
matrix_job_results[matrix_name]["job_link"][artifact_gpu] = job["html_url"]
failed, errors, success, skipped, time_spent = handle_test_results(artifact["stats"])
matrix_job_results[matrix_name]["success"] += success
matrix_job_results[matrix_name]["errors"] += errors
matrix_job_results[matrix_name]["skipped"] += skipped
matrix_job_results[matrix_name]["time_spent"].append(float(time_spent[:-1]))
stacktraces = handle_stacktraces(artifact["failures_line"])
# Add the captured actual outputs for patched methods (`torch.testing.assert_close`, `assertEqual` etc.)
if "captured_info" in artifact:
step_number = None
for step in job.get("steps", []):
if step["name"] == "Captured information":
step_number = step["number"]
break
if step_number is not None:
step_link = f"{job['html_url']}#step:{step_number}:1"
matrix_job_results[matrix_name]["captured_info"][artifact_gpu] = step_link
matrix_job_results_extra[matrix_name]["captured_info"][artifact_gpu] = {
"link": step_link,
"captured_info": artifact["captured_info"],
}
for line in artifact["summary_short"].split("\n"):
if line.startswith("FAILED "):
# Avoid the extra `FAILED` entry given by `run_test_using_subprocess` causing issue when calling
# `stacktraces.pop` below.
# See `run_test_using_subprocess` in `src/transformers/testing_utils.py`
if " - Failed: (subprocess)" in line:
continue
line = line[len("FAILED ") :]
line = line.split()[0].replace("\n", "")
if artifact_gpu not in matrix_job_results[matrix_name]["failures"]:
matrix_job_results[matrix_name]["failures"][artifact_gpu] = []
trace = pop_default(stacktraces, 0, "Cannot retrieve error message.")
matrix_job_results[matrix_name]["failures"][artifact_gpu].append(
{"line": line, "trace": trace}
)
# TODO: How to deal wit this
if re.search("tests/quantization", line):
matrix_job_results[matrix_name]["failed"]["Quantization"][artifact_gpu] += 1
elif re.search("test_modeling", line):
matrix_job_results[matrix_name]["failed"]["PyTorch"][artifact_gpu] += 1
elif re.search("test_tokenization", line):
matrix_job_results[matrix_name]["failed"]["Tokenizers"][artifact_gpu] += 1
elif re.search("test_pipelines", line):
matrix_job_results[matrix_name]["failed"]["Pipelines"][artifact_gpu] += 1
elif re.search("test_trainer", line):
matrix_job_results[matrix_name]["failed"]["Trainer"][artifact_gpu] += 1
elif re.search("onnx", line):
matrix_job_results[matrix_name]["failed"]["ONNX"][artifact_gpu] += 1
elif re.search("auto", line):
matrix_job_results[matrix_name]["failed"]["Auto"][artifact_gpu] += 1
else:
matrix_job_results[matrix_name]["failed"]["Unclassified"][artifact_gpu] += 1
unclassified_model_failures.append(line)
# Additional runs
additional_files = {
"PyTorch pipelines": "run_pipelines_torch_gpu_test_reports",
"Examples directory": "run_examples_gpu_test_reports",
"DeepSpeed": "run_torch_cuda_extensions_gpu_test_reports",
"Kernels": "run_kernels_gpu_test_reports",
}
if ci_event in ["push", "Nightly CI"] or ci_event.startswith("Past CI"):
del additional_files["Examples directory"]
del additional_files["PyTorch pipelines"]
elif ci_event.startswith("Scheduled CI (AMD)"):
del additional_files["DeepSpeed"]
elif ci_event.startswith("Push CI (AMD)"):
additional_files = {}
report_repo_id = os.getenv("REPORT_REPO_ID")
# if it is not a scheduled run, upload the reports to a subfolder under `report_repo_folder`
report_repo_subfolder = ""
if os.getenv("GITHUB_EVENT_NAME") != "schedule":
report_repo_subfolder = f"{os.getenv('GITHUB_RUN_NUMBER')}-{os.getenv('GITHUB_RUN_ID')}"
report_repo_subfolder = f"runs/{report_repo_subfolder}"
workflow_run = get_last_daily_ci_run(
token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=os.getenv("GITHUB_RUN_ID")
)
workflow_run_created_time = workflow_run["created_at"]
workflow_id = workflow_run["workflow_id"]
report_repo_folder = workflow_run_created_time.split("T")[0]
if report_repo_subfolder:
report_repo_folder = f"{report_repo_folder}/{report_repo_subfolder}"
# Remove some entries in `additional_files` if they are not concerned.
test_name = None
if job_name in job_to_test_map:
test_name = job_to_test_map[job_name]
additional_files = {k: v for k, v in additional_files.items() if k == test_name}
additional_results = {
key: {
"failed": {"unclassified": 0, "single": 0, "multi": 0},
"errors": 0,
"success": 0,
"skipped": 0,
"time_spent": [],
"error": False,
"failures": {},
"job_link": {},
}
for key in additional_files
}
for key in additional_results:
# If a whole suite of test fails, the artifact isn't available.
if additional_files[key] not in available_artifacts:
additional_results[key]["error"] = True
continue
for artifact_path_dict in available_artifacts[additional_files[key]].paths:
path = artifact_path_dict["path"]
artifact_gpu = artifact_path_dict["gpu"]
# Link to the GitHub Action job
job = artifact_name_to_job_map[path]
additional_results[key]["job_link"][artifact_gpu] = job["html_url"]
artifact = retrieve_artifact(path, artifact_gpu)
stacktraces = handle_stacktraces(artifact["failures_line"])
failed, errors, success, skipped, time_spent = handle_test_results(artifact["stats"])
additional_results[key]["failed"][artifact_gpu or "unclassified"] += failed
additional_results[key]["success"] += success
additional_results[key]["errors"] += errors
additional_results[key]["skipped"] += skipped
additional_results[key]["time_spent"].append(float(time_spent[:-1]))
if len(artifact["errors"]):
additional_results[key]["error"] = True
if failed:
for line in artifact["summary_short"].split("\n"):
if line.startswith("FAILED "):
# Avoid the extra `FAILED` entry given by `run_test_using_subprocess` causing issue when calling
# `stacktraces.pop` below.
# See `run_test_using_subprocess` in `src/transformers/testing_utils.py`
if " - Failed: (subprocess)" in line:
continue
line = line[len("FAILED ") :]
line = line.split()[0].replace("\n", "")
if artifact_gpu not in additional_results[key]["failures"]:
additional_results[key]["failures"][artifact_gpu] = []
trace = pop_default(stacktraces, 0, "Cannot retrieve error message.")
additional_results[key]["failures"][artifact_gpu].append({"line": line, "trace": trace})
# Let's only check the warning for the model testing job. Currently, the job `run_extract_warnings` is only run
# when `inputs.job` (in the workflow file) is `run_models_gpu`. The reason is: otherwise we need to save several
# artifacts with different names which complicates the logic for an insignificant part of the CI workflow reporting.
selected_warnings = []
if job_name == "run_models_gpu":
if "warnings_in_ci" in available_artifacts:
directory = available_artifacts["warnings_in_ci"].paths[0]["path"]
with open(os.path.join(directory, "selected_warnings.json")) as fp:
selected_warnings = json.load(fp)
if not os.path.isdir(os.path.join(os.getcwd(), f"ci_results_{job_name}")):
os.makedirs(os.path.join(os.getcwd(), f"ci_results_{job_name}"))
nvidia_daily_ci_workflow = (
"huggingface/transformers/.github/workflows/self-scheduled-caller.yml",
"huggingface/transformers/.github/workflows/self-scheduled-flash-attn-caller.yml",
)
amd_daily_ci_workflows = (
"huggingface/transformers/.github/workflows/self-scheduled-amd-mi325-caller.yml",
"huggingface/transformers/.github/workflows/self-scheduled-amd-mi355-caller.yml",
)
is_nvidia_daily_ci_workflow = os.environ.get("GITHUB_WORKFLOW_REF").startswith(nvidia_daily_ci_workflow)
is_amd_daily_ci_workflow = os.environ.get("GITHUB_WORKFLOW_REF").startswith(amd_daily_ci_workflows)
is_scheduled_ci_run = os.environ.get("GITHUB_EVENT_NAME") == "schedule"
# For AMD workflow runs: the different AMD CI callers (MI210/MI250/MI300, etc.) are triggered by `workflow_run`
# event of `.github/workflows/self-scheduled-amd-caller.yml`.
if os.environ.get("GITHUB_EVENT_NAME") == "workflow_run":
# Get the path to the file on the runner that contains the full event webhook payload.
event_payload_path = os.environ.get("GITHUB_EVENT_PATH")
# Load the event payload
with open(event_payload_path) as fp:
event_payload = json.load(fp)
# The event that triggers the original `workflow_run`.
if "workflow_run" in event_payload:
is_scheduled_ci_run = event_payload["workflow_run"]["event"] == "schedule"
test_name_and_result_pairs = []
if len(matrix_job_results) > 0:
test_name = job_to_test_map[job_name]
test_name_and_result_pairs.append((test_name, matrix_job_results))
for test_name, result in additional_results.items():
test_name_and_result_pairs.append((test_name, result))
for test_name, result in test_name_and_result_pairs:
with open(f"ci_results_{job_name}/{test_to_result_name[test_name]}_results.json", "w", encoding="UTF-8") as fp:
json.dump(result, fp, indent=4, ensure_ascii=False)
api.upload_file(
path_or_fileobj=f"ci_results_{job_name}/{test_to_result_name[test_name]}_results.json",
path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/{test_to_result_name[test_name]}_results.json",
repo_id=report_repo_id,
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
if len(matrix_job_results_extra) > 0:
with open(
f"ci_results_{job_name}/{test_to_result_name[test_name]}_results_extra.json", "w", encoding="UTF-8"
) as fp:
json.dump(matrix_job_results_extra, fp, indent=4, ensure_ascii=False)
api.upload_file(
path_or_fileobj=f"ci_results_{job_name}/{test_to_result_name[test_name]}_results_extra.json",
path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/{test_to_result_name[test_name]}_results_extra.json",
repo_id=report_repo_id,
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
# Let's create a file contain job --> job link
if len(matrix_job_results) > 0:
target_results = matrix_job_results
else:
default_result = {
"failed": {"unclassified": 0, "single": 0, "multi": 0},
"success": 0,
"time_spent": [],
"error": False,
"failures": {},
"job_link": {},
}
key = job_to_test_map.get(job_name)
target_results = additional_results.get(key, default_result) if key is not None else default_result
# Make the format uniform between `model_results` and `additional_results[XXX]`
if "failures" in target_results:
target_results = {job_name: target_results}
job_links = {}
sorted_dict = sorted(target_results.items(), key=lambda t: t[0])
for job, job_result in sorted_dict:
if job.startswith("models_"):
job = job[len("models_") :]
elif job.startswith("quantization_"):
job = job[len("quantization_") :]
job_links[job] = job_result["job_link"]
with open(f"ci_results_{job_name}/job_links.json", "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, indent=4, ensure_ascii=False)
api.upload_file(
path_or_fileobj=f"ci_results_{job_name}/job_links.json",
path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/job_links.json",
repo_id=report_repo_id,
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
prev_workflow_run_id = None
other_workflow_run_ids = []
if is_scheduled_ci_run:
prev_workflow_run_id = get_last_daily_ci_workflow_run_id(
token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_id=workflow_id
)
# For a scheduled run that is not the Nvidia's scheduled daily CI, add Nvidia's scheduled daily CI run as a target to compare.
if not is_nvidia_daily_ci_workflow:
# The id of the workflow `.github/workflows/self-scheduled-caller.yml` (not of a workflow run of it).
other_workflow_id = "90575235"
# We need to get the Nvidia's scheduled daily CI run that match the current run (i.e. run with the same commit SHA)
other_workflow_run_id = get_last_daily_ci_workflow_run_id(
token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_id=other_workflow_id, commit_sha=ci_sha
)
other_workflow_run_ids.append(other_workflow_run_id)
# triggered via `issue_comment` for CI on pull requests (e.g. using the comment `run-slow:`)
elif os.environ.get("GITHUB_EVENT_NAME") in ["issue_comment"]:
# TODO (ydshieh): Make this flexible once we implement `run-slow` for AMD CI and others.
# The id of the workflow `.github/workflows/self-scheduled-caller.yml` (not of a workflow run of it).
prev_workflow_id = "90575235"
# TODO (ydshieh): It's better to make sure using the last completed scheduled workflow run with the commit being a parent
# of the PR's `merge_commit`.
prev_workflow_run_id = get_last_daily_ci_workflow_run_id(
token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_id=prev_workflow_id
)
else:
prev_workflow_run_id = os.environ["PREV_WORKFLOW_RUN_ID"]
other_workflow_run_id = os.environ["OTHER_WORKFLOW_RUN_ID"]
other_workflow_run_ids.append(other_workflow_run_id)
prev_ci_artifacts = (None, None)
other_ci_artifacts = []
output_dir = os.path.join(os.getcwd(), "previous_reports")
os.makedirs(output_dir, exist_ok=True)
for idx, target_workflow_run_id in enumerate([prev_workflow_run_id] + other_workflow_run_ids):
if target_workflow_run_id is None or target_workflow_run_id == "":
continue
else:
artifact_names = [f"ci_results_{job_name}"]
ci_artifacts = get_last_daily_ci_reports(
artifact_names=artifact_names,
output_dir=output_dir,
token=os.environ["ACCESS_REPO_INFO_TOKEN"],
workflow_run_id=target_workflow_run_id,
)
if idx == 0:
prev_ci_artifacts = (target_workflow_run_id, ci_artifacts)
else:
other_ci_artifacts.append((target_workflow_run_id, ci_artifacts))
# Only for AMD at this moment.
# TODO: put this into a method
diff_file_url = None
if is_amd_daily_ci_workflow:
if not (prev_workflow_run_id is None or prev_workflow_run_id == ""):
ci_artifacts = get_last_daily_ci_reports(
artifact_names=None,
output_dir=output_dir,
token=os.environ["ACCESS_REPO_INFO_TOKEN"],
workflow_run_id=prev_workflow_run_id,
)
current_artifacts = sorted([d for d in os.listdir() if os.path.isdir(d) and d.endswith("_test_reports")])
prev_artifacts = sorted([d for d in os.listdir(output_dir) if os.path.isdir(os.path.join(output_dir, d)) and d.endswith("_test_reports")]) # fmt: skip
current_artifacts_set = {}
for d in current_artifacts:
current_artifacts_set[d] = os.path.join(d, "summary_short.txt")
prev_artifacts_set = {}
for d in prev_artifacts:
prev_artifacts_set[d] = os.path.join(output_dir, d, "summary_short.txt")
report = compare_job_sets(prev_artifacts_set, current_artifacts_set)
with open(f"ci_results_{job_name}/test_results_diff.json", "w") as fp:
fp.write(report)
# upload
commit_info = api.upload_file(
path_or_fileobj=f"ci_results_{job_name}/test_results_diff.json",
path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/test_results_diff.json",
repo_id=report_repo_id,
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
diff_file_url = f"https://huggingface.co/datasets/{report_repo_id}/resolve/{commit_info.oid}/{report_repo_folder}/ci_results_{job_name}/test_results_diff.json"
ci_name_in_report = ""
if job_name in job_to_test_map:
ci_name_in_report = job_to_test_map[job_name]
title = f"[INFO] Results of {ci_event}: {ci_name_in_report}"
message = Message(
title,
ci_title,
matrix_job_results,
additional_results,
selected_warnings=selected_warnings,
prev_ci_artifacts=prev_ci_artifacts,
other_ci_artifacts=other_ci_artifacts,
)
# send report only if there is any failure (for push CI)
if message.n_failures or (ci_event != "push" and not ci_event.startswith("Push CI (AMD)")):
message.post()
message.post_reply()
| Message |
python | pandas-dev__pandas | pandas/tests/groupby/test_grouping.py | {
"start": 548,
"end": 5495
} | class ____:
def test_select_bad_cols(self):
df = DataFrame([[1, 2]], columns=["A", "B"])
g = df.groupby("A")
with pytest.raises(KeyError, match="\"Columns not found: 'C'\""):
g[["C"]]
with pytest.raises(KeyError, match="^[^A]+$"):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
g[["A", "C"]]
def test_groupby_duplicated_column_errormsg(self):
# GH7511
df = DataFrame(
columns=["A", "B", "A", "C"], data=[range(4), range(2, 6), range(0, 8, 2)]
)
msg = "Grouper for 'A' not 1-dimensional"
with pytest.raises(ValueError, match=msg):
df.groupby("A")
with pytest.raises(ValueError, match=msg):
df.groupby(["A", "B"])
grouped = df.groupby("B")
c = grouped.count()
assert c.columns.nlevels == 1
assert c.columns.size == 3
def test_column_select_via_attr(self, df):
result = df.groupby("A").C.sum()
expected = df.groupby("A")["C"].sum()
tm.assert_series_equal(result, expected)
df["mean"] = 1.5
result = df.groupby("A").mean(numeric_only=True)
expected = df.groupby("A")[["C", "D", "mean"]].agg("mean")
tm.assert_frame_equal(result, expected)
def test_getitem_list_of_columns(self):
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.default_rng(2).standard_normal(8),
"D": np.random.default_rng(2).standard_normal(8),
"E": np.random.default_rng(2).standard_normal(8),
}
)
result = df.groupby("A")[["C", "D"]].mean()
result2 = df.groupby("A")[df.columns[2:4]].mean()
expected = df.loc[:, ["A", "C", "D"]].groupby("A").mean()
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_getitem_numeric_column_names(self):
# GH #13731
df = DataFrame(
{
0: list("abcd") * 2,
2: np.random.default_rng(2).standard_normal(8),
4: np.random.default_rng(2).standard_normal(8),
6: np.random.default_rng(2).standard_normal(8),
}
)
result = df.groupby(0)[df.columns[1:3]].mean()
result2 = df.groupby(0)[[2, 4]].mean()
expected = df.loc[:, [0, 2, 4]].groupby(0).mean()
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# per GH 23566 enforced deprecation raises a ValueError
with pytest.raises(ValueError, match="Cannot subset columns with a tuple"):
df.groupby(0)[2, 4].mean()
def test_getitem_single_tuple_of_columns_raises(self, df):
# per GH 23566 enforced deprecation raises a ValueError
with pytest.raises(ValueError, match="Cannot subset columns with a tuple"):
df.groupby("A")["C", "D"].mean()
def test_getitem_single_column(self):
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.default_rng(2).standard_normal(8),
"D": np.random.default_rng(2).standard_normal(8),
"E": np.random.default_rng(2).standard_normal(8),
}
)
result = df.groupby("A")["C"].mean()
as_frame = df.loc[:, ["A", "C"]].groupby("A").mean()
as_series = as_frame.iloc[:, 0]
expected = as_series
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"func", [lambda x: x.sum(), lambda x: x.agg(lambda y: y.sum())]
)
def test_getitem_from_grouper(self, func):
# GH 50383
df = DataFrame({"a": [1, 1, 2], "b": 3, "c": 4, "d": 5})
gb = df.groupby(["a", "b"])[["a", "c"]]
idx = MultiIndex.from_tuples([(1, 3), (2, 3)], names=["a", "b"])
expected = DataFrame({"a": [2, 2], "c": [8, 4]}, index=idx)
result = func(gb)
tm.assert_frame_equal(result, expected)
def test_indices_grouped_by_tuple_with_lambda(self):
# GH 36158
df = DataFrame(
{
"Tuples": (
(x, y)
for x in [0, 1]
for y in np.random.default_rng(2).integers(3, 5, 5)
)
}
)
gb = df.groupby("Tuples")
gb_lambda = df.groupby(lambda x: df.iloc[x, 0])
expected = gb.indices
result = gb_lambda.indices
tm.assert_dict_equal(result, expected)
# grouping
# --------------------------------
| TestSelection |
python | plotly__plotly.py | plotly/graph_objs/scatterpolargl/_line.py | {
"start": 233,
"end": 3488
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolargl"
_path_str = "scatterpolargl.line"
_valid_props = {"color", "dash", "width"}
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dash(self):
"""
Sets the style of the lines.
The 'dash' property is an enumeration that may be specified as:
- One of the following enumeration values:
['dash', 'dashdot', 'dot', 'longdash', 'longdashdot',
'solid']
Returns
-------
Any
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the style of the lines.
width
Sets the line width (in px).
"""
def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolargl.Line`
color
Sets the line color.
dash
Sets the style of the lines.
width
Sets the line width (in px).
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolargl.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("dash", arg, dash)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | huggingface__transformers | src/transformers/cache_utils.py | {
"start": 529,
"end": 2642
} | class ____(ABC):
"""Base, abstract class for a single layer's cache."""
is_compileable = False
def __init__(self):
self.keys: Optional[torch.Tensor] = None
self.values: Optional[torch.Tensor] = None
self.is_initialized = False
def __repr__(self):
return f"{self.__class__.__name__}"
@abstractmethod
def lazy_initialization(self, key_states: torch.Tensor): ...
@abstractmethod
def update(
self, key_states: torch.Tensor, value_states: torch.Tensor, cache_kwargs: Optional[dict[str, Any]] = None
) -> tuple[torch.Tensor, torch.Tensor]: ...
@abstractmethod
def get_mask_sizes(self, cache_position: torch.Tensor) -> tuple[int, int]: ...
@abstractmethod
def get_seq_length(self) -> int: ...
@abstractmethod
def get_max_cache_shape(self) -> int: ...
def offload(self):
"""Offload this layer's data to CPU device."""
if self.is_initialized:
self.keys = self.keys.to("cpu", non_blocking=True)
self.values = self.values.to("cpu", non_blocking=True)
def prefetch(self):
"""In case of layer offloading, this allows to move the data back to the layer's device ahead of time."""
if self.is_initialized and self.keys.device != self.device:
self.keys = self.keys.to(self.device, non_blocking=True)
self.values = self.values.to(self.device, non_blocking=True)
def reset(self) -> None:
"""Resets the cache values while preserving the objects"""
if self.is_initialized:
self.keys.zero_()
self.values.zero_()
# This attribute is set on several Layers
if hasattr(self, "cumulative_length"):
self.cumulative_length = 0
def reorder_cache(self, beam_idx: torch.LongTensor) -> None:
"""Reorders this layer's cache for beam search."""
if self.get_seq_length() > 0:
self.keys = self.keys.index_select(0, beam_idx.to(self.keys.device))
self.values = self.values.index_select(0, beam_idx.to(self.values.device))
| CacheLayerMixin |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/s3/sensor.py | {
"start": 206,
"end": 3442
} | class ____(Exception):
pass
def get_objects(
bucket: str,
prefix: str = "",
since_key: Optional[str] = None,
since_last_modified: Optional[datetime] = None,
client=None,
) -> list[ObjectTypeDef]:
"""Retrieves a list of object keys in S3 for a given `bucket`, `prefix`, and filter option.
Args:
bucket (str): s3 bucket
prefix (str): s3 object prefix
since_key (Optional[str]): retrieve objects modified after the last modified timestamp of this key
since_last_modified (Optional[datetime]): retrieve objects after this timestamp (non-inclusive)
client (Optional[boto3.Client]): s3 client
Returns:
List of object keys in S3.
"""
check.str_param(bucket, "bucket")
check.str_param(prefix, "prefix")
check.opt_str_param(since_key, "since_key")
check.opt_inst_param(since_last_modified, "since_last_modified", datetime)
if not client:
client = boto3.client("s3")
if not client:
raise ClientException("Failed to initialize s3 client")
paginator = client.get_paginator("list_objects_v2")
page_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix, Delimiter="")
objects: list[ObjectTypeDef] = []
for page in page_iterator:
contents = page.get("Contents", [])
objects.extend([cast("ObjectTypeDef", obj) for obj in contents])
if since_key and not any(obj.get("Key") == since_key for obj in objects):
raise Exception("Provided `since_key` is not present in list of objects")
sorted_objects = [obj for obj in sorted(objects, key=lambda x: x.get("LastModified"))]
if since_key:
for idx, obj in enumerate(sorted_objects):
if obj.get("Key") == since_key:
return sorted_objects[idx + 1 :]
if since_last_modified:
for idx, obj in enumerate(sorted_objects):
if obj.get("LastModified") > since_last_modified:
return sorted_objects[idx:]
# If no new files are found, return an empty list.
return []
return sorted_objects
@deprecated(breaking_version="2.0", additional_warn_text="Use get_objects instead.")
def get_s3_keys(
bucket: str,
prefix: str = "",
since_key: Optional[str] = None,
s3_session: Optional[Any] = None,
) -> list[str]:
"""Retrieves a list of object keys in S3 for a given `bucket`, `prefix`, and filter option.
Note: when using the `since_key` it is possible to miss records if that key has been modified,
as sorting is done by the `LastModified` property of the S3 object. For more information, see
the following GitHub issue:
https://github.com/dagster-io/dagster/issues/22892
Args:
bucket (str): s3 bucket
prefix (str): s3 object prefix
since_key (Optional[str]): retrieve objects after the modified date of this key
since_last_modified (Optional[str]): retrieve objects after this timestamp
s3_session (Optional[boto3.Client]): s3 client
Returns:
List of object keys in S3.
"""
objects = get_objects(bucket=bucket, prefix=prefix, since_key=since_key, client=s3_session)
return [obj.get("Key") for obj in objects]
| ClientException |
python | ApeWorX__ape | src/ape/types/private_mempool.py | {
"start": 526,
"end": 763
} | class ____(str, Enum):
"""
The version of the MEV-share API to use.
"""
BETA1 = "beta-1"
"""
The beta-1 version of the API.
"""
V0_1 = "v0.1"
"""
The 0.1 version of the API.
"""
| ProtocolVersion |
python | neetcode-gh__leetcode | python/2215-find-the-difference-of-two-arrays.py | {
"start": 56,
"end": 822
} | class ____:
def findDifference(self, nums1: List[int], nums2: List[int]) -> List[List[int]]:
nums1 = set(nums1)
nums2 = set(nums2)
table = {}
for _, val in enumerate(nums2):
table[val] = 1
unik1 = []
unik2 = []
for i in nums1:
if i in table:
table[i] += 1
else:
unik1.append(i)
for key, val in table.items():
if val == 1:
unik2.append(key)
return [unik1, unik2]
# Time Complexity: O(m + n), we check each element of nums1Set and nums2Set
# Space Complexity: O(m + n), where m and n are length sets in worst case.
from typing import List # ignore this, just for typing
| Solution |
python | pytest-dev__pytest-django | tests/test_django_configurations.py | {
"start": 271,
"end": 4456
} | class ____(Configuration):
# At least one database must be configured
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
},
}
SECRET_KEY = 'foobar'
"""
def test_dc_env(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setenv("DJANGO_SETTINGS_MODULE", "tpkg.settings_env")
monkeypatch.setenv("DJANGO_CONFIGURATION", "MySettings")
pkg = pytester.mkpydir("tpkg")
settings = pkg.joinpath("settings_env.py")
settings.write_text(BARE_SETTINGS)
pytester.makepyfile(
"""
import os
def test_settings():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_env'
assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
"""
)
result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(
[
"django: version: *, settings: tpkg.settings_env (from env), "
"configuration: MySettings (from env)",
"* 1 passed*",
]
)
assert result.ret == 0
def test_dc_env_overrides_ini(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setenv("DJANGO_SETTINGS_MODULE", "tpkg.settings_env")
monkeypatch.setenv("DJANGO_CONFIGURATION", "MySettings")
pytester.makeini(
"""
[pytest]
DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
DJANGO_CONFIGURATION = DO_NOT_USE_ini
"""
)
pkg = pytester.mkpydir("tpkg")
settings = pkg.joinpath("settings_env.py")
settings.write_text(BARE_SETTINGS)
pytester.makepyfile(
"""
import os
def test_ds():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_env'
assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
"""
)
result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(
[
"django: version: *, settings: tpkg.settings_env (from env), "
"configuration: MySettings (from env)",
"* 1 passed*",
]
)
assert result.ret == 0
def test_dc_ini(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.delenv("DJANGO_SETTINGS_MODULE")
pytester.makeini(
"""
[pytest]
DJANGO_SETTINGS_MODULE = tpkg.settings_ini
DJANGO_CONFIGURATION = MySettings
"""
)
pkg = pytester.mkpydir("tpkg")
settings = pkg.joinpath("settings_ini.py")
settings.write_text(BARE_SETTINGS)
pytester.makepyfile(
"""
import os
def test_ds():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_ini'
assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
"""
)
result = pytester.runpytest_subprocess()
result.stdout.fnmatch_lines(
[
"django: version: *, settings: tpkg.settings_ini (from ini), "
"configuration: MySettings (from ini)",
"* 1 passed*",
]
)
assert result.ret == 0
def test_dc_option(pytester: pytest.Pytester, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setenv("DJANGO_SETTINGS_MODULE", "DO_NOT_USE_env")
monkeypatch.setenv("DJANGO_CONFIGURATION", "DO_NOT_USE_env")
pytester.makeini(
"""
[pytest]
DJANGO_SETTINGS_MODULE = DO_NOT_USE_ini
DJANGO_CONFIGURATION = DO_NOT_USE_ini
"""
)
pkg = pytester.mkpydir("tpkg")
settings = pkg.joinpath("settings_opt.py")
settings.write_text(BARE_SETTINGS)
pytester.makepyfile(
"""
import os
def test_ds():
assert os.environ['DJANGO_SETTINGS_MODULE'] == 'tpkg.settings_opt'
assert os.environ['DJANGO_CONFIGURATION'] == 'MySettings'
"""
)
result = pytester.runpytest_subprocess("--ds=tpkg.settings_opt", "--dc=MySettings")
result.stdout.fnmatch_lines(
[
"django: version: *, settings: tpkg.settings_opt (from option),"
" configuration: MySettings (from option)",
"* 1 passed*",
]
)
assert result.ret == 0
| MySettings |
python | plotly__plotly.py | plotly/graph_objs/streamtube/_hoverlabel.py | {
"start": 233,
"end": 11262
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "streamtube"
_path_str = "streamtube.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.streamtube.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.streamtube.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.streamtube.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.streamtube.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.streamtube.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | readthedocs__readthedocs.org | readthedocs/projects/forms.py | {
"start": 14711,
"end": 15639
} | class ____(ProjectFormPrevalidateMixin, PrevalidatedForm):
def clean_prevalidation(self):
super().clean_prevalidation()
if settings.RTD_ALLOW_ORGANIZATIONS:
if self.user_is_nonowner_with_sso:
raise RichValidationError(
_(
"Projects cannot be manually configured when "
"single sign-on is enabled for your organization."
),
header=_("Organization single sign-on enabled"),
)
if not self.user_has_admin_permission:
raise RichValidationError(
_(
"You must be on a team with admin permissions "
"in order to add a new project to your organization."
),
header=_("Admin permission required"),
)
| ProjectManualForm |
python | django__django | tests/messages_tests/test_session.py | {
"start": 835,
"end": 2167
} | class ____(BaseTests, TestCase):
storage_class = SessionStorage
def get_request(self):
self.session = {}
request = super().get_request()
request.session = self.session
return request
def stored_messages_count(self, storage, response):
return stored_session_messages_count(storage)
def test_no_session(self):
msg = (
"The session-based temporary message storage requires session "
"middleware to be installed, and come before the message "
"middleware in the MIDDLEWARE list."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.storage_class(HttpRequest())
def test_get(self):
storage = self.storage_class(self.get_request())
example_messages = ["test", "me"]
set_session_data(storage, example_messages)
self.assertEqual(list(storage), example_messages)
def test_safedata(self):
"""
A message containing SafeData keeps its safe status when retrieved from
the message storage.
"""
storage = self.get_storage()
message = Message(constants.DEBUG, mark_safe("<b>Hello Django!</b>"))
set_session_data(storage, [message])
self.assertIsInstance(list(storage)[0].message, SafeData)
| SessionTests |
python | jazzband__django-oauth-toolkit | tests/db_router.py | {
"start": 1041,
"end": 1644
} | class ____:
def db_for_read(self, model, **hints):
if model._meta.app_label in apps_in_beta:
return "beta"
return None
def db_for_write(self, model, **hints):
if model._meta.app_label in apps_in_beta:
return "beta"
return None
def allow_relation(self, obj1, obj2, **hints):
if obj1._state.db == "beta" and obj2._state.db == "beta":
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
if app_label in apps_in_beta:
return db == "beta"
| BetaRouter |
python | sqlalchemy__sqlalchemy | test/ext/asyncio/test_engine.py | {
"start": 2063,
"end": 7277
} | class ____:
@config.fixture(
params=[
(rollback, run_second_execute, begin_nested)
for rollback in (True, False)
for run_second_execute in (True, False)
for begin_nested in (True, False)
]
)
def async_trans_ctx_manager_fixture(self, request, metadata):
rollback, run_second_execute, begin_nested = request.param
t = Table("test", metadata, Column("data", Integer))
eng = getattr(self, "bind", None) or config.db
t.create(eng)
async def run_test(subject, trans_on_subject, execute_on_subject):
async with subject.begin() as trans:
if begin_nested:
if not config.requirements.savepoints.enabled:
config.skip_test("savepoints not enabled")
if execute_on_subject:
nested_trans = subject.begin_nested()
else:
nested_trans = trans.begin_nested()
async with nested_trans:
if execute_on_subject:
await subject.execute(t.insert(), {"data": 10})
else:
await trans.execute(t.insert(), {"data": 10})
# for nested trans, we always commit/rollback on the
# "nested trans" object itself.
# only Session(future=False) will affect savepoint
# transaction for session.commit/rollback
if rollback:
await nested_trans.rollback()
else:
await nested_trans.commit()
if run_second_execute:
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction "
"inside context manager. Please complete the "
"context manager "
"before emitting further commands.",
):
if execute_on_subject:
await subject.execute(
t.insert(), {"data": 12}
)
else:
await trans.execute(
t.insert(), {"data": 12}
)
# outside the nested trans block, but still inside the
# transaction block, we can run SQL, and it will be
# committed
if execute_on_subject:
await subject.execute(t.insert(), {"data": 14})
else:
await trans.execute(t.insert(), {"data": 14})
else:
if execute_on_subject:
await subject.execute(t.insert(), {"data": 10})
else:
await trans.execute(t.insert(), {"data": 10})
if trans_on_subject:
if rollback:
await subject.rollback()
else:
await subject.commit()
else:
if rollback:
await trans.rollback()
else:
await trans.commit()
if run_second_execute:
with assertions.expect_raises_message(
exc.InvalidRequestError,
"Can't operate on closed transaction inside "
"context "
"manager. Please complete the context manager "
"before emitting further commands.",
):
if execute_on_subject:
await subject.execute(t.insert(), {"data": 12})
else:
await trans.execute(t.insert(), {"data": 12})
expected_committed = 0
if begin_nested:
# begin_nested variant, we inserted a row after the nested
# block
expected_committed += 1
if not rollback:
# not rollback variant, our row inserted in the target
# block itself would be committed
expected_committed += 1
if execute_on_subject:
eq_(
await subject.scalar(select(func.count()).select_from(t)),
expected_committed,
)
else:
with subject.connect() as conn:
eq_(
await conn.scalar(select(func.count()).select_from(t)),
expected_committed,
)
return run_test
| AsyncFixture |
python | plotly__plotly.py | plotly/graph_objs/bar/_selected.py | {
"start": 233,
"end": 3254
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "bar"
_path_str = "bar.selected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.selected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.bar.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.selected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.bar.selected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.bar.selected.Marker`
instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.bar.selected.Textfont`
instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.bar.Selected`
marker
:class:`plotly.graph_objects.bar.selected.Marker`
instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.bar.selected.Textfont`
instance or dict with compatible properties
Returns
-------
Selected
"""
super().__init__("selected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.bar.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.Selected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Selected |
python | cython__cython | tests/run/typing_module.py | {
"start": 740,
"end": 1066
} | class ____:
"""
>>> TestClassVar.cls
5
>>> TestClassVar.regular # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
AttributeError:
"""
regular: int
cls: typing.ClassVar[int] = 5 # this is a little redundant really because the assignment ensures it
| TestClassVar |
python | automl__auto-sklearn | test/test_pipeline/components/regression/test_decision_tree.py | {
"start": 157,
"end": 735
} | class ____(BaseRegressionComponentTest):
__test__ = True
res = dict()
res["default_boston"] = 0.35616796434879905
res["default_boston_iterative"] = None
res["default_boston_sparse"] = 0.18031669797027394
res["default_boston_iterative_sparse"] = None
res["default_diabetes"] = 0.1564592449511697
res["default_diabetes_iterative"] = None
res["default_diabetes_sparse"] = -0.020818312539637507
res["default_diabetes_iterative_sparse"] = None
sk_mod = sklearn.tree.DecisionTreeRegressor
module = DecisionTree
| DecisionTreeComponentTest |
python | pytorch__pytorch | torchgen/api/types/signatures.py | {
"start": 438,
"end": 4659
} | class ____:
"""
A CppSignature represents a single overload in the C++ API. For
any given function schema, there may be multiple CppSignatures
corresponding to it, based on how we desugar to C++. See also
CppSignatureGroup.
"""
# The schema this signature is derived from
func: FunctionSchema
# Is this a C++ signature for a method, i.e. Tensor::my_op(...)?
method: bool
# Is this a faithful C++ signature (i.e. following the JIT schema) or a convenience API
# (i.e. with a potential TensorOptions argument and out arguments in the front)
faithful: bool
# Is this a symint C++ signature. For BC reasons, functions that take
# SymInts still present as int64_t in C++, and the SymInt variant is
# offered at a different overload name
#
# NB: If a function RETURNS a SymInt, this is ALWAYS false
symint: bool
# The set of C++ arguments which should not have defaults applied to them
cpp_no_default_args: set[str]
# Is this a fallback C++ binding? Fallback bindings are enabled by
# manual_cpp_binding: True and are alternate, non-public API that
# lets manual C++ binding implementers access the binding that would
# have been automatically generated
fallback_binding: bool = False
# Return the unpacked argument structure of this signature,
# discarding information about which arguments are semantically
# related to each other.
def arguments(self) -> Sequence[Binding]:
return cpp.arguments(
self.func.arguments,
faithful=self.faithful,
symint=self.symint,
method=self.method,
cpp_no_default_args=self.cpp_no_default_args,
)
def name(self, *, suppress_symint_suffix: bool = False) -> str:
n = cpp.name(
self.func,
faithful_name_for_out_overloads=self.faithful,
symint_overload=False if suppress_symint_suffix else self.symint,
)
if self.fallback_binding:
n = f"__dispatch_{n}"
return n
# Render the C++ declaration for this signature
def decl(
self,
*,
name: str | None = None,
prefix: str = "",
is_redispatching_fn: bool = False,
suppress_symint_suffix: bool = False,
) -> str:
returns_type = cpp.returns_type(
self.func.returns, symint=self.symint
).cpp_type()
cpp_args = [a.decl() for a in self.arguments()]
if is_redispatching_fn:
cpp_args = ["c10::DispatchKeySet dispatchKeySet"] + cpp_args
cpp_args_str = ", ".join(cpp_args)
if name is None:
name = prefix + self.name(suppress_symint_suffix=suppress_symint_suffix)
return f"{returns_type} {name}({cpp_args_str})"
# Render the C++ definition for this signature, not including
# the body (with curly braces)
def defn(
self,
*,
name: str | None = None,
prefix: str = "",
is_redispatching_fn: bool = False,
) -> str:
returns_type = cpp.returns_type(
self.func.returns, symint=self.symint
).cpp_type()
cpp_args = [a.defn() for a in self.arguments()]
if is_redispatching_fn:
cpp_args = ["c10::DispatchKeySet dispatchKeySet"] + cpp_args
cpp_args_str = ", ".join(cpp_args)
if name is None:
name = prefix + self.name()
return f"{returns_type} {name}({cpp_args_str})"
def ptr_type(self) -> str:
args_types_str = ", ".join(a.type for a in self.arguments())
return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} (*)({args_types_str})"
# Return the C++ function type, e.g., something like int(bool)
def type(self) -> str:
args_types_str = ", ".join(a.type for a in self.arguments())
return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} ({args_types_str})"
# Represents group of all CppSignatures associated with a
# FunctionSchema. Right now, that's the regular, user-visible
# signature, as well as a "faithful" signature which doesn't
# have grouping.
@dataclass(frozen=True)
| CppSignature |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_lib.py | {
"start": 17006,
"end": 28138
} | class ____(_ThreadMode):
"""Type of default value returned by `_get_per_thread_mode()`.
Used when the thread-local stack is empty.
"""
def __init__(self):
_ThreadMode.__init__(self, _get_default_strategy(), None,
_get_default_replica_context())
def _get_per_thread_mode():
try:
return ops.get_default_graph()._distribution_strategy_stack[-1] # pylint: disable=protected-access
except (AttributeError, IndexError):
return _get_default_replica_mode()
_variable_sync_on_read_context = threading.local()
@tf_export("__internal__.distribute.variable_sync_on_read_context", v1=[])
@contextlib.contextmanager
def variable_sync_on_read_context():
"""A context that forces SyncOnReadVariable to aggregate upon reading.
This context is useful if one wants to read the aggregated value out of a
SyncOnReadVariable in replica context. By default the aggregation is turned
off per the definition of SyncOnReadVariable.
When reading a SyncOnReadVariable in cross-replica context, aggregation is
always turned on so there is no need for such context.
By reading a SyncOnReadVariable, we mean:
1. Convert the variable to a tensor using `convert_to_tensor`.
2. Calling `variable.value()` or `variable.read_value()`.
Example usage:
```
strategy = tf.distribute.MirroredStrategy(devices=["GPU:0", "GPU:1"])
with strategy.scope():
v = tf.Variable(1.0, synchronization=tf.VariableSynchronization.ON_READ,
aggregation=tf.VariableAggregation.SUM)
def replica_fn():
return v + 10.0
non_aggregated = strategy.run(replica_fn)
print(non_aggregated) # PerReplica: {0: 11.0, 1: 11.0}
def replica_fn():
with variable_sync_on_read_context():
return v + 10.0
aggregated = strategy.run(replica_fn)
print(aggregated) # PerReplica: {0: 12.0, 1: 12.0}
```
Yields:
Context manager for aggregating SyncOnReadVariable upon reading.
"""
try:
_variable_sync_on_read_context.entered = True
yield
finally:
_variable_sync_on_read_context.entered = False
def in_variable_sync_on_read_context():
try:
return _variable_sync_on_read_context.entered
except AttributeError:
return False
# ------------------------------------------------------------------------------
# Public API for accessing the current thread mode
@tf_export("distribute.get_replica_context")
def get_replica_context():
"""Returns the current `tf.distribute.ReplicaContext` or `None`.
Returns `None` if in a cross-replica context.
Note that execution:
1. starts in the default (single-replica) replica context (this function
will return the default `ReplicaContext` object);
2. switches to cross-replica context (in which case this will return
`None`) when entering a `with tf.distribute.Strategy.scope():` block;
3. switches to a (non-default) replica context inside `strategy.run(fn, ...)`;
4. if `fn` calls `get_replica_context().merge_call(merge_fn, ...)`, then
inside `merge_fn` you are back in the cross-replica context (and again
this function will return `None`).
Most `tf.distribute.Strategy` methods may only be executed in
a cross-replica context, in a replica context you should use the
API of the `tf.distribute.ReplicaContext` object returned by this
method instead.
```
assert tf.distribute.get_replica_context() is not None # default
with strategy.scope():
assert tf.distribute.get_replica_context() is None
def f():
replica_context = tf.distribute.get_replica_context() # for strategy
assert replica_context is not None
tf.print("Replica id: ", replica_context.replica_id_in_sync_group,
" of ", replica_context.num_replicas_in_sync)
strategy.run(f)
```
Returns:
The current `tf.distribute.ReplicaContext` object when in a replica context
scope, else `None`.
Within a particular block, exactly one of these two things will be true:
* `get_replica_context()` returns non-`None`, or
* `tf.distribute.is_cross_replica_context()` returns True.
"""
return _get_per_thread_mode().replica_context
def get_cross_replica_context():
"""Returns the current tf.distribute.Strategy if in a cross-replica context.
DEPRECATED: Please use `in_cross_replica_context()` and
`get_strategy()` instead.
Returns:
Returns the current `tf.distribute.Strategy` object in a cross-replica
context, or `None`.
Exactly one of `get_replica_context()` and `get_cross_replica_context()`
will return `None` in a particular block.
"""
return _get_per_thread_mode().cross_replica_context
@tf_export("distribute.in_cross_replica_context")
def in_cross_replica_context():
"""Returns `True` if in a cross-replica context.
See `tf.distribute.get_replica_context` for details.
```
assert not tf.distribute.in_cross_replica_context()
with strategy.scope():
assert tf.distribute.in_cross_replica_context()
def f():
assert not tf.distribute.in_cross_replica_context()
strategy.run(f)
```
Returns:
`True` if in a cross-replica context (`get_replica_context()` returns
`None`), or `False` if in a replica context (`get_replica_context()` returns
non-`None`).
"""
return _get_per_thread_mode().cross_replica_context is not None
@tf_export("distribute.get_strategy")
def get_strategy() -> "StrategyBase":
"""Returns the current `tf.distribute.Strategy` object.
Typically only used in a cross-replica context:
```
if tf.distribute.in_cross_replica_context():
strategy = tf.distribute.get_strategy()
...
```
Returns:
A `tf.distribute.Strategy` object. Inside a `with strategy.scope()` block,
it returns `strategy`, otherwise it returns the default (single-replica)
`tf.distribute.Strategy` object.
"""
return _get_per_thread_mode().strategy
@tf_export("distribute.has_strategy")
def has_strategy():
"""Return if there is a current non-default `tf.distribute.Strategy`.
```
assert not tf.distribute.has_strategy()
with strategy.scope():
assert tf.distribute.has_strategy()
```
Returns:
True if inside a `with strategy.scope():`.
"""
return get_strategy() is not _get_default_strategy()
def get_strategy_and_replica_context():
per_thread_mode = _get_per_thread_mode()
return (per_thread_mode.strategy, per_thread_mode.replica_context)
@tf_export("distribute.experimental_set_strategy")
def experimental_set_strategy(strategy):
"""Set a `tf.distribute.Strategy` as current without `with strategy.scope()`.
```
tf.distribute.experimental_set_strategy(strategy1)
f()
tf.distribute.experimental_set_strategy(strategy2)
g()
tf.distribute.experimental_set_strategy(None)
h()
```
is equivalent to:
```
with strategy1.scope():
f()
with strategy2.scope():
g()
h()
```
In general, you should use the `with strategy.scope():` API, but this
alternative may be convenient in notebooks where you would have to put
each cell in a `with strategy.scope():` block.
Note: This should only be called outside of any TensorFlow scope to
avoid improper nesting.
Args:
strategy: A `tf.distribute.Strategy` object or None.
Raises:
RuntimeError: If called inside a `with strategy.scope():`.
"""
old_scope = ops.get_default_graph()._global_distribute_strategy_scope # pylint: disable=protected-access
if old_scope is not None:
old_scope.__exit__(None, None, None)
ops.get_default_graph()._global_distribute_strategy_scope = None # pylint: disable=protected-access
if has_strategy():
raise RuntimeError(
"Must not be called inside a `tf.distribute.Strategy` scope.")
if strategy is not None:
new_scope = strategy.scope()
new_scope.__enter__()
ops.get_default_graph()._global_distribute_strategy_scope = new_scope # pylint: disable=protected-access
# ------------------------------------------------------------------------------
# Internal helpers.
@contextlib.contextmanager
def enter_or_assert_strategy(strategy):
if has_strategy():
_assert_strategy(strategy)
yield
else:
with strategy.scope():
yield
# ------------------------------------------------------------------------------
# Defaults that are used when no tf.distribute.Strategy is explicitly created.
# We create them lazily in a function so that we can workaround the circular
# dependency on distribute_lib. See lazy loader at the top of this file.
_defaults = {
"strategy": None,
"replica_context": None,
"replica_mode": None
}
# Note: These need to be different locks since _get_default_replica_context
# calls _get_default_strategy inside its lock, and them using the same lock
# can lead to deadlock.
_default_strategy_lock = threading.Lock()
_default_replica_context_lock = threading.Lock()
_default_replica_mode_lock = threading.Lock()
def _assert_strategy(strategy):
if not has_strategy():
raise RuntimeError('Need to be inside "with strategy.scope()" for %s' %
(strategy,))
current_strategy = get_strategy()
if current_strategy is not strategy:
raise RuntimeError(
"Mixing different tf.distribute.Strategy objects: %s is not %s" %
(current_strategy, strategy))
def _get_default_strategy():
if _defaults["strategy"] is None:
# Avoid race condition causing two defaults to be created
with _default_strategy_lock:
if _defaults["strategy"] is None:
# pylint: disable=protected-access
# Make sure distribute_lib module is loaded by accessing some member.
global _creating_default_strategy_singleton
_creating_default_strategy_singleton = True
if tf2.enabled():
_defaults["strategy"] = _DefaultDistributionStrategy()
else:
_defaults["strategy"] = (
_DefaultDistributionStrategyV1())
_creating_default_strategy_singleton = False
# pylint: enable=protected-access
return _defaults["strategy"]
def _get_default_replica_context():
if _defaults["replica_context"] is None:
# Avoid race condition causing two defaults to be created
with _default_replica_context_lock:
if _defaults["replica_context"] is None:
# pylint: disable=protected-access
_defaults["replica_context"] = _DefaultReplicaContext(
_get_default_strategy(), replica_id_in_sync_group=0)
# pylint: enable=protected-access
return _defaults["replica_context"]
def _get_default_replica_mode():
if _defaults["replica_mode"] is None:
# Avoid race condition causing two defaults to be created
with _default_replica_mode_lock:
if _defaults["replica_mode"] is None:
_defaults["replica_mode"] = _DefaultReplicaThreadMode()
return _defaults["replica_mode"]
# Aliases for compatibility with old names.
get_distribution_strategy = get_strategy
has_distribution_strategy = has_strategy
# ------------------------------------------------------------------------------
# Internal context managers used to implement the DistributionStrategy
# base class
| _DefaultReplicaThreadMode |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_authenticator_enroll.py | {
"start": 1098,
"end": 12929
} | class ____(APITestCase):
endpoint = "sentry-api-0-user-authenticator-enroll"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.org = self.create_organization(owner=self.user, name="foo")
@mock.patch("sentry.auth.authenticators.TotpInterface.validate_otp", return_value=True)
def test_totp_can_enroll(self, validate_otp: mock.MagicMock) -> None:
# XXX: Pretend an unbound function exists.
validate_otp.__func__ = None
with mock.patch(
"sentry.auth.authenticators.base.generate_secret_key", return_value="Z" * 32
):
resp = self.get_success_response("me", "totp")
assert resp.data["secret"] == "Z" * 32
assert (
resp.data["qrcode"]
== "otpauth://totp/admin%40localhost?issuer=Sentry&secret=ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"
)
assert resp.data["form"]
assert resp.data["secret"]
# try to enroll
with self.tasks():
self.get_success_response(
"me", "totp", method="post", **{"secret": "secret12", "otp": "1234"}
)
assert validate_otp.call_count == 1
assert validate_otp.call_args == mock.call("1234")
interface = Authenticator.objects.get_interface(user=self.user, interface_id="totp")
assert isinstance(interface, TotpInterface)
assert interface.authenticator, "should have authenticator"
assert interface.secret == "secret12"
assert interface.config == {"secret": "secret12"}
# also enrolls in recovery codes
recovery = Authenticator.objects.get_interface(user=self.user, interface_id="recovery")
assert recovery.is_enrolled()
assert_security_email_sent("mfa-added")
# can rotate in place
self.get_success_response("me", "totp")
self.get_success_response(
"me", "totp", method="post", **{"secret": "secret56", "otp": "5678"}
)
assert validate_otp.call_args == mock.call("5678")
interface = Authenticator.objects.get_interface(user=self.user, interface_id="totp")
assert isinstance(interface, TotpInterface)
assert interface.secret == "secret56"
assert interface.config == {"secret": "secret56"}
@mock.patch("sentry.auth.authenticators.TotpInterface.validate_otp", return_value=True)
def test_totp_no_verified_primary_email(self, validate_otp: mock.MagicMock) -> None:
from urllib.parse import quote
user = self.create_user()
UserEmail.objects.filter(user=user, email=user.email).update(is_verified=False)
self.login_as(user)
# XXX: Pretend an unbound function exists.
validate_otp.__func__ = None
with mock.patch(
"sentry.auth.authenticators.base.generate_secret_key", return_value="Z" * 32
):
resp = self.get_success_response("me", "totp")
assert resp.data["secret"] == "Z" * 32
assert (
resp.data["qrcode"]
== f"otpauth://totp/{quote(user.email)}?issuer=Sentry&secret=ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ"
)
assert resp.data["form"]
assert resp.data["secret"]
# try to enroll
with self.tasks():
resp = self.get_error_response(
"me",
"totp",
method="post",
status_code=401,
**{"secret": "secret12", "otp": "1234"},
)
assert resp.data == {
"detail": {
"code": "primary-email-verification-required",
"message": "Primary email verification required.",
"extra": {"username": user.email},
}
}
@override_options({"totp.disallow-new-enrollment": True})
def test_totp_disallow_new_enrollment(self) -> None:
self.get_error_response(
"me",
"totp",
method="post",
**{"secret": "secret12", "otp": "1234"},
)
@mock.patch("sentry.auth.authenticators.TotpInterface.validate_otp", return_value=False)
def test_invalid_otp(self, validate_otp: mock.MagicMock) -> None:
# XXX: Pretend an unbound function exists.
validate_otp.__func__ = None
# try to enroll
with self.tasks():
self.get_error_response(
"me",
"totp",
method="post",
status_code=400,
**{"secret": "secret12", "otp": "1234"},
)
assert validate_otp.call_count == 1
assert validate_otp.call_args == mock.call("1234")
assert len(mail.outbox) == 0
@mock.patch("sentry.auth.authenticators.SmsInterface.validate_otp", return_value=True)
@mock.patch("sentry.auth.authenticators.SmsInterface.send_text", return_value=True)
@override_options({"sms.twilio-account": "twilio-account"})
def test_sms_can_enroll(self, send_text: mock.MagicMock, validate_otp: mock.MagicMock) -> None:
# XXX: Pretend an unbound function exists.
validate_otp.__func__ = None
resp = self.get_success_response("me", "sms")
assert resp.data["form"]
assert resp.data["secret"]
self.get_success_response(
"me", "sms", method="post", **{"secret": "secret12", "phone": "1231234"}
)
assert send_text.call_count == 1
assert validate_otp.call_count == 0
with self.tasks():
self.get_success_response(
"me",
"sms",
method="post",
**{"secret": "secret12", "phone": "1231234", "otp": "123123"},
)
assert validate_otp.call_count == 1
assert validate_otp.call_args == mock.call("123123")
interface = Authenticator.objects.get_interface(user=self.user, interface_id="sms")
assert isinstance(interface, SmsInterface)
assert interface.phone_number == "1231234"
assert_security_email_sent("mfa-added")
@override_options(
{"sms.twilio-account": "test-twilio-account", "sms.disallow-new-enrollment": True}
)
def test_sms_disallow_new_enrollment(self) -> None:
form_data = {"phone": "+12345678901"}
self.get_error_response("me", "sms", method="post", status_code=403, **form_data)
@override_options({"sms.twilio-account": "twilio-account"})
def test_sms_invalid_otp(self) -> None:
# OTP as None
self.get_error_response(
"me",
"sms",
method="post",
status_code=400,
**{"secret": "secret12", "phone": "1231234", "otp": None},
)
# OTP as empty string
self.get_error_response(
"me",
"sms",
method="post",
status_code=400,
**{"secret": "secret12", "phone": "1231234", "otp": ""},
)
@override_options({"sms.twilio-account": "twilio-account"})
def test_sms_no_verified_email(self) -> None:
user = self.create_user()
UserEmail.objects.filter(user=user, email=user.email).update(is_verified=False)
self.login_as(user)
resp = self.get_error_response(
"me",
"sms",
method="post",
status_code=401,
**{"secret": "secret12", "phone": "1231234", "otp": None},
)
assert resp.data == {
"detail": {
"code": "primary-email-verification-required",
"message": "Primary email verification required.",
"extra": {"username": user.email},
}
}
@mock.patch(
"sentry.users.api.endpoints.user_authenticator_enroll.ratelimiter.backend.is_limited",
return_value=True,
)
@mock.patch("sentry.auth.authenticators.U2fInterface.try_enroll")
@override_options({"system.url-prefix": "https://testserver"})
def test_rate_limited(self, try_enroll: mock.MagicMock, is_limited: mock.MagicMock) -> None:
self.get_success_response("me", "u2f")
self.get_error_response(
"me",
"u2f",
method="post",
status_code=429,
**{
"deviceName": "device name",
"challenge": "challenge",
"response": "response",
},
)
assert try_enroll.call_count == 0
@mock.patch("sentry.auth.authenticators.U2fInterface.try_enroll", return_value=True)
@override_options({"system.url-prefix": "https://testserver"})
def test_u2f_can_enroll(self, try_enroll: mock.MagicMock) -> None:
resp = self.get_success_response("me", "u2f")
assert resp.data["form"]
assert "secret" not in resp.data
assert "qrcode" not in resp.data
assert resp.data["challenge"]
with self.tasks():
self.get_success_response(
"me",
"u2f",
method="post",
**{
"deviceName": "device name",
"challenge": "challenge",
"response": "response",
},
)
assert try_enroll.call_count == 1
mock_challenge = try_enroll.call_args.args[3]["challenge"]
assert try_enroll.call_args == mock.call(
"challenge",
"response",
"device name",
{
"challenge": mock_challenge,
"user_verification": "discouraged",
},
)
assert_security_email_sent("mfa-added")
@override_options({"u2f.disallow-new-enrollment": True})
def test_u2f_disallow_new_enrollment(self) -> None:
self.get_error_response(
"me",
"u2f",
method="post",
**{
"deviceName": "device name",
"challenge": "challenge",
"response": "response",
},
)
@mock.patch("sentry.auth.authenticators.U2fInterface.try_enroll", return_value=True)
@override_options({"system.url-prefix": "https://testserver"})
def test_u2f_superuser_and_staff_cannot_enroll_other_user(
self, try_enroll: mock.MagicMock
) -> None:
elevated_user = self.create_user(is_superuser=True, is_staff=True)
self.login_as(user=elevated_user, superuser=True, staff=True)
resp = self.get_success_response(self.user.id, "u2f")
assert resp.data["form"]
assert "secret" not in resp.data
assert "qrcode" not in resp.data
assert resp.data["challenge"]
# check that the U2F device was enrolled for elevated_user
# and not self.user passed in the request body
assert not Authenticator.objects.filter(user=self.user).exists()
assert Authenticator.objects.get_interface(user=elevated_user, interface_id="u2f")
with self.tasks():
self.get_success_response(
self.user.id,
"u2f",
method="post",
**{
"deviceName": "device name",
"challenge": "challenge",
"response": "response",
},
)
assert try_enroll.call_count == 1
mock_challenge = try_enroll.call_args.args[3]["challenge"]
assert try_enroll.call_args == mock.call(
"challenge",
"response",
"device name",
{
"challenge": mock_challenge,
"user_verification": "discouraged",
},
)
assert_security_email_sent("mfa-added")
@control_silo_test(include_monolith_run=True)
| UserAuthenticatorEnrollTest |
python | aio-libs__aiohttp | aiohttp/web_urldispatcher.py | {
"start": 1754,
"end": 2046
} | class ____(TypedDict, total=False):
path: str
formatter: str
pattern: Pattern[str]
directory: Path
prefix: str
routes: Mapping[str, "AbstractRoute"]
app: "Application"
domain: str
rule: "AbstractRuleMatching"
http_exception: HTTPException
| _InfoDict |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/composition.py | {
"start": 4670,
"end": 4739
} | class ____(NamedTuple):
input_def: InputDefinition
| InputMappingNode |
python | ApeWorX__ape | tests/functional/test_dependencies.py | {
"start": 19883,
"end": 26333
} | class ____:
@pytest.fixture
def mock_client(self, mocker):
return mocker.MagicMock()
def test_ref_or_version_is_required(self):
expected = r"GitHub dependency must have either ref or version specified"
with pytest.raises(ValidationError, match=expected):
_ = GithubDependency(name="foo", github="asdf")
def test_name_from_github(self):
"""
When not given a name, it is derived from the github suffix.
"""
dependency = GithubDependency( # type: ignore
github="ApeWorX/ApeNotAThing", version="3.0.0"
)
assert dependency.name == "apenotathing"
def test_fetch_given_version(self, mock_client):
dependency = GithubDependency(
github="ApeWorX/ApeNotAThing", version="3.0.0", name="apetestdep"
)
dependency._github_client = mock_client
with create_tempdir() as path:
dependency.fetch(path)
mock_client.download_package.assert_called_once_with(
"ApeWorX", "ApeNotAThing", "3.0.0", path
)
def test_fetch_missing_v_prefix(self, mock_client):
"""
Show if the version expects a v-prefix but you don't
provide one that it still works.
"""
dependency = GithubDependency(
github="ApeWorX/ApeNotAThing", version="3.0.0", name="apetestdep"
)
dependency._github_client = mock_client
# Simulate only v-prefix succeeding from GH
def only_want_v(n0, n1, vers, pth):
if not vers.startswith("v"):
raise ValueError("nope")
mock_client.download_package.side_effect = only_want_v
with create_tempdir() as path:
dependency.fetch(path)
calls = mock_client.download_package.call_args_list
assert mock_client.download_package.call_count == 2
# Show it first tried w/o v
assert calls[0][0] == ("ApeWorX", "ApeNotAThing", "3.0.0", path)
# The second call has the v!
assert calls[1][0] == ("ApeWorX", "ApeNotAThing", "v3.0.0", path)
def test_fetch_unneeded_v_prefix(self, mock_client):
"""
Show if the version expects not to have a v-prefix but you
provide one that it still works.
"""
dependency = GithubDependency(
github="ApeWorX/ApeNotAThing", version="v3.0.0", name="apetestdep"
)
dependency._github_client = mock_client
# Simulate only non-v-prefix succeeding from GH
def only_want_non_v(n0, n1, vers, pth):
if vers.startswith("v"):
raise ValueError("nope")
mock_client.download_package.side_effect = only_want_non_v
with create_tempdir() as path:
dependency.fetch(path)
calls = mock_client.download_package.call_args_list
assert mock_client.download_package.call_count == 2
# Show it first tried with the v
assert calls[0][0] == ("ApeWorX", "ApeNotAThing", "v3.0.0", path)
# The second call does not have the v!
assert calls[1][0] == ("ApeWorX", "ApeNotAThing", "3.0.0", path)
def test_fetch_given_version_when_expects_reference(self, mock_client):
"""
Show that if a user configures `version:`, but version fails, it
tries `ref:` instead as a backup.
"""
dependency = GithubDependency(
github="ApeWorX/ApeNotAThing", version="v3.0.0", name="apetestdep"
)
dependency._github_client = mock_client
# Simulate no versions ever found on GH Api.
mock_client.download_package.side_effect = ValueError("nope")
# Simulate only the non-v prefix ref working (for a fuller flow)
def needs_non_v_prefix_ref(n0, n1, dst_path, branch, scheme):
# NOTE: This assertion is very important!
# We must only give it non-existing directories.
assert not dst_path.is_dir()
if branch.startswith("v"):
raise ValueError("nope")
mock_client.clone_repo.side_effect = needs_non_v_prefix_ref
with create_tempdir() as path:
dependency.fetch(path)
calls = mock_client.clone_repo.call_args_list
assert mock_client.clone_repo.call_count == 2
# Show it first tried with the v
assert calls[0][0] == ("ApeWorX", "ApeNotAThing", path)
assert calls[0][1] == {"branch": "v3.0.0", "scheme": "https"}
# The second call does not have the v!
assert calls[1][0] == ("ApeWorX", "ApeNotAThing", path)
assert calls[1][1] == {"branch": "3.0.0", "scheme": "https"}
def test_fetch_ref(self, mock_client):
"""
When specifying ref, it does not try version API at all.
"""
dependency = GithubDependency(github="ApeWorX/ApeNotAThing", ref="3.0.0", name="apetestdep")
dependency._github_client = mock_client
with create_tempdir() as path:
dependency.fetch(path)
assert mock_client.download_package.call_count == 0
mock_client.clone_repo.assert_called_once_with(
"ApeWorX",
"ApeNotAThing",
path,
branch="3.0.0",
scheme="https",
)
def test_fetch_existing_destination_with_read_only_files(self, mock_client):
"""
Show it handles when the destination contains read-only files already
"""
dependency = GithubDependency(github="ApeWorX/ApeNotAThing", ref="3.0.0", name="apetestdep")
dependency._github_client = mock_client
with create_tempdir() as path:
readonly_file = path / "readme.txt"
readonly_file.write_text("readme!")
# NOTE: This only makes a difference on Windows. If using a UNIX system,
# rmtree still deletes readonly files regardless. Windows is more restrictive.
os.chmod(readonly_file, 0o444) # Read-only permissions
dependency.fetch(path)
assert not readonly_file.is_file()
def test_ssh(self, mock_client):
dependency = GithubDependency(
github="ApeWorX/ApeNotAThing", ref="3.0.0", name="apetestdep", scheme="ssh"
)
dependency._github_client = mock_client
with create_tempdir() as path:
dependency.fetch(path)
assert mock_client.clone_repo.call_args[-1]["scheme"] == "ssh"
| TestGitHubDependency |
python | PrefectHQ__prefect | src/prefect/server/api/server.py | {
"start": 4841,
"end": 5331
} | class ____(StaticFiles):
"""
Implementation of `StaticFiles` for serving single page applications.
Adds `get_response` handling to ensure that when a resource isn't found the
application still returns the index.
"""
async def get_response(self, path: str, scope: Any) -> Response:
try:
return await super().get_response(path, scope)
except HTTPException:
return await super().get_response("./index.html", scope)
| SPAStaticFiles |
python | mahmoud__boltons | boltons/dictutils.py | {
"start": 30935,
"end": 35480
} | class ____:
"""
a dict-like entity that represents a many-to-many relationship
between two groups of objects
behaves like a dict-of-tuples; also has .inv which is kept
up to date which is a dict-of-tuples in the other direction
also, can be used as a directed graph among hashable python objects
"""
def __init__(self, items=None):
self.data = {}
if type(items) is tuple and items and items[0] is _PAIRING:
self.inv = items[1]
else:
self.inv = self.__class__((_PAIRING, self))
if items:
self.update(items)
return
def get(self, key, default=frozenset()):
try:
return self[key]
except KeyError:
return default
def __getitem__(self, key):
return frozenset(self.data[key])
def __setitem__(self, key, vals):
vals = set(vals)
if key in self:
to_remove = self.data[key] - vals
vals -= self.data[key]
for val in to_remove:
self.remove(key, val)
for val in vals:
self.add(key, val)
def __delitem__(self, key):
for val in self.data.pop(key):
self.inv.data[val].remove(key)
if not self.inv.data[val]:
del self.inv.data[val]
def update(self, iterable):
"""given an iterable of (key, val), add them all"""
if type(iterable) is type(self):
other = iterable
for k in other.data:
if k not in self.data:
self.data[k] = other.data[k]
else:
self.data[k].update(other.data[k])
for k in other.inv.data:
if k not in self.inv.data:
self.inv.data[k] = other.inv.data[k]
else:
self.inv.data[k].update(other.inv.data[k])
elif callable(getattr(iterable, 'keys', None)):
for k in iterable.keys():
self.add(k, iterable[k])
else:
for key, val in iterable:
self.add(key, val)
return
def add(self, key, val):
if key not in self.data:
self.data[key] = set()
self.data[key].add(val)
if val not in self.inv.data:
self.inv.data[val] = set()
self.inv.data[val].add(key)
def remove(self, key, val):
self.data[key].remove(val)
if not self.data[key]:
del self.data[key]
self.inv.data[val].remove(key)
if not self.inv.data[val]:
del self.inv.data[val]
def replace(self, key, newkey):
"""
replace instances of key by newkey
"""
if key not in self.data:
return
self.data[newkey] = fwdset = self.data.pop(key)
for val in fwdset:
revset = self.inv.data[val]
revset.remove(key)
revset.add(newkey)
def iteritems(self):
for key in self.data:
for val in self.data[key]:
yield key, val
def keys(self):
return self.data.keys()
def __contains__(self, key):
return key in self.data
def __iter__(self):
return self.data.__iter__()
def __len__(self):
return self.data.__len__()
def __eq__(self, other):
return type(self) == type(other) and self.data == other.data
def __repr__(self):
cn = self.__class__.__name__
return f'{cn}({list(self.iteritems())!r})'
def subdict(d, keep=None, drop=None):
"""Compute the "subdictionary" of a dict, *d*.
A subdict is to a dict what a subset is a to set. If *A* is a
subdict of *B*, that means that all keys of *A* are present in
*B*.
Returns a new dict with any keys in *drop* removed, and any keys
in *keep* still present, provided they were in the original
dict. *keep* defaults to all keys, *drop* defaults to empty, so
without one of these arguments, calling this function is
equivalent to calling ``dict()``.
>>> from pprint import pprint as pp
>>> pp(subdict({'a': 1, 'b': 2}))
{'a': 1, 'b': 2}
>>> subdict({'a': 1, 'b': 2, 'c': 3}, drop=['b', 'c'])
{'a': 1}
>>> pp(subdict({'a': 1, 'b': 2, 'c': 3}, keep=['a', 'c']))
{'a': 1, 'c': 3}
"""
if keep is None:
keep = d.keys()
if drop is None:
drop = []
keys = set(keep) - set(drop)
return type(d)([(k, v) for k, v in d.items() if k in keys])
| ManyToMany |
python | kamyu104__LeetCode-Solutions | Python/distribute-candies.py | {
"start": 29,
"end": 252
} | class ____(object):
def distributeCandies(self, candies):
"""
:type candies: List[int]
:rtype: int
"""
lookup = set(candies)
return min(len(lookup), len(candies)/2)
| Solution |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 2670,
"end": 2933
} | class ____(str, _Action, Enum):
CREATE = "create_aliases"
READ = "read_aliases"
UPDATE = "update_aliases"
DELETE = "delete_aliases"
@staticmethod
def values() -> List[str]:
return [action.value for action in AliasAction]
| AliasAction |
python | django__django | django/template/loaders/cached.py | {
"start": 313,
"end": 3716
} | class ____(BaseLoader):
def __init__(self, engine, loaders):
self.get_template_cache = {}
self.loaders = engine.get_template_loaders(loaders)
super().__init__(engine)
def get_dirs(self):
for loader in self.loaders:
if hasattr(loader, "get_dirs"):
yield from loader.get_dirs()
def get_contents(self, origin):
return origin.loader.get_contents(origin)
def get_template(self, template_name, skip=None):
"""
Perform the caching that gives this loader its name. Often many of the
templates attempted will be missing, so memory use is of concern here.
To keep it in check, caching behavior is a little complicated when a
template is not found. See ticket #26306 for more details.
With template debugging disabled, cache the TemplateDoesNotExist class
for every missing template and raise a new instance of it after
fetching it from the cache.
With template debugging enabled, a unique TemplateDoesNotExist object
is cached for each missing template to preserve debug data. When
raising an exception, Python sets __traceback__, __context__, and
__cause__ attributes on it. Those attributes can contain references to
all sorts of objects up the call chain and caching them creates a
memory leak. Thus, unraised copies of the exceptions are cached and
copies of those copies are raised after they're fetched from the cache.
"""
key = self.cache_key(template_name, skip)
cached = self.get_template_cache.get(key)
if cached:
if isinstance(cached, type) and issubclass(cached, TemplateDoesNotExist):
raise cached(template_name)
elif isinstance(cached, TemplateDoesNotExist):
raise copy_exception(cached)
return cached
try:
template = super().get_template(template_name, skip)
except TemplateDoesNotExist as e:
self.get_template_cache[key] = (
copy_exception(e) if self.engine.debug else TemplateDoesNotExist
)
raise
else:
self.get_template_cache[key] = template
return template
def get_template_sources(self, template_name):
for loader in self.loaders:
yield from loader.get_template_sources(template_name)
def cache_key(self, template_name, skip=None):
"""
Generate a cache key for the template name and skip.
If skip is provided, only origins that match template_name are included
in the cache key. This ensures each template is only parsed and cached
once if contained in different extend chains like:
x -> a -> a
y -> a -> a
z -> a -> a
"""
skip_prefix = ""
if skip:
matching = [
origin.name for origin in skip if origin.template_name == template_name
]
if matching:
skip_prefix = self.generate_hash(matching)
return "-".join(s for s in (str(template_name), skip_prefix) if s)
def generate_hash(self, values):
return hashlib.sha1("|".join(values).encode()).hexdigest()
def reset(self):
"Empty the template cache."
self.get_template_cache.clear()
| Loader |
python | kamyu104__LeetCode-Solutions | Python/find-closest-person.py | {
"start": 36,
"end": 252
} | class ____(object):
def findClosest(self, x, y, z):
"""
:type x: int
:type y: int
:type z: int
:rtype: int
"""
return range(3)[cmp(abs(y-z), abs(x-z))]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/apply-discount-to-prices.py | {
"start": 38,
"end": 917
} | class ____(object):
def discountPrices(self, sentence, discount):
"""
:type sentence: str
:type discount: int
:rtype: str
"""
result = []
i = 0
while i < len(sentence):
j = sentence.find(' ', i)
if j == -1: j = len(sentence)
if sentence[i] == '$' and j-(i+1) > 0 and all(sentence[k].isdigit() for k in xrange(i+1, j)):
cnt = reduce(lambda x, y: x*10+int(y), (sentence[k] for k in xrange(i+1, j)), 0)
result.append("${:d}.{:02d}".format(*divmod(cnt*(100-discount), 100)))
else:
for k in xrange(i, j):
result.append(sentence[k])
if j != len(sentence):
result.append(' ')
i = j+1
return "".join(result)
# Time: O(n)
# Space: O(n)
# string
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 8603,
"end": 8801
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("LOGIN", "REMOTE_CREATED_AT")
| EnterpriseServerUserAccountOrderField |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1530681,
"end": 1534290
} | class ____(sgqlc.types.Type, Node, Starrable):
"""A topic aggregates entities that are related to a subject."""
__schema__ = github_schema
__field_names__ = ("name", "related_topics", "repositories")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The topic's name."""
related_topics = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null("Topic"))),
graphql_name="relatedTopics",
args=sgqlc.types.ArgDict((("first", sgqlc.types.Arg(Int, graphql_name="first", default=3)),)),
)
"""A list of related topics, including aliases of this topic, sorted
with the most relevant first. Returns up to 10 Topics.
Arguments:
* `first` (`Int`): How many topics to return. (default: `3`)
"""
repositories = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryConnection),
graphql_name="repositories",
args=sgqlc.types.ArgDict(
(
("privacy", sgqlc.types.Arg(RepositoryPrivacy, graphql_name="privacy", default=None)),
("order_by", sgqlc.types.Arg(RepositoryOrder, graphql_name="orderBy", default=None)),
("affiliations", sgqlc.types.Arg(sgqlc.types.list_of(RepositoryAffiliation), graphql_name="affiliations", default=None)),
(
"owner_affiliations",
sgqlc.types.Arg(
sgqlc.types.list_of(RepositoryAffiliation), graphql_name="ownerAffiliations", default=("OWNER", "COLLABORATOR")
),
),
("is_locked", sgqlc.types.Arg(Boolean, graphql_name="isLocked", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("sponsorable_only", sgqlc.types.Arg(Boolean, graphql_name="sponsorableOnly", default=False)),
)
),
)
"""A list of repositories.
Arguments:
* `privacy` (`RepositoryPrivacy`): If non-null, filters
repositories according to privacy
* `order_by` (`RepositoryOrder`): Ordering options for
repositories returned from the connection
* `affiliations` (`[RepositoryAffiliation]`): Array of viewer's
affiliation options for repositories returned from the
connection. For example, OWNER will include only repositories
that the current viewer owns.
* `owner_affiliations` (`[RepositoryAffiliation]`): Array of
owner's affiliation options for repositories returned from the
connection. For example, OWNER will include only repositories
that the organization or user being viewed owns. (default:
`[OWNER, COLLABORATOR]`)
* `is_locked` (`Boolean`): If non-null, filters repositories
according to whether they have been locked
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `sponsorable_only` (`Boolean`): If true, only repositories whose
owner can be sponsored via GitHub Sponsors will be returned.
(default: `false`)
"""
| Topic |
python | ray-project__ray | rllib/models/action_dist.py | {
"start": 232,
"end": 3425
} | class ____:
"""The policy action distribution of an agent.
Attributes:
inputs: input vector to compute samples from.
model (ModelV2): reference to model producing the inputs.
"""
def __init__(self, inputs: List[TensorType], model: ModelV2):
"""Initializes an ActionDist object.
Args:
inputs: input vector to compute samples from.
model (ModelV2): reference to model producing the inputs. This
is mainly useful if you want to use model variables to compute
action outputs (i.e., for autoregressive action distributions,
see examples/autoregressive_action_dist.py).
"""
self.inputs = inputs
self.model = model
def sample(self) -> TensorType:
"""Draw a sample from the action distribution."""
raise NotImplementedError
def deterministic_sample(self) -> TensorType:
"""
Get the deterministic "sampling" output from the distribution.
This is usually the max likelihood output, i.e. mean for Normal, argmax
for Categorical, etc..
"""
raise NotImplementedError
def sampled_action_logp(self) -> TensorType:
"""Returns the log probability of the last sampled action."""
raise NotImplementedError
def logp(self, x: TensorType) -> TensorType:
"""The log-likelihood of the action distribution."""
raise NotImplementedError
def kl(self, other: "ActionDistribution") -> TensorType:
"""The KL-divergence between two action distributions."""
raise NotImplementedError
def entropy(self) -> TensorType:
"""The entropy of the action distribution."""
raise NotImplementedError
def multi_kl(self, other: "ActionDistribution") -> TensorType:
"""The KL-divergence between two action distributions.
This differs from kl() in that it can return an array for
MultiDiscrete. TODO(ekl) consider removing this.
"""
return self.kl(other)
def multi_entropy(self) -> TensorType:
"""The entropy of the action distribution.
This differs from entropy() in that it can return an array for
MultiDiscrete. TODO(ekl) consider removing this.
"""
return self.entropy()
@staticmethod
@OldAPIStack
def required_model_output_shape(
action_space: gym.Space, model_config: ModelConfigDict
) -> Union[int, np.ndarray]:
"""Returns the required shape of an input parameter tensor for a
particular action space and an optional dict of distribution-specific
options.
Args:
action_space (gym.Space): The action space this distribution will
be used for, whose shape attributes will be used to determine
the required shape of the input parameter tensor.
model_config: Model's config dict (as defined in catalog.py)
Returns:
model_output_shape (int or np.ndarray of ints): size of the
required input vector (minus leading batch dimension).
"""
raise NotImplementedError
| ActionDistribution |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_sessions.py | {
"start": 79097,
"end": 98760
} | class ____(BaseMetricsTestCase, APITestCase):
def do_request(self, query, user=None, org=None):
self.login_as(user=user or self.user)
url = reverse(
"sentry-api-0-organization-sessions",
kwargs={"organization_id_or_slug": (org or self.organization).slug},
)
return self.client.get(url, query, format="json")
@freeze_time(MOCK_DATETIME)
def test_order_by_with_no_releases(self) -> None:
"""
Test that ensures if we have no releases in the preflight query when trying to order by
`release.timestamp`, we get no groups.
Essentially testing the empty preflight query filters branch.
"""
project_random = self.create_project()
for _ in range(0, 2):
self.store_session(make_session(project_random))
self.store_session(make_session(project_random, status="crashed"))
response = self.do_request(
{
"project": project_random.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["crash_free_rate(session)"],
"groupBy": ["release"],
"orderBy": "-release.timestamp",
"per_page": 3,
}
)
assert response.data["groups"] == []
def test_order_by_max_limit(self) -> None:
response = self.do_request(
{
"project": self.project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["crash_free_rate(session)"],
"groupBy": ["release"],
"orderBy": "-release.timestamp",
"per_page": 103,
}
)
assert response.data["detail"] == (
"This limit is too high for queries that requests a preflight query. "
"Please choose a limit below 100"
)
@freeze_time(MOCK_DATETIME)
def test_order_by(self) -> None:
"""
Test that ensures that we are able to get the crash_free_rate for the most 2 recent
releases when grouping by release
"""
# Step 1: Create 3 releases
release1b = self.create_release(version="1B")
release1c = self.create_release(version="1C")
release1d = self.create_release(version="1D")
# Step 2: Create crash free rate for each of those releases
# Release 1c -> 66.7% Crash free rate
for _ in range(0, 2):
self.store_session(make_session(self.project, release=release1c.version))
self.store_session(make_session(self.project, release=release1c.version, status="crashed"))
# Release 1b -> 33.3% Crash free rate
for _ in range(0, 2):
self.store_session(
make_session(self.project, release=release1b.version, status="crashed")
)
self.store_session(make_session(self.project, release=release1b.version))
# Create Sessions in each of these releases
# Release 1d -> 80% Crash free rate
for _ in range(0, 4):
self.store_session(make_session(self.project, release=release1d.version))
self.store_session(make_session(self.project, release=release1d.version, status="crashed"))
# Step 3: Make request
response = self.do_request(
{
"project": self.project.id, # project without users
"statsPeriod": "1d",
"interval": "1d",
"field": ["crash_free_rate(session)"],
"groupBy": ["release"],
"orderBy": "-release.timestamp",
"per_page": 3,
}
)
# Step 4: Validate Results
assert response.data["groups"] == [
{
"by": {"release": "1D"},
"totals": {"crash_free_rate(session)": 0.8},
"series": {"crash_free_rate(session)": [None, 0.8]},
},
{
"by": {"release": "1C"},
"totals": {"crash_free_rate(session)": 0.6666666666666667},
"series": {"crash_free_rate(session)": [None, 0.6666666666666667]},
},
{
"by": {"release": "1B"},
"totals": {"crash_free_rate(session)": 0.33333333333333337},
"series": {"crash_free_rate(session)": [None, 0.33333333333333337]},
},
]
@freeze_time(MOCK_DATETIME)
def test_order_by_with_session_status_groupby(self) -> None:
"""
Test that ensures we are able to group by session.status and order by `release.timestamp`
since `release.timestamp` is generated from a preflight query
"""
rando_project = self.create_project()
release_1a = self.create_release(project=rando_project, version="1A")
release_1b = self.create_release(project=rando_project, version="1B")
# Release 1B sessions
for _ in range(4):
self.store_session(
make_session(rando_project, release=release_1b.version, status="crashed")
)
self.store_session(
make_session(rando_project, release=release_1b.version, status="unhandled")
)
for _ in range(10):
self.store_session(make_session(rando_project, release=release_1b.version))
for _ in range(3):
self.store_session(make_session(rando_project, errors=1, release=release_1b.version))
# Release 1A sessions
for _ in range(0, 2):
self.store_session(
make_session(rando_project, release=release_1a.version, status="crashed")
)
self.store_session(
make_session(rando_project, release=release_1a.version, status="unhandled")
)
self.store_session(make_session(rando_project, release=release_1a.version))
for _ in range(3):
self.store_session(make_session(rando_project, errors=1, release=release_1a.version))
response = self.do_request(
{
"project": rando_project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(session)"],
"groupBy": ["release", "session.status"],
"orderBy": "-release.timestamp",
}
)
assert response.data["groups"] == [
{
"by": {"release": "1B", "session.status": "abnormal"},
"totals": {"sum(session)": 0},
"series": {"sum(session)": [0, 0]},
},
{
"by": {"release": "1B", "session.status": "crashed"},
"totals": {"sum(session)": 4},
"series": {"sum(session)": [0, 4]},
},
{
"by": {"release": "1B", "session.status": "errored"},
"totals": {"sum(session)": 4},
"series": {"sum(session)": [0, 4]},
},
{
"by": {"release": "1B", "session.status": "healthy"},
"totals": {"sum(session)": 10},
"series": {"sum(session)": [0, 10]},
},
{
"by": {"release": "1B", "session.status": "unhandled"},
"totals": {"sum(session)": 1},
"series": {"sum(session)": [0, 1]},
},
{
"by": {"release": "1A", "session.status": "abnormal"},
"totals": {"sum(session)": 0},
"series": {"sum(session)": [0, 0]},
},
{
"by": {"release": "1A", "session.status": "crashed"},
"totals": {"sum(session)": 2},
"series": {"sum(session)": [0, 2]},
},
{
"by": {"release": "1A", "session.status": "errored"},
"totals": {"sum(session)": 4},
"series": {"sum(session)": [0, 4]},
},
{
"by": {"release": "1A", "session.status": "healthy"},
"totals": {"sum(session)": 1},
"series": {"sum(session)": [0, 1]},
},
{
"by": {"release": "1A", "session.status": "unhandled"},
"totals": {"sum(session)": 1},
"series": {"sum(session)": [0, 1]},
},
]
@freeze_time(MOCK_DATETIME)
def test_order_by_with_limit(self) -> None:
rando_project = self.create_project()
# Create two releases with no metrics data and then two releases with metric data
release_1a = self.create_release(project=rando_project, version="1A")
release_1b = self.create_release(project=rando_project, version="1B")
self.create_release(project=rando_project, version="1C")
self.create_release(project=rando_project, version="1D")
self.store_session(make_session(rando_project, release=release_1a.version))
self.store_session(make_session(rando_project, release=release_1b.version))
self.store_session(
make_session(rando_project, release=release_1b.version, status="crashed")
)
response = self.do_request(
{
"project": rando_project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(session)"],
"groupBy": ["release"],
"orderBy": "-release.timestamp",
"per_page": 3,
}
)
assert response.data["groups"] == [
{
"by": {"release": "1D"},
"totals": {"sum(session)": 0},
"series": {"sum(session)": [0, 0]},
},
{
"by": {"release": "1C"},
"totals": {"sum(session)": 0},
"series": {"sum(session)": [0, 0]},
},
{
"by": {"release": "1B"},
"totals": {"sum(session)": 2},
"series": {"sum(session)": [0, 2]},
},
]
response = self.do_request(
{
"project": rando_project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(session)"],
"groupBy": ["release", "session.status"],
"orderBy": "-release.timestamp",
"per_page": 4,
}
)
assert response.data["groups"] == [
{
"by": {"release": "1D", "session.status": None},
"totals": {"sum(session)": 0},
"series": {"sum(session)": [0, 0]},
},
{
"by": {"release": "1C", "session.status": None},
"totals": {"sum(session)": 0},
"series": {"sum(session)": [0, 0]},
},
{
"by": {"release": "1B", "session.status": "abnormal"},
"totals": {"sum(session)": 0},
"series": {"sum(session)": [0, 0]},
},
{
"by": {"release": "1B", "session.status": "crashed"},
"totals": {"sum(session)": 1},
"series": {"sum(session)": [0, 1]},
},
]
response = self.do_request(
{
"project": rando_project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(session)"],
"groupBy": ["release", "session.status", "project"],
"orderBy": "-release.timestamp",
"per_page": 2,
}
)
assert response.data["groups"] == [
{
"by": {"release": "1D", "session.status": None, "project": None},
"totals": {"sum(session)": 0},
"series": {"sum(session)": [0, 0]},
},
{
"by": {"release": "1C", "session.status": None, "project": None},
"totals": {"sum(session)": 0},
"series": {"sum(session)": [0, 0]},
},
]
@freeze_time(MOCK_DATETIME)
def test_order_by_with_limit_and_offset(self) -> None:
rando_project = self.create_project()
# Create two releases with no metrics data and then two releases with metric data
release_1a = self.create_release(project=rando_project, version="1A")
release_1b = self.create_release(project=rando_project, version="1B")
self.create_release(project=rando_project, version="1C")
self.create_release(project=rando_project, version="1D")
self.store_session(make_session(rando_project, release=release_1a.version))
self.store_session(make_session(rando_project, release=release_1b.version))
response = self.do_request(
{
"project": rando_project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(session)"],
"groupBy": ["release"],
"orderBy": "-release.timestamp",
"per_page": 3,
"cursor": Cursor(0, 1),
}
)
assert response.data["detail"] == (
"Passing an offset value greater than 0 when ordering by release.timestamp "
"is not permitted"
)
@freeze_time(MOCK_DATETIME)
def test_order_by_with_environment_filter_on_preflight(self) -> None:
rando_project = self.create_project()
rando_env = self.create_environment(name="rando_env", project=self.project)
# Create two releases with no metrics data and then two releases with metric data
release_1a = self.create_release(
project=rando_project, version="1A", environments=[rando_env]
)
release_1b = self.create_release(
project=rando_project, version="1B", environments=[rando_env]
)
release_1c = self.create_release(project=rando_project, version="1C")
release_1d = self.create_release(project=rando_project, version="1D")
self.store_session(
make_session(rando_project, release=release_1a.version, environment="rando_env")
)
self.store_session(
make_session(rando_project, release=release_1b.version, environment="rando_env")
)
self.store_session(make_session(rando_project, release=release_1c.version))
self.store_session(make_session(rando_project, release=release_1d.version))
# Test env condition with IN
response = self.do_request(
{
"project": rando_project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(session)"],
"query": "environment:[rando_env,rando_enc2]",
"groupBy": ["release", "environment"],
"orderBy": "-release.timestamp",
"per_page": 4,
}
)
assert response.data["groups"] == [
{
"by": {"release": "1B", "environment": "rando_env"},
"totals": {"sum(session)": 1},
"series": {"sum(session)": [0, 1]},
},
{
"by": {"release": "1A", "environment": "rando_env"},
"totals": {"sum(session)": 1},
"series": {"sum(session)": [0, 1]},
},
]
# Test env condition with NOT IN
response = self.do_request(
{
"project": rando_project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(session)"],
"query": "!environment:[rando_env,rando_enc2]",
"groupBy": ["release", "environment"],
"orderBy": "-release.timestamp",
"per_page": 4,
}
)
assert response.data["groups"] == [
{
"by": {"release": "1D", "environment": "production"},
"totals": {"sum(session)": 1},
"series": {"sum(session)": [0, 1]},
},
{
"by": {"release": "1C", "environment": "production"},
"totals": {"sum(session)": 1},
"series": {"sum(session)": [0, 1]},
},
]
# Test env condition with invalid OR operation
response = self.do_request(
{
"project": rando_project.id,
"statsPeriod": "1d",
"interval": "1d",
"field": ["sum(session)"],
"query": "environment:rando_env OR environment:rando_enc2",
"groupBy": ["release", "environment"],
"orderBy": "-release.timestamp",
"per_page": 4,
}
)
assert response.json()["detail"] == "Unable to parse condition with environment"
@freeze_time(MOCK_DATETIME)
def test_order_by_without_release_groupby(self) -> None:
rando_project = self.create_project()
response = self.do_request(
{
"project": rando_project.id,
"statsPeriod": "1d",
"interval": "1d",
"query": "session.status:[crashed,errored]",
"field": ["sum(session)"],
"orderBy": "-release.timestamp",
"per_page": 2,
}
)
assert response.data["detail"] == (
"To sort by release.timestamp, tag release must be in the groupBy"
)
@freeze_time(MOCK_DATETIME)
def test_order_by_release_with_session_status_current_filter(self) -> None:
rando_project = self.create_project()
release_1a = self.create_release(project=rando_project, version="1A")
release_1b = self.create_release(project=rando_project, version="1B")
# Release 1B sessions
for _ in range(4):
self.store_session(
make_session(rando_project, release=release_1b.version, status="crashed")
)
for _ in range(10):
self.store_session(make_session(rando_project, release=release_1b.version))
for _ in range(3):
self.store_session(make_session(rando_project, errors=1, release=release_1b.version))
# Release 1A sessions
for _ in range(0, 2):
self.store_session(
make_session(rando_project, release=release_1a.version, status="crashed")
)
self.store_session(make_session(rando_project, release=release_1a.version))
for _ in range(3):
self.store_session(make_session(rando_project, errors=1, release=release_1a.version))
response = self.do_request(
{
"project": rando_project.id,
"statsPeriod": "1d",
"interval": "1d",
"query": "session.status:[crashed,errored]",
"field": ["sum(session)"],
"groupBy": ["release"],
"orderBy": "-release.timestamp",
}
)
assert response.data["groups"] == [
{
"by": {"release": "1B"},
"totals": {"sum(session)": 7},
"series": {"sum(session)": [0, 7]},
},
{
"by": {"release": "1A"},
"totals": {"sum(session)": 5},
"series": {"sum(session)": [0, 5]},
},
]
| SessionsMetricsSortReleaseTimestampTest |
python | tiangolo__fastapi | docs_src/header_param_models/tutorial003_an.py | {
"start": 158,
"end": 478
} | class ____(BaseModel):
host: str
save_data: bool
if_modified_since: Union[str, None] = None
traceparent: Union[str, None] = None
x_tag: List[str] = []
@app.get("/items/")
async def read_items(
headers: Annotated[CommonHeaders, Header(convert_underscores=False)],
):
return headers
| CommonHeaders |
python | kamyu104__LeetCode-Solutions | Python/lexicographically-smallest-generated-string.py | {
"start": 109,
"end": 1995
} | class ____(object):
def generateString(self, str1, str2):
"""
:type str1: str
:type str2: str
:rtype: str
"""
def getPrefix(pattern):
prefix = [-1]*len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j+1 > 0 and pattern[j+1] != pattern[i]:
j = prefix[j]
if pattern[j+1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
n, m = len(str1), len(str2)
candidate = ['*']*(n+m-1)
prefix = getPrefix(str2)
prev = -m
for i, x in enumerate(str1):
if x != 'T':
continue
diff = i-prev
if diff < m:
if prefix[m-1]+1 == m-diff:
candidate[prev+m:i+m] = str2[m-diff:]
else:
return ""
else:
candidate[i:i+m] = str2
prev = i
result = list(str2)+['#']+candidate
idxs = []
for i in xrange(m+1, len(result)):
if result[i] == '*':
result[i] = 'a'
idxs.append(i)
prefix = getPrefix(result)
dq = collections.deque()
i, j = m+1, 0
while i-(m+1) < n:
while dq and dq[0] < i:
dq.popleft()
while j < len(idxs) and idxs[j] <= i+(m-1):
dq.append(idxs[j])
j += 1
if str1[i-(m+1)] == 'F' and prefix[i+(m-1)]+1 == m:
if not dq:
return ""
result[dq[-1]] = 'b'
i += m
else:
i += 1
return "".join(result[m+1:])
# Time: O(n + m)
# Space: O(n + m)
import collections
# z-function, two pointers, sliding window, deque, greedy
| Solution |
python | pytorch__pytorch | test/distributed/_pycute/test_complement.py | {
"start": 2091,
"end": 3468
} | class ____(TestCase):
def helper_test_complement(self, layout):
layoutR = complement(layout)
_LOGGER.debug(f"{layout} => {layoutR}")
# Post-condition: test disjointedness of the codomains
for a in range(size(layout)):
for b in range(size(layoutR)):
assert (layout(a) != layoutR(b)) or (layout(a) == 0 and layoutR(b) == 0)
def test_complement(self):
test = Layout(1, 0)
self.helper_test_complement(test)
test = Layout(1, 1)
self.helper_test_complement(test)
test = Layout(4, 0)
self.helper_test_complement(test)
test = Layout((2, 4), (1, 2))
self.helper_test_complement(test)
test = Layout((2, 3), (1, 2))
self.helper_test_complement(test)
test = Layout((2, 4), (1, 4))
self.helper_test_complement(test)
test = Layout((2, 4, 8), (8, 1, 64))
self.helper_test_complement(test)
test = Layout(((2, 2), (2, 2)), ((1, 4), (8, 32)))
self.helper_test_complement(test)
test = Layout((2, (3, 4)), (3, (1, 6)))
self.helper_test_complement(test)
test = Layout((4, 6), (1, 6))
self.helper_test_complement(test)
test = Layout((4, 10), (1, 10))
self.helper_test_complement(test)
if __name__ == "__main__":
run_tests()
| TestComplement |
python | Textualize__textual | docs/examples/how-to/containers06.py | {
"start": 272,
"end": 655
} | class ____(App):
"""Simple app to play with containers."""
CSS = """
.with-border {
border: heavy green;
}
"""
def compose(self) -> ComposeResult:
with Horizontal(classes="with-border"):
for n in range(10):
yield Box(label=f"Box {n+1}")
if __name__ == "__main__":
app = ContainerApp()
app.run()
| ContainerApp |
python | sqlalchemy__sqlalchemy | test/sql/test_types.py | {
"start": 130734,
"end": 131255
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
__sparse_driver_backend__ = True
def test_user_defined(self):
"""test that dialects pass the column through on DDL."""
class MyType(types.UserDefinedType):
def get_col_spec(self, **kw):
return "FOOB %s" % kw["type_expression"].name
m = MetaData()
t = Table("t", m, Column("bar", MyType, nullable=False))
self.assert_compile(ddl.CreateColumn(t.c.bar), "bar FOOB bar NOT NULL")
| TestKWArgPassThru |
python | google__flatbuffers | python/flatbuffers/reflection/Object.py | {
"start": 179,
"end": 6861
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Object()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsObject(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def ObjectBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x42\x46\x42\x53", size_prefixed=size_prefixed)
# Object
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Object
def Name(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Object
def Fields(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from reflection.Field import Field
obj = Field()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Object
def FieldsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Object
def FieldsIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
return o == 0
# Object
def IsStruct(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# Object
def Minalign(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Object
def Bytesize(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# Object
def Attributes(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from reflection.KeyValue import KeyValue
obj = KeyValue()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Object
def AttributesLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Object
def AttributesIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
return o == 0
# Object
def Documentation(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
a = self._tab.Vector(o)
return self._tab.String(a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
return ""
# Object
def DocumentationLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Object
def DocumentationIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
return o == 0
# File that this Object is declared in.
# Object
def DeclarationFile(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def ObjectStart(builder):
builder.StartObject(8)
def Start(builder):
ObjectStart(builder)
def ObjectAddName(builder, name):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(name), 0)
def AddName(builder, name):
ObjectAddName(builder, name)
def ObjectAddFields(builder, fields):
builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(fields), 0)
def AddFields(builder, fields):
ObjectAddFields(builder, fields)
def ObjectStartFieldsVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartFieldsVector(builder, numElems):
return ObjectStartFieldsVector(builder, numElems)
def ObjectAddIsStruct(builder, isStruct):
builder.PrependBoolSlot(2, isStruct, 0)
def AddIsStruct(builder, isStruct):
ObjectAddIsStruct(builder, isStruct)
def ObjectAddMinalign(builder, minalign):
builder.PrependInt32Slot(3, minalign, 0)
def AddMinalign(builder, minalign):
ObjectAddMinalign(builder, minalign)
def ObjectAddBytesize(builder, bytesize):
builder.PrependInt32Slot(4, bytesize, 0)
def AddBytesize(builder, bytesize):
ObjectAddBytesize(builder, bytesize)
def ObjectAddAttributes(builder, attributes):
builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(attributes), 0)
def AddAttributes(builder, attributes):
ObjectAddAttributes(builder, attributes)
def ObjectStartAttributesVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartAttributesVector(builder, numElems):
return ObjectStartAttributesVector(builder, numElems)
def ObjectAddDocumentation(builder, documentation):
builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(documentation), 0)
def AddDocumentation(builder, documentation):
ObjectAddDocumentation(builder, documentation)
def ObjectStartDocumentationVector(builder, numElems):
return builder.StartVector(4, numElems, 4)
def StartDocumentationVector(builder, numElems):
return ObjectStartDocumentationVector(builder, numElems)
def ObjectAddDeclarationFile(builder, declarationFile):
builder.PrependUOffsetTRelativeSlot(7, flatbuffers.number_types.UOffsetTFlags.py_type(declarationFile), 0)
def AddDeclarationFile(builder, declarationFile):
ObjectAddDeclarationFile(builder, declarationFile)
def ObjectEnd(builder):
return builder.EndObject()
def End(builder):
return ObjectEnd(builder)
| Object |
python | pydantic__pydantic | pydantic/v1/types.py | {
"start": 25034,
"end": 25916
} | class ____(abc.ABC):
"""
Note: this should be implemented as a generic like `SecretField(ABC, Generic[T])`,
the `__init__()` should be part of the abstract class and the
`get_secret_value()` method should use the generic `T` type.
However Cython doesn't support very well generics at the moment and
the generated code fails to be imported (see
https://github.com/cython/cython/issues/2753).
"""
def __eq__(self, other: Any) -> bool:
return isinstance(other, self.__class__) and self.get_secret_value() == other.get_secret_value()
def __str__(self) -> str:
return '**********' if self.get_secret_value() else ''
def __hash__(self) -> int:
return hash(self.get_secret_value())
@abc.abstractmethod
def get_secret_value(self) -> Any: # pragma: no cover
...
| SecretField |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/tutorials/multi-asset-integration/integration.py | {
"start": 1725,
"end": 2765
} | class ____:
@public
def get_asset_key(self, table_definition: Mapping[str, str]) -> dg.AssetKey:
return dg.AssetKey(str(table_definition.get("name")))
def custom_replication_assets(
*,
replication_project: ReplicationProject,
name: Optional[str] = None,
group_name: Optional[str] = None,
translator: Optional[ReplicationTranslator] = None,
) -> Callable[[Callable[..., Any]], dg.AssetsDefinition]:
project = replication_project.load()
translator = (
check.opt_inst_param(translator, "translator", ReplicationTranslator)
or ReplicationTranslator()
)
return dg.multi_asset(
name=name,
group_name=group_name,
specs=[
dg.AssetSpec(
key=translator.get_asset_key(table),
metadata={
"replication_project": project,
"replication_translator": translator,
},
)
for table in project.get("tables")
],
)
| ReplicationTranslator |
python | pytorch__pytorch | tools/experimental/torchfuzz/operators/nn_functional.py | {
"start": 12209,
"end": 16095
} | class ____(Operator):
"""Operator for torch.nn.functional.layer_norm."""
def __init__(self):
super().__init__("torch.nn.functional.layer_norm")
@property
def torch_op_name(self) -> str | None:
"""Return the torch operation name."""
return "torch.nn.functional.layer_norm"
def can_produce(self, output_spec: Spec) -> bool:
"""LayerNorm can produce tensor outputs with floating point dtypes."""
if not isinstance(output_spec, TensorSpec):
return False
# LayerNorm needs at least 1 dimension to normalize over
if len(output_spec.size) == 0:
return False
return is_float_dtype(output_spec.dtype)
def fuzz_inputs_specs(self, output_spec: Spec) -> list[Spec]:
"""Generate input specs for layer_norm operation.
LayerNorm normalizes over the last dimensions specified by normalized_shape.
- input: input tensor
- weight: (normalized_shape,) [optional]
- bias: (normalized_shape,) [optional]
"""
if not isinstance(output_spec, TensorSpec):
raise ValueError("LayerNormOperator can only produce TensorSpec outputs")
if len(output_spec.size) == 0:
raise ValueError("LayerNorm output must have at least 1 dimension")
# Input tensor has same shape and dtype as output
input_spec = TensorSpec(
size=output_spec.size, stride=output_spec.stride, dtype=output_spec.dtype
)
# For simplicity, normalize over the last dimension
normalized_shape = output_spec.size[-1:]
# Weight and bias tensors (optional with 70% probability each)
specs = [input_spec]
if random.random() < 0.7:
# LayerNorm weight and bias parameters should match input tensor dtype
# for compatibility (conversion will be handled in codegen)
weight_spec = TensorSpec(
size=normalized_shape, stride=(1,), dtype=output_spec.dtype
)
specs.append(weight_spec)
if random.random() < 0.7:
bias_spec = TensorSpec(
size=normalized_shape, stride=(1,), dtype=output_spec.dtype
)
specs.append(bias_spec)
# Cast to list[Spec] to fix type checking
from typing import cast
return cast(list[Spec], specs)
def codegen(
self, output_name: str, input_names: list[str], output_spec: Spec
) -> str:
"""Generate code for layer_norm operation."""
if len(input_names) < 1 or len(input_names) > 3:
raise ValueError(
"LayerNorm requires 1-3 inputs: input, optional weight, optional bias"
)
if not isinstance(output_spec, TensorSpec):
raise ValueError("LayerNormOperator can only produce TensorSpec outputs")
# Normalize over the last dimension
normalized_shape = f"({output_spec.size[-1]},)"
# Ensure dtype compatibility by converting all inputs to the expected output dtype
target_dtype = str(output_spec.dtype)
input_name = input_names[0]
if len(input_names) == 1:
return f"{output_name} = torch.nn.functional.layer_norm({input_name}.to({target_dtype}), {normalized_shape})"
elif len(input_names) == 2:
weight_name = input_names[1]
return f"{output_name} = torch.nn.functional.layer_norm({input_name}.to({target_dtype}), {normalized_shape}, weight={weight_name}.to({target_dtype}))"
else: # len(input_names) == 3
weight_name, bias_name = input_names[1], input_names[2]
return f"{output_name} = torch.nn.functional.layer_norm({input_name}.to({target_dtype}), {normalized_shape}, weight={weight_name}.to({target_dtype}), bias={bias_name}.to({target_dtype}))"
| LayerNormOperator |
python | falconry__falcon | tests/test_before_hooks.py | {
"start": 5645,
"end": 11398
} | class ____(ZooResource):
def on_get(self, req, resp):
super().on_get(
req,
resp,
# Test passing a mixture of args and kwargs
'fluffy',
'not fluffy',
fish='slippery',
)
@pytest.fixture
def wrapped_aware_resource():
return ClassResourceWithAwareHooks()
@pytest.fixture
def wrapped_resource():
return WrappedClassResource()
@pytest.fixture
def resource():
return WrappedRespondersResource()
@pytest.fixture
def client(asgi, util, request, resource):
app = util.create_app(asgi)
app.add_route('/', resource)
return testing.TestClient(app)
@pytest.mark.parametrize('resource', [ZooResource(), ZooResourceChild()])
def test_multiple_resource_hooks(client, resource):
client.app.add_route('/', resource)
result = client.simulate_get('/')
assert 'not fluffy' == result.headers['X-Frogs']
assert 'fluffy' == result.headers['X-Bunnies']
assert 'fluffy' == resource.bunnies
assert 'not fluffy' == resource.frogs
assert 'slippery' == resource.fish
def test_input_validator(client):
result = client.simulate_put('/')
assert result.status_code == 400
def test_input_validator_inherited(client):
client.app.add_route('/', WrappedRespondersResourceChild())
result = client.simulate_put('/')
assert result.status_code == 400
result = client.simulate_get('/', query_string='x=1000')
assert result.status_code == 200
result = client.simulate_get('/', query_string='x=1001')
assert result.status_code == 400
def test_param_validator(client):
result = client.simulate_get('/', query_string='limit=10', body='{}')
assert result.status_code == 200
result = client.simulate_get('/', query_string='limit=101')
assert result.status_code == 400
@pytest.mark.parametrize(
'resource',
[
TestFieldResource(),
TestFieldResourceChild(),
TestFieldResourceChildToo(),
],
)
def test_field_validator(client, resource):
client.app.add_route('/queue/{id}/messages', resource)
result = client.simulate_get('/queue/10/messages')
assert result.status_code == 200
assert resource.id == 10
result = client.simulate_get('/queue/bogus/messages')
assert result.status_code == 400
@pytest.mark.parametrize(
'body,doc',
[
(json.dumps({'animal': 'falcon'}), {'animal': 'falcon'}),
('{}', {}),
('', None),
(None, None),
],
)
def test_parser_sync(body, doc):
app = falcon.App()
resource = WrappedRespondersBodyParserResource()
app.add_route('/', resource)
testing.simulate_get(app, '/', body=body)
assert resource.doc == doc
@pytest.mark.parametrize(
'body,doc',
[
(json.dumps({'animal': 'falcon'}), {'animal': 'falcon'}),
('{}', {}),
('', None),
(None, None),
],
)
def test_parser_async(body, doc, util):
with util.disable_asgi_non_coroutine_wrapping():
class WrappedRespondersBodyParserAsyncResource:
@falcon.before(validate_param_async, 'limit', 100)
@falcon.before(parse_body_async)
async def on_get(self, req, resp, doc=None):
self.doc = doc
@falcon.before(parse_body_async)
async def on_put(self, req, resp, doc=None):
self.doc = doc
app = util.create_app(asgi=True)
resource = WrappedRespondersBodyParserAsyncResource()
app.add_route('/', resource)
testing.simulate_get(app, '/', body=body)
assert resource.doc == doc
testing.simulate_put(app, '/', body=body)
assert resource.doc == doc
async def test_direct():
resource = WrappedRespondersBodyParserAsyncResource()
req = testing.create_asgi_req()
resp = util.create_resp(True)
await resource.on_get(req, resp, doc)
assert resource.doc == doc
falcon.async_to_sync(test_direct)
def test_wrapped_resource(client, wrapped_resource):
client.app.add_route('/wrapped', wrapped_resource)
result = client.simulate_patch('/wrapped')
assert result.status_code == 405
result = client.simulate_get('/wrapped', query_string='limit=10')
assert result.status_code == 200
assert 'fuzzy' == wrapped_resource.bunnies
result = client.simulate_head('/wrapped')
assert result.status_code == 200
assert 'fuzzy' == wrapped_resource.bunnies
result = client.simulate_post('/wrapped')
assert result.status_code == 200
assert 'slippery' == wrapped_resource.fish
result = client.simulate_get('/wrapped', query_string='limit=101')
assert result.status_code == 400
assert wrapped_resource.bunnies == 'fuzzy'
def test_wrapped_resource_with_hooks_aware_of_resource(client, wrapped_aware_resource):
client.app.add_route('/wrapped_aware', wrapped_aware_resource)
result = client.simulate_patch('/wrapped_aware')
assert result.status_code == 405
result = client.simulate_get('/wrapped_aware', query_string='limit=10')
assert result.status_code == 200
assert wrapped_aware_resource.bunnies == 'fuzzy'
for method in ('HEAD', 'PUT', 'POST'):
result = client.simulate_request(method, '/wrapped_aware')
assert result.status_code == 200
assert wrapped_aware_resource.bunnies == 'fuzzy'
result = client.simulate_get('/wrapped_aware', query_string='limit=11')
assert result.status_code == 400
assert wrapped_aware_resource.bunnies == 'fuzzy'
_another_fish = Fish()
def header_hook(req, resp, resource, params):
value = resp.get_header('X-Hook-Applied') or '0'
resp.set_header('X-Hook-Applied', str(int(value) + 1))
@falcon.before(header_hook)
| ZooResourceChild |
python | tensorflow__tensorflow | tensorflow/python/keras/optimizer_v2/nadam.py | {
"start": 1303,
"end": 9274
} | class ____(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the NAdam algorithm.
Much like Adam is essentially RMSprop with momentum, Nadam is Adam with
Nesterov momentum.
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the exponentially weighted infinity norm.
epsilon: A small constant for numerical stability.
name: Optional name for the operations created when applying gradients.
Defaults to `"Nadam"`.
**kwargs: Keyword arguments. Allowed to be one of
`"clipnorm"` or `"clipvalue"`.
`"clipnorm"` (float) clips gradients by norm; `"clipvalue"` (float) clips
gradients by value.
Usage Example:
>>> opt = tf.keras.optimizers.Nadam(learning_rate=0.2)
>>> var1 = tf.Variable(10.0)
>>> loss = lambda: (var1 ** 2) / 2.0
>>> step_count = opt.minimize(loss, [var1]).numpy()
>>> "{:.1f}".format(var1.numpy())
9.8
Reference:
- [Dozat, 2015](http://cs229.stanford.edu/proj2015/054_report.pdf).
"""
_HAS_AGGREGATE_GRAD = True
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
name='Nadam',
**kwargs):
# Backwards compatibility with keras NAdam optimizer.
kwargs['decay'] = kwargs.pop('schedule_decay', 0.004)
learning_rate = kwargs.get('lr', learning_rate)
if isinstance(learning_rate, learning_rate_schedule.LearningRateSchedule):
raise ValueError('The Nadam optimizer does not support '
'tf.keras.optimizers.LearningRateSchedules as the '
'learning rate.')
super(Nadam, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self.epsilon = epsilon or backend_config.epsilon()
self._m_cache = None
def _create_slots(self, var_list):
var_dtype = var_list[0].dtype.base_dtype
if self._m_cache is None:
self._m_cache = self.add_weight(
'momentum_cache',
shape=[],
dtype=var_dtype,
initializer='ones',
trainable=False,
aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
self._weights.append(self._m_cache)
# Separate for-loops to respect the ordering of slot variables from v1.
for var in var_list:
# Create slots for the first moments.
self.add_slot(var, 'm')
for var in var_list:
# Create slots for the second moments.
self.add_slot(var, 'v')
def _prepare_local(self, var_device, var_dtype, apply_state):
lr_t = array_ops.identity(self._get_hyper('learning_rate', var_dtype))
beta_1_t = array_ops.identity(self._get_hyper('beta_1', var_dtype))
beta_2_t = array_ops.identity(self._get_hyper('beta_2', var_dtype))
local_step = math_ops.cast(self.iterations + 1, var_dtype)
next_step = math_ops.cast(self.iterations + 2, var_dtype)
decay_base = math_ops.cast(0.96, var_dtype)
m_t = beta_1_t * (1. - 0.5 * (
math_ops.pow(decay_base, self._initial_decay * local_step)))
m_t_1 = beta_1_t * (1. - 0.5 * (
math_ops.pow(decay_base, self._initial_decay * next_step)))
m_schedule_new = math_ops.cast(self._m_cache_read, var_dtype) * m_t
if var_dtype is self._m_cache.dtype:
m_schedule_new = array_ops.identity(state_ops.assign(
self._m_cache, m_schedule_new, use_locking=self._use_locking))
m_schedule_next = m_schedule_new * m_t_1
apply_state[(var_device, var_dtype)] = dict(
lr_t=lr_t,
neg_lr_t=-lr_t, # pylint: disable=invalid-unary-operand-type
epsilon=tensor_conversion.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype
),
beta_1_t=beta_1_t,
beta_2_t=beta_2_t,
m_t=m_t,
m_t_1=m_t_1,
one_minus_beta_1_t=1 - beta_1_t,
one_minus_beta_2_t=1 - beta_2_t,
one_minus_m_t=1.0 - m_t,
one_minus_m_schedule_new=1.0 - m_schedule_new,
one_minus_m_schedule_next=1.0 - m_schedule_next,
v_t_prime_denominator=1.0 - math_ops.pow(beta_2_t, local_step),
)
def _prepare(self, var_list):
# Get the value of the momentum cache before starting to apply gradients.
self._m_cache_read = array_ops.identity(self._m_cache)
return super(Nadam, self)._prepare(var_list)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
g_prime = grad / coefficients['one_minus_m_schedule_new']
m_t = (coefficients['beta_1_t'] * m +
coefficients['one_minus_beta_1_t'] * grad)
m_t = state_ops.assign(m, m_t, use_locking=self._use_locking)
m_t_prime = m_t / coefficients['one_minus_m_schedule_next']
v_t = (coefficients['beta_2_t'] * v +
coefficients['one_minus_beta_2_t'] * math_ops.square(grad))
v_t = state_ops.assign(v, v_t, use_locking=self._use_locking)
v_t_prime = v_t / coefficients['v_t_prime_denominator']
m_t_bar = (coefficients['one_minus_m_t'] * g_prime +
coefficients['m_t_1'] * m_t_prime)
var_t = var - coefficients['lr_t'] * m_t_bar / (
math_ops.sqrt(v_t_prime) + coefficients['epsilon'])
return state_ops.assign(var, var_t, use_locking=self._use_locking).op
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
g_prime = grad / coefficients['one_minus_m_schedule_new']
# m_t = beta1 * m + (1 - beta1) * g_t
m_scaled_g_values = grad * coefficients['one_minus_beta_1_t']
m_t = state_ops.assign(m, m * coefficients['beta_1_t'],
use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
m_t_slice = array_ops.gather(m_t, indices)
m_t_prime = m_t_slice / coefficients['one_minus_m_schedule_next']
m_t_bar = (coefficients['one_minus_m_t'] * g_prime +
coefficients['m_t_1'] * m_t_prime)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v_scaled_g_values = (grad * grad) * coefficients['one_minus_beta_2_t']
v_t = state_ops.assign(v, v * coefficients['beta_2_t'],
use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
v_t_slice = array_ops.gather(v_t, indices)
v_t_prime = v_t_slice / coefficients['v_t_prime_denominator']
v_prime_sqrt_plus_eps = math_ops.sqrt(v_t_prime) + coefficients['epsilon']
var_update = self._resource_scatter_add(
var, indices,
coefficients['neg_lr_t'] * m_t_bar / v_prime_sqrt_plus_eps)
return control_flow_ops.group(*[var_update, m_t_bar, v_t])
def get_config(self):
config = super(Nadam, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._initial_decay,
'beta_1': self._serialize_hyperparameter('beta_1'),
'beta_2': self._serialize_hyperparameter('beta_2'),
'epsilon': self.epsilon,
})
return config
| Nadam |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 346073,
"end": 346308
} | class ____(Response):
"""
Response of tasks.move endpoint.
"""
_service = "tasks"
_action = "move"
_version = "2.20"
_schema = {"additionalProperties": True, "definitions": {}, "type": "object"}
| MoveResponse |
python | tensorflow__tensorflow | tensorflow/python/framework/function_test.py | {
"start": 55960,
"end": 58360
} | class ____(test.TestCase):
def _testSimpleModel(self, use_forward_func, use_resource=False):
def _Model(x):
w = variable_scope.get_variable(
"w", (64, 64),
initializer=init_ops.random_uniform_initializer(seed=312),
use_resource=use_resource)
b = variable_scope.get_variable(
"b", (64),
initializer=init_ops.zeros_initializer(),
use_resource=use_resource),
return math_ops.sigmoid(math_ops.matmul(x, w) + b)
@function.Defun()
def Model(x):
return _Model(x)
cvars = []
@function.Defun()
def Grad(x, y0):
if use_forward_func:
y = Model(x)
else:
y = _Model(x)
loss = math_ops.reduce_mean(
math_ops.reduce_sum(y0 * math_ops.log(y), 1), 0)
arg_w, arg_b = function.get_extra_args()
self.assertEqual(arg_w.get_shape(), tensor_shape.TensorShape([64, 64]))
self.assertEqual(arg_b.get_shape(), tensor_shape.TensorShape([64]))
dw, db = gradients_impl.gradients(loss, [arg_w, arg_b])
cvars.extend(function.get_extra_vars())
return loss, dw, db
g = ops.Graph()
with g.as_default():
x = random_ops.random_normal([64, 64], seed=100)
y0 = random_ops.random_normal([64, 64], seed=200)
with variable_scope.variable_scope("Foo"):
loss, dw, db = Grad(x, y0)
self.assertEqual(2, len(cvars))
w, b = cvars[:2]
self.assertEqual("Foo/w", w.op.name)
self.assertEqual("Foo/b", b.op.name)
with self.session(graph=g) as sess:
self.evaluate(variables.global_variables_initializer())
w, b, x, y0, loss, dw, db = self.evaluate([w, b, x, y0, loss, dw, db])
self.assertAllEqual(w.shape, (64, 64))
self.assertAllClose(np.sum(w), 2050.44)
self.assertAllEqual(b.shape, (64,))
self.assertAllClose(np.sum(b), 0.0)
self.assertAllClose(loss, -2.27, rtol=1e-2)
self.assertAllEqual(dw.shape, (64, 64))
self.assertAllClose(np.sum(dw), -1.04, rtol=1e-2)
self.assertAllEqual(db.shape, (64,))
self.assertAllClose(np.sum(db), 0.509, rtol=1e-2)
@test_util.run_deprecated_v1
def testBasic(self):
self._testSimpleModel(False)
self._testSimpleModel(True)
@test_util.run_deprecated_v1
def testBasicResource(self):
self._testSimpleModel(False, use_resource=True)
self._testSimpleModel(True, use_resource=True)
| VariableHoistingTest |
python | numba__numba | numba/tests/test_withlifting.py | {
"start": 32886,
"end": 34119
} | class ____(BaseTestWithLifting):
def test_undefined_global(self):
the_ir = get_func_ir(lift_undefiend)
with self.assertRaises(errors.CompilerError) as raises:
with_lifting(
the_ir, self.typingctx, self.targetctx, self.flags, locals={},
)
self.assertIn(
"Undefined variable used as context manager",
str(raises.exception),
)
def test_invalid(self):
the_ir = get_func_ir(lift_invalid)
with self.assertRaises(errors.CompilerError) as raises:
with_lifting(
the_ir, self.typingctx, self.targetctx, self.flags, locals={},
)
self.assertIn(
"Unsupported context manager in use",
str(raises.exception),
)
def test_with_as_fails_gracefully(self):
@njit
def foo():
with bypass_context as bp:
pass
with self.assertRaises(errors.UnsupportedBytecodeError) as raises:
foo()
excstr = str(raises.exception)
msg = ("The 'with (context manager) as (variable):' construct is not "
"supported.")
self.assertIn(msg, excstr)
| TestBogusContext |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 3385,
"end": 3897
} | class ____(type):
# Set this as a metaclass to trace function calls in code.
# This slows down code generation and makes much larger files.
def __new__(cls, name, bases, attrs):
from types import FunctionType
from .Code import CCodeWriter
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType):
attrs[mname] = write_func_call(m, CCodeWriter)
return super().__new__(cls, name, bases, attrs)
| VerboseCodeWriter |
python | kamyu104__LeetCode-Solutions | Python/remove-vowels-from-a-string.py | {
"start": 29,
"end": 235
} | class ____(object):
def removeVowels(self, S):
"""
:type S: str
:rtype: str
"""
lookup = set("aeiou")
return "".join(c for c in S if c not in lookup)
| Solution |
python | getsentry__sentry | src/sentry/incidents/grouptype.py | {
"start": 12527,
"end": 14167
} | class ____(GroupType):
type_id = 8001
slug = "metric_issue"
description = "Metric issue triggered"
category = GroupCategory.METRIC_ALERT.value
category_v2 = GroupCategory.METRIC.value
creation_quota = Quota(3600, 60, 100)
default_priority = PriorityLevel.HIGH
enable_auto_resolve = False
enable_escalation_detection = False
enable_status_change_workflow_notifications = False
enable_workflow_notifications = False
enable_user_status_and_priority_changes = False
detector_settings = DetectorSettings(
handler=MetricIssueDetectorHandler,
validator=MetricIssueDetectorValidator,
config_schema={
"$schema": "https://json-schema.org/draft/2020-12/schema",
"description": "A representation of a metric detector config dict",
"type": "object",
"required": ["detection_type"],
"properties": {
"comparison_delta": {
"type": ["integer", "null"],
"enum": COMPARISON_DELTA_CHOICES,
},
"detection_type": {
"type": "string",
"enum": [detection_type.value for detection_type in AlertRuleDetectionType],
},
},
},
)
@classmethod
def allow_ingest(cls, organization: Organization) -> bool:
return True
@classmethod
def allow_post_process_group(cls, organization: Organization) -> bool:
return True
@classmethod
def build_visible_feature_name(cls) -> str:
return "organizations:workflow-engine-ui"
| MetricIssue |
python | PyCQA__pylint | tests/testutils/test_output_line.py | {
"start": 546,
"end": 5195
} | class ____(Protocol):
def __call__(self, confidence: Confidence = HIGH) -> Message: ...
@pytest.fixture()
def message() -> _MessageCallable:
def inner(confidence: Confidence = HIGH) -> Message:
return Message(
symbol="missing-docstring",
msg_id="C0123",
location=MessageLocationTuple(
"abspath", "path", "module", "obj", 1, 2, 1, 3
),
msg="msg",
confidence=confidence,
)
return inner
def test_output_line() -> None:
"""Test that the OutputLine NamedTuple is instantiated correctly."""
output_line = OutputLine(
symbol="missing-docstring",
lineno=1,
column=2,
end_lineno=1,
end_column=4,
object="",
msg="Missing docstring's bad.",
confidence=HIGH.name,
)
assert output_line.symbol == "missing-docstring"
assert output_line.lineno == 1
assert output_line.column == 2
assert output_line.end_lineno == 1
assert output_line.end_column == 4
assert output_line.object == ""
assert output_line.msg == "Missing docstring's bad."
assert output_line.confidence == "HIGH"
def test_output_line_from_message(message: _MessageCallable) -> None:
"""Test that the OutputLine NamedTuple is instantiated correctly with from_msg."""
output_line = OutputLine.from_msg(message())
assert output_line.symbol == "missing-docstring"
assert output_line.lineno == 1
assert output_line.column == 2
assert output_line.end_lineno == 1
assert output_line.end_column == 3
assert output_line.object == "obj"
assert output_line.msg == "msg"
assert output_line.confidence == "HIGH"
output_line_with_end = OutputLine.from_msg(message(), True)
assert output_line_with_end.symbol == "missing-docstring"
assert output_line_with_end.lineno == 1
assert output_line_with_end.column == 2
assert output_line_with_end.end_lineno == 1
assert output_line_with_end.end_column == 3
assert output_line_with_end.object == "obj"
assert output_line_with_end.msg == "msg"
assert output_line_with_end.confidence == "HIGH"
output_line_without_end = OutputLine.from_msg(message(), False)
assert output_line_without_end.symbol == "missing-docstring"
assert output_line_without_end.lineno == 1
assert output_line_without_end.column == 2
assert output_line_without_end.end_lineno is None
assert output_line_without_end.end_column is None
assert output_line_without_end.object == "obj"
assert output_line_without_end.msg == "msg"
assert output_line_without_end.confidence == "HIGH"
@pytest.mark.parametrize("confidence", [HIGH, INFERENCE])
def test_output_line_to_csv(confidence: Confidence, message: _MessageCallable) -> None:
"""Test that the OutputLine NamedTuple is instantiated correctly with from_msg
and then converted to csv.
"""
output_line = OutputLine.from_msg(message(confidence), True)
csv = output_line.to_csv()
assert csv == (
"missing-docstring",
"1",
"2",
"1",
"3",
"obj",
"msg",
confidence.name,
)
output_line_without_end = OutputLine.from_msg(message(confidence), False)
csv = output_line_without_end.to_csv()
assert csv == (
"missing-docstring",
"1",
"2",
"None",
"None",
"obj",
"msg",
confidence.name,
)
def test_output_line_from_csv() -> None:
"""Test that the OutputLine NamedTuple is instantiated correctly with from_csv.
Test OutputLine of length 8.
"""
proper_csv = ["missing-docstring", "1", "2", "1", "None", "obj", "msg", "HIGH"]
output_line = OutputLine.from_csv(proper_csv)
assert output_line == OutputLine(
symbol="missing-docstring",
lineno=1,
column=2,
end_lineno=1,
end_column=None,
object="obj",
msg="msg",
confidence="HIGH",
)
output_line_with_end = OutputLine.from_csv(proper_csv, True)
assert output_line_with_end == OutputLine(
symbol="missing-docstring",
lineno=1,
column=2,
end_lineno=1,
end_column=None,
object="obj",
msg="msg",
confidence="HIGH",
)
output_line_without_end = OutputLine.from_csv(proper_csv, False)
assert output_line_without_end == OutputLine(
symbol="missing-docstring",
lineno=1,
column=2,
end_lineno=None,
end_column=None,
object="obj",
msg="msg",
confidence="HIGH",
)
| _MessageCallable |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 11120,
"end": 11257
} | class ____(desc_sig_element, _sig_element=True):
"""Node for a general keyword in a signature."""
classes = ['k']
| desc_sig_keyword |
python | tensorflow__tensorflow | tensorflow/python/keras/saving/utils_v1/mode_keys.py | {
"start": 798,
"end": 1260
} | class ____:
"""Standard names for model modes.
The following standard keys are defined:
* `TRAIN`: training/fitting mode.
* `TEST`: testing/evaluation mode.
* `PREDICT`: prediction/inference mode.
"""
TRAIN = 'train'
TEST = 'test'
PREDICT = 'predict'
def is_predict(mode):
return mode == KerasModeKeys.PREDICT
def is_eval(mode):
return mode == KerasModeKeys.TEST
def is_train(mode):
return mode == KerasModeKeys.TRAIN
| KerasModeKeys |
python | pytorch__pytorch | test/quantization/pt2e/test_representation.py | {
"start": 694,
"end": 10168
} | class ____(QuantizationTestCase):
def _test_representation(
self,
model: torch.nn.Module,
example_inputs: tuple[Any, ...],
quantizer: Quantizer,
ref_node_occurrence: dict[ns, int],
non_ref_node_occurrence: dict[ns, int],
fixed_output_tol: Optional[float] = None,
output_scale_idx: int = 2,
) -> torch.nn.Module:
# resetting dynamo cache
torch._dynamo.reset()
model = export(model, example_inputs, strict=True).module()
model_copy = copy.deepcopy(model)
model = prepare_pt2e(model, quantizer)
# Calibrate
model(*example_inputs)
model = convert_pt2e(model, use_reference_representation=True)
self.checkGraphModuleNodes(model, expected_node_occurrence=ref_node_occurrence)
# make sure it runs
pt2e_quant_output = model(*example_inputs)
# TODO: torchdynamo times out when we do this, we can enable numerical checking
# after that is fixed
model_copy = prepare_pt2e(model_copy, quantizer)
# Calibrate
model_copy(*example_inputs)
model_copy = convert_pt2e(model_copy, use_reference_representation=False)
self.checkGraphModuleNodes(
model_copy, expected_node_occurrence=non_ref_node_occurrence
)
pt2e_quant_output_copy = model_copy(*example_inputs)
output_tol = None
if fixed_output_tol is not None:
output_tol = fixed_output_tol
else:
idx = 0
for n in model_copy.graph.nodes:
if (
n.target
== torch.ops.quantized_decomposed.quantize_per_tensor.default
):
idx += 1
if idx == output_scale_idx:
output_tol = n.args[1]
assert output_tol is not None
# make sure the result is off by one at most in the quantized integer representation
self.assertTrue(
torch.max(torch.abs(pt2e_quant_output_copy - pt2e_quant_output))
<= (2 * output_tol + 1e-5)
)
def test_static_linear(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x)
quantizer = XNNPACKQuantizer()
operator_config = get_symmetric_quantization_config(is_per_channel=False)
quantizer.set_global(operator_config)
example_inputs = (torch.randn(2, 5),)
self._test_representation(
M().eval(),
example_inputs,
quantizer,
ref_node_occurrence={},
non_ref_node_occurrence={},
)
def test_dynamic_linear(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x)
quantizer = XNNPACKQuantizer()
operator_config = get_symmetric_quantization_config(
is_per_channel=False, is_dynamic=True
)
quantizer.set_global(operator_config)
example_inputs = (torch.randn(2, 5),)
self._test_representation(
M().eval(),
example_inputs,
quantizer,
ref_node_occurrence={},
non_ref_node_occurrence={},
fixed_output_tol=1e-4,
)
def test_conv2d(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv2d = torch.nn.Conv2d(3, 3, 3)
def forward(self, x):
return self.conv2d(x)
quantizer = XNNPACKQuantizer()
operator_config = get_symmetric_quantization_config(is_per_channel=False)
quantizer.set_global(operator_config)
example_inputs = (torch.randn(1, 3, 3, 3),)
self._test_representation(
M().eval(),
example_inputs,
quantizer,
ref_node_occurrence={},
non_ref_node_occurrence={},
)
def test_add(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y):
return x + y
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
M().eval()
example_inputs = (
torch.randn(1, 3, 3, 3),
torch.randn(1, 3, 3, 3),
)
self._test_representation(
M().eval(),
example_inputs,
quantizer,
ref_node_occurrence={},
non_ref_node_occurrence={},
)
def test_add_relu(self):
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y):
out = x + y
out = torch.nn.functional.relu(out)
return out
quantizer = XNNPACKQuantizer()
operator_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(operator_config)
example_inputs = (
torch.randn(1, 3, 3, 3),
torch.randn(1, 3, 3, 3),
)
ref_node_occurrence = {
ns.call_function(out_dtype): 2,
}
self._test_representation(
M().eval(),
example_inputs,
quantizer,
ref_node_occurrence=ref_node_occurrence,
non_ref_node_occurrence={},
)
def test_maxpool2d(self):
quantizer = XNNPACKQuantizer()
operator_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(operator_config)
m_eager = TestHelperModules.ConvMaxPool2d().eval()
example_inputs = (torch.randn(1, 2, 2, 2),)
self._test_representation(
m_eager,
example_inputs,
quantizer,
ref_node_occurrence={},
non_ref_node_occurrence={},
)
def test_qdq_per_channel(self):
"""Test representation for quantize_per_channel and dequantize_per_channel op"""
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = torch.nn.Linear(5, 5)
def forward(self, x):
return self.linear(x)
quantizer = XNNPACKQuantizer()
# use per channel quantization for weight
operator_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(operator_config)
M().eval()
inputs = [
(torch.randn(1, 5),),
(torch.randn(1, 3, 5),),
(torch.randn(1, 3, 3, 5),),
(torch.randn(1, 3, 3, 3, 5),),
]
for example_inputs in inputs:
ref_node_occurrence = {
ns.call_function(
torch.ops.quantized_decomposed.quantize_per_channel.default
): 0,
ns.call_function(
torch.ops.quantized_decomposed.dequantize_per_channel.default
): 0,
}
non_ref_node_occurrence = {
# quantize_per_channel is folded
ns.call_function(
torch.ops.quantized_decomposed.quantize_per_channel.default
): 0,
ns.call_function(
torch.ops.quantized_decomposed.dequantize_per_channel.default
): 1,
}
self._test_representation(
M().eval(),
example_inputs,
quantizer,
ref_node_occurrence,
non_ref_node_occurrence,
output_scale_idx=2,
)
def test_qdq(self):
"""Test representation for quantize and dequantize op"""
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x, y):
return x + y
quantizer = XNNPACKQuantizer()
quantization_config = get_symmetric_quantization_config(is_per_channel=True)
quantizer.set_global(quantization_config)
M().eval()
example_inputs = (
torch.randn(1, 3, 3, 3),
torch.randn(1, 3, 3, 3),
)
ref_node_occurrence = {
ns.call_function(torch.ops.quantized_decomposed.quantize_per_tensor): 0,
ns.call_function(torch.ops.quantized_decomposed.dequantize_per_tensor): 0,
}
non_ref_node_occurrence = {
ns.call_function(
torch.ops.quantized_decomposed.quantize_per_tensor.default
): 3,
ns.call_function(
torch.ops.quantized_decomposed.dequantize_per_tensor.default
): 3,
}
self._test_representation(
M().eval(),
example_inputs,
quantizer,
ref_node_occurrence,
non_ref_node_occurrence,
)
if __name__ == "__main__":
raise_on_run_directly("test/test_quantization.py")
| TestPT2ERepresentation |
python | scipy__scipy | benchmarks/benchmarks/sparse.py | {
"start": 16372,
"end": 16959
} | class ____(Benchmark):
param_names = ['sparse_type', 'density', 'format']
params = [
['spmatrix', 'sparray'],
[0.05, 0.01],
['csr', 'csc', 'lil'],
]
def setup(self, sparse_type, density, format):
n = 500
k = 1000
if sparse_type == "sparray":
self.X = sparse.random_array((n, k), format=format, density=density)
else:
self.X = sparse.random(n, k, format=format, density=density)
def time_iteration(self, sparse_type, density, format):
for row in self.X:
pass
| Iteration |
python | plotly__plotly.py | plotly/graph_objs/contour/colorbar/_tickfont.py | {
"start": 233,
"end": 9918
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contour.colorbar"
_path_str = "contour.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contour.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contour.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | huggingface__transformers | src/transformers/models/vit/configuration_vit.py | {
"start": 794,
"end": 5556
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ViTModel`]. It is used to instantiate an ViT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ViT
[google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
encoder_stride (`int`, *optional*, defaults to 16):
Factor to increase the spatial resolution by in the decoder head for masked image modeling.
pooler_output_size (`int`, *optional*):
Dimensionality of the pooler layer. If None, defaults to `hidden_size`.
pooler_act (`str`, *optional*, defaults to `"tanh"`):
The activation function to be used by the pooler.
Example:
```python
>>> from transformers import ViTConfig, ViTModel
>>> # Initializing a ViT vit-base-patch16-224 style configuration
>>> configuration = ViTConfig()
>>> # Initializing a model (with random weights) from the vit-base-patch16-224 style configuration
>>> model = ViTModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "vit"
def __init__(
self,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-12,
image_size=224,
patch_size=16,
num_channels=3,
qkv_bias=True,
encoder_stride=16,
pooler_output_size=None,
pooler_act="tanh",
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.encoder_stride = encoder_stride
self.pooler_output_size = pooler_output_size if pooler_output_size else hidden_size
self.pooler_act = pooler_act
__all__ = ["ViTConfig"]
| ViTConfig |
python | huggingface__transformers | src/transformers/integrations/fbgemm_fp8.py | {
"start": 1012,
"end": 3157
} | class ____(torch.nn.Linear):
def __init__(self, in_features, out_features, bias, weight_dtype=torch.float32):
super().__init__(in_features, out_features, bias)
self.in_features = in_features
self.out_features = out_features
self.weight = torch.nn.Parameter(torch.zeros((out_features, in_features), dtype=torch.float8_e4m3fn))
self.weight_scale = torch.nn.Parameter(torch.zeros((out_features, 1), dtype=weight_dtype))
self.register_buffer("input_scale_ub", torch.zeros([1], dtype=torch.float), persistent=False)
if bias:
self.bias = torch.nn.Parameter(torch.zeros((self.out_features), dtype=weight_dtype))
else:
self.bias = None
def forward(self, x):
# quantize_fp8_per_row will squash the leading dimensions, so save the desired shape here
output_shape = (*x.shape[:-1], -1)
# x_quantized and x_scale are not necessarily on the same device as x, this is an issue.
# https://github.com/pytorch/FBGEMM/blob/e08af8539c391437f447173863df0f3f6f6f1855/fbgemm_gpu/experimental/gen_ai/src/quantize/quantize.cu#L1237C3-L1237C45
x_quantized, x_scale = torch.ops.fbgemm.quantize_fp8_per_row(
x.view(-1, x.shape[-1]).contiguous(), scale_ub=self.input_scale_ub
)
# moving x_quantized, x_scale here creates glibberish output ... However, if we move the output, it works
# x_quantized, x_scale = x_quantized.to(x.device), x_scale.to(x.device)
# The computation still happens on the device where self.weight is even if x_quantized is not on the same device as self.weight
weight_scale_float32 = self.weight_scale.to(torch.float32)
output = torch.ops.fbgemm.f8f8bf16_rowwise(
x_quantized, self.weight, x_scale, weight_scale_float32, use_fast_accum=True
)
output = output + self.bias if self.bias is not None else output
# Hacky for now, we have the output to the device of x
output = output.to(x.device)
output = output.reshape(output_shape)
del x_quantized, x_scale
return output
| FbgemmFp8Linear |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 42215,
"end": 42780
} | class ____(sgqlc.types.Enum):
"""The state of an OAuth application when it was created.
Enumeration Choices:
* `ACTIVE`: The OAuth application was active and allowed to have
OAuth Accesses.
* `PENDING_DELETION`: The OAuth application was in the process of
being deleted.
* `SUSPENDED`: The OAuth application was suspended from generating
OAuth Accesses due to abuse or security concerns.
"""
__schema__ = github_schema
__choices__ = ("ACTIVE", "PENDING_DELETION", "SUSPENDED")
| OauthApplicationCreateAuditEntryState |
python | kamyu104__LeetCode-Solutions | Python/minimum-speed-to-arrive-on-time.py | {
"start": 58,
"end": 819
} | class ____(object):
def minSpeedOnTime(self, dist, hour):
"""
:type dist: List[int]
:type hour: float
:rtype: int
"""
def ceil(a, b):
return (a+b-1)//b
def total_time(dist, x):
return sum(ceil(dist[i], x) for i in xrange(len(dist)-1)) + float(dist[-1])/x
def check(dist, hour, x):
return total_time(dist, x) <= hour
MAX_SPEED = 10**7
if not check(dist, hour, MAX_SPEED):
return -1
left, right = 1, MAX_SPEED
while left <= right:
mid = left + (right-left)//2
if check(dist, hour, mid):
right = mid-1
else:
left = mid+1
return left
| Solution |
python | tornadoweb__tornado | tornado/test/httpclient_test.py | {
"start": 1642,
"end": 1848
} | class ____(RequestHandler):
def prepare(self):
# For testing error handling of a redirect with no location header.
self.set_status(301)
self.finish()
| RedirectWithoutLocationHandler |
python | bokeh__bokeh | src/bokeh/models/callbacks.py | {
"start": 8452,
"end": 9038
} | class ____(Callback):
""" Open a dialog box. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
dialog = Required(Instance(".models.ui.Dialog"), help="""
A dialog instance to open.
This will either build and display an new dialog view or re-open an
existing dialog view if it was hidden.
.. note::
To display multiple instances of dialog, one needs to clone the
dialog's model and use another instance of ``OpenDialog``.
""")
| OpenDialog |
python | django-haystack__django-haystack | test_haystack/spatial/models.py | {
"start": 48,
"end": 986
} | class ____(models.Model):
username = models.CharField(max_length=255)
# We're going to do some non-GeoDjango action, since the setup is
# complex enough. You could just as easily do:
#
# location = models.PointField()
#
# ...and your ``search_indexes.py`` could be less complex.
latitude = models.FloatField()
longitude = models.FloatField()
comment = models.CharField(
max_length=140, blank=True, default="", help_text="Say something pithy."
)
created = models.DateTimeField(default=datetime.datetime.now)
class Meta:
ordering = ["-created"]
# Again, with GeoDjango, this would be unnecessary.
def get_location(self):
# Nothing special about this Point, but ensure that's we don't have to worry
# about import paths.
from django.contrib.gis.geos import Point
pnt = Point(self.longitude, self.latitude)
return pnt
| Checkin |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/common/parameters.py | {
"start": 10943,
"end": 15628
} | class ____(BaseParam[T]):
"""Filter on attribute."""
def __init__(
self,
attribute: InstrumentedAttribute,
value: T | None = None,
filter_option: FilterOptionEnum = FilterOptionEnum.EQUAL,
skip_none: bool = True,
) -> None:
super().__init__(value, skip_none)
self.attribute: InstrumentedAttribute = attribute
self.value: T | None = value
self.filter_option: FilterOptionEnum = filter_option
def to_orm(self, select: Select) -> Select:
if isinstance(self.value, (list, str)) and not self.value and self.skip_none:
return select
if self.value is None and self.skip_none:
return select
if isinstance(self.value, list):
if self.filter_option == FilterOptionEnum.IN:
return select.where(self.attribute.in_(self.value))
if self.filter_option == FilterOptionEnum.NOT_IN:
return select.where(self.attribute.notin_(self.value))
if self.filter_option == FilterOptionEnum.ANY_EQUAL:
conditions = [self.attribute == val for val in self.value]
return select.where(or_(*conditions))
if self.filter_option == FilterOptionEnum.ALL_EQUAL:
conditions = [self.attribute == val for val in self.value]
return select.where(and_(*conditions))
raise HTTPException(
400, f"Invalid filter option {self.filter_option} for list value {self.value}"
)
if self.filter_option == FilterOptionEnum.EQUAL:
return select.where(self.attribute == self.value)
if self.filter_option == FilterOptionEnum.NOT_EQUAL:
return select.where(self.attribute != self.value)
if self.filter_option == FilterOptionEnum.LESS_THAN:
return select.where(self.attribute < self.value)
if self.filter_option == FilterOptionEnum.LESS_THAN_EQUAL:
return select.where(self.attribute <= self.value)
if self.filter_option == FilterOptionEnum.GREATER_THAN:
return select.where(self.attribute > self.value)
if self.filter_option == FilterOptionEnum.GREATER_THAN_EQUAL:
return select.where(self.attribute >= self.value)
if self.filter_option == FilterOptionEnum.IS_NONE:
if self.value is None:
return select
if self.value is False:
return select.where(self.attribute.is_not(None))
if self.value is True:
return select.where(self.attribute.is_(None))
if self.filter_option == FilterOptionEnum.CONTAINS:
# For JSON/JSONB columns, convert to text before applying LIKE
from sqlalchemy import Text, cast
if str(self.attribute.type).upper() in ("JSON", "JSONB"):
return select.where(cast(self.attribute, Text).contains(self.value))
return select.where(self.attribute.contains(self.value))
raise ValueError(f"Invalid filter option {self.filter_option} for value {self.value}")
@classmethod
def depends(cls, *args: Any, **kwargs: Any) -> Self:
raise NotImplementedError("Use filter_param_factory instead , depends is not implemented.")
def filter_param_factory(
attribute: ColumnElement | InstrumentedAttribute,
_type: type,
filter_option: FilterOptionEnum = FilterOptionEnum.EQUAL,
filter_name: str | None = None,
default_value: T | None = None,
default_factory: Callable[[], T | None] | None = None,
skip_none: bool = True,
transform_callable: Callable[[T | None], Any] | None = None,
*,
description: str | None = None,
) -> Callable[[T | None], FilterParam[T | None]]:
# if filter_name is not provided, use the attribute name as the default
filter_name = filter_name or getattr(attribute, "name", str(attribute))
# can only set either default_value or default_factory
query = (
Query(alias=filter_name, default_factory=default_factory, description=description)
if default_factory is not None
else Query(alias=filter_name, default=default_value, description=description)
)
def depends_filter(value: T | None = query) -> FilterParam[T | None]:
if transform_callable:
value = transform_callable(value)
# Cast to InstrumentedAttribute for type compatibility
attr = cast("InstrumentedAttribute", attribute)
return FilterParam(attr, value, filter_option, skip_none)
# add type hint to value at runtime
depends_filter.__annotations__["value"] = _type
return depends_filter
| FilterParam |
python | allegroai__clearml | clearml/utilities/dicts.py | {
"start": 897,
"end": 1714
} | class ____(dict):
"""
Overloading getitem so that the 'data' copy is only done when the dictionary item is accessed.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(BlobsDict, self).__init__(*args, **kwargs)
def __getitem__(self, k: Any) -> Any:
val = super(BlobsDict, self).__getitem__(k)
if isinstance(val, dict):
return BlobsDict(val)
# We need to ask isinstance without actually importing blob here
# so we accept that in order to appreciate beauty in life we must have a dash of ugliness.
# ans instead of -
# elif isinstance(val, Blob):
# we ask:
elif hasattr(val, "__class__") and val.__class__.__name__ == "Blob":
return val.data
else:
return val
| BlobsDict |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/descriptor1.py | {
"start": 212,
"end": 1281
} | class ____:
@property
def prop1(self) -> int | None: ...
@prop1.setter
def prop1(self, val: int | None) -> None: ...
@property
def prop2(self) -> int | None: ...
@prop2.setter
def prop2(self, val: int) -> None: ...
@prop2.deleter
def prop2(self) -> None: ...
@property
def prop3(self) -> int: ...
@prop3.setter
def prop3(self, val: int | None) -> None: ...
@prop3.deleter
def prop3(self) -> None: ...
def func1(obj: A) -> Literal[3]:
obj.prop1 = None
b: None = obj.prop1
obj.prop1 = 3
v1 = obj.prop1 + 1
return obj.prop1
def func2(obj: A) -> Literal[3]:
obj.prop2 = 3
# This should generate an error because prop2 isn't
# narrowed in this case.
b: int = obj.prop2
# This should generate an error because prop2 isn't
# narrowed in this case.
return obj.prop2
def func3(obj: A) -> Literal[3]:
obj.prop3 = 3
b: int = obj.prop3
# This should generate an error because prop2 isn't
# narrowed in this case.
return obj.prop3
| A |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_glue.py | {
"start": 1170,
"end": 2170
} | class ____(BaseAwsLinksTestCase):
link_class = GlueJobRunDetailsLink
def test_extra_link(self, mock_supervisor_comms):
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key=self.link_class.key,
value={
"region_name": "ap-southeast-2",
"aws_domain": self.link_class.get_aws_domain("aws"),
"aws_partition": "aws",
"job_run_id": "11111",
"job_name": "test_job_name",
},
)
self.assert_extra_link_url(
expected_url=(
"https://console.aws.amazon.com/gluestudio/home"
"?region=ap-southeast-2#/job/test_job_name/run/11111"
),
region_name="ap-southeast-2",
aws_partition="aws",
job_run_id="11111",
job_name="test_job_name",
)
| TestGlueJobRunDetailsLink |
python | tensorflow__tensorflow | tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py | {
"start": 35194,
"end": 36576
} | class ____(test.TestCase):
def _validateInTopK(self, predictions, target, k, expected):
np_ans = np.array(expected, np.bool)
with self.cached_session(use_gpu=True) as _:
output = nn_ops.in_top_k(predictions, target, k)
nn_ans = self.evaluate(output)
self.assertAllEqual(np_ans, nn_ans)
self.assertShapeEqual(np_ans, output)
def testInTop1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [3, 2]
self._validateInTopK(predictions, target, 1, [True, False])
def testInTop2(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [2, 2]
self._validateInTopK(predictions, target, 2, [False, True])
def testInTop2Tie(self):
# Class 2 and 3 tie for 2nd, so both are considered in top 2.
predictions = [[0.1, 0.3, 0.2, 0.2], [0.1, 0.3, 0.2, 0.2]]
target = [2, 3]
self._validateInTopK(predictions, target, 2, [True, True])
def testInTop2_int64Target(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = numpy_compat.np_asarray([0, 2]).astype(np.int64)
self._validateInTopK(predictions, target, 2, [False, True])
def testTensorK(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
target = [0, 2]
k = constant_op.constant(3)
self._validateInTopK(predictions, target, k, [False, True])
| InTopKTest |
python | pytorch__pytorch | test/distributed/tensor/test_dtensor_dispatch_overhead.py | {
"start": 2019,
"end": 5346
} | class ____(DTensorTestBase):
@property
def world_size(self) -> int:
return 4
@skip_if_lt_x_gpu(4)
@with_comms
def test_dtensor_add_op_dispatch_overhead(self):
if torch.cuda.is_available():
device_props = torch.cuda.get_device_name(0)
gpu_name = device_props
logger.info("running on %s", gpu_name)
# TODO: adjust `expected_propagate_time` and `expected_dispatch_time` to target different hardware
else:
self.skipTest("CUDA not available")
expected_propagate_time = 880 # noqa: F841
expected_dispatch_time = 90 # noqa: F841
diff_percent_threshold = 0.20 # noqa: F841
propagator = DTensor._op_dispatcher.sharding_propagator
device_mesh = init_device_mesh("cuda", (self.world_size,))
input_data = torch.rand(512, 512, device="cuda")
a = distribute_tensor(input_data, device_mesh, [Shard(0)])
# warm up
with TimeCaptureMode() as tcm:
for _ in range(100):
propagator.propagate_op_sharding.cache.cache_clear()
_ = a + a
# record number
propagator.propagate_op_sharding.cache.cache_clear()
_ = a + a
add_dispatch_cache_miss, add_dispatch_cache_hit = tcm.op_to_time[
"add.Tensor"
][-1]
all_miss_performance = [0] * self.world_size
all_hit_performance = [0] * self.world_size
torch.distributed.all_gather_object(
all_miss_performance, add_dispatch_cache_miss
)
torch.distributed.all_gather_object(all_hit_performance, add_dispatch_cache_hit)
if self.rank == 0:
logger.info(
"add op dispatch cache miss from %s ranks: %s us, \n"
"add op dispatch cache hit from %s ranks: %s us",
self.world_size,
all_miss_performance,
self.world_size,
all_hit_performance,
)
# compare median with expected range
miss_performance = statistics.median(all_miss_performance)
hit_performance = statistics.median(all_hit_performance)
extra_time_spend_on_strategy_propagate = miss_performance - hit_performance # noqa: F841
# Do not enabling the assertion check due to flaky performance concern
# self.assertTrue(
# (extra_time_spend_on_strategy_propagate - expected_propagate_time)
# / expected_propagate_time
# < diff_percent_threshold,
# msg=(
# f"extra time spend on strategy propagate is {extra_time_spend_on_strategy_propagate} us, "
# f"performance diff is {diff_percent_threshold * 100}% greater than expected {expected_propagate_time} us"
# ),
# )
# self.assertTrue(
# (hit_performance - expected_dispatch_time) / expected_dispatch_time
# < diff_percent_threshold,
# msg=(
# f"DTensor dispatch time is {hit_performance} us, "
# f"performance diff is {diff_percent_threshold * 100}% greater than "
# f"expected {expected_dispatch_time} us"
# ),
# )
if __name__ == "__main__":
run_tests()
| DistOpDispatchOverHead |
python | tiangolo__fastapi | tests/test_response_model_sub_types.py | {
"start": 128,
"end": 5379
} | class ____(BaseModel):
name: str
app = FastAPI()
@app.get("/valid1", responses={"500": {"model": int}})
def valid1():
pass
@app.get("/valid2", responses={"500": {"model": List[int]}})
def valid2():
pass
@app.get("/valid3", responses={"500": {"model": Model}})
def valid3():
pass
@app.get("/valid4", responses={"500": {"model": List[Model]}})
def valid4():
pass
client = TestClient(app)
def test_path_operations():
response = client.get("/valid1")
assert response.status_code == 200, response.text
response = client.get("/valid2")
assert response.status_code == 200, response.text
response = client.get("/valid3")
assert response.status_code == 200, response.text
response = client.get("/valid4")
assert response.status_code == 200, response.text
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/valid1": {
"get": {
"summary": "Valid1",
"operationId": "valid1_valid1_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"500": {
"description": "Internal Server Error",
"content": {
"application/json": {
"schema": {
"title": "Response 500 Valid1 Valid1 Get",
"type": "integer",
}
}
},
},
},
}
},
"/valid2": {
"get": {
"summary": "Valid2",
"operationId": "valid2_valid2_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"500": {
"description": "Internal Server Error",
"content": {
"application/json": {
"schema": {
"title": "Response 500 Valid2 Valid2 Get",
"type": "array",
"items": {"type": "integer"},
}
}
},
},
},
}
},
"/valid3": {
"get": {
"summary": "Valid3",
"operationId": "valid3_valid3_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"500": {
"description": "Internal Server Error",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Model"}
}
},
},
},
}
},
"/valid4": {
"get": {
"summary": "Valid4",
"operationId": "valid4_valid4_get",
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"500": {
"description": "Internal Server Error",
"content": {
"application/json": {
"schema": {
"title": "Response 500 Valid4 Valid4 Get",
"type": "array",
"items": {"$ref": "#/components/schemas/Model"},
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Model": {
"title": "Model",
"required": ["name"],
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
}
}
},
}
| Model |
python | numba__numba | numba/tests/test_conditions_as_predicates.py | {
"start": 142,
"end": 5275
} | class ____(TestCase):
def test_scalars(self):
# checks that scalar types can be used as predicates
dts = [np.int8, np.uint16, np.int64, np.float32, np.float64,
np.complex128, int, float, complex, str, bool]
for dt in dts:
for c in 1, 0:
x = dt(c)
@njit
def foo():
if x:
return 10
else:
return 20
self.assertEqual(foo(), foo.py_func())
self.assertEqual(foo(), 10 if c == 1 or dt is str else 20)
# empty string
@njit
def foo(x):
if x:
return 10
else:
return 20
s = ""
self.assertEqual(foo(s), foo.py_func(s))
def test_typed_list(self):
@njit
def foo(x):
if x:
return 10
else:
return 20
# empty list
z = List.empty_list(types.int64)
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 20)
# non-empty list
z.append(1)
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 10)
def test_reflected_list(self):
# non-empty
@njit
def foo(x):
if x:
return 10
else:
return 20
z = [1]
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 10)
# non-empty local
@njit
def foo():
y = [1, 2]
if y:
return 10
else:
return 20
self.assertEqual(foo(), foo.py_func())
self.assertEqual(foo.py_func(), 10)
# empty local
@njit
def foo():
y = [1, 2]
y.pop()
y.pop()
assert len(y) == 0
if y:
return 10
else:
return 20
self.assertEqual(foo(), foo.py_func())
self.assertEqual(foo.py_func(), 20)
def test_reflected_set(self):
# non-empty
@njit
def foo(x):
if x:
return 10
else:
return 20
z = {1}
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 10)
# non-empty local
@njit
def foo():
y = {1, 2}
if y:
return 10
else:
return 20
self.assertEqual(foo(), foo.py_func())
self.assertEqual(foo.py_func(), 10)
# empty local
@njit
def foo():
y = {1, 2}
y.pop()
y.pop()
assert len(y) == 0
if y:
return 10
else:
return 20
self.assertEqual(foo(), foo.py_func())
self.assertEqual(foo.py_func(), 20)
def test_typed_dict(self):
@njit
def foo(x):
if x:
return 10
else:
return 20
# empty
z = Dict.empty(types.int64, types.int64)
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 20)
# non-empty
z[2] = 3
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 10)
def test_arrays(self):
@njit
def foo(x):
if x:
return 10
else:
return 20
# non-empty 0d, True
z = np.array(1)
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 10)
# non-empty 0d, False
z = np.array(0)
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 20)
# non-empty nd True
z = np.array([[[1]]])
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 10)
# non-empty nd False
z = np.array([[[0]]])
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 20)
# various problems:
# empty, NumPy warns or raises if NumPy >= 2.2
z = np.empty(0)
if numpy_support.numpy_version >= (2, 2):
with self.assertRaises(ValueError) as raises:
foo(z)
msg = ("The truth value of an empty array is ambiguous."
" Use `array.size > 0` to check that an array is not empty.")
self.assertIn(msg, str(raises.exception))
else:
self.assertEqual(foo(z), foo.py_func(z))
self.assertEqual(foo.py_func(z), 20)
# nd, NumPy raises
z = np.array([1, 2])
with self.assertRaises(ValueError) as raises:
foo(z)
msg = ("The truth value of an array with more than one element "
"is ambiguous. Use a.any() or a.all()")
self.assertIn(msg, str(raises.exception))
| TestConditionsAsPredicates |
python | Netflix__metaflow | metaflow/plugins/airflow/airflow_decorator.py | {
"start": 516,
"end": 1781
} | class ____(StepDecorator):
name = "airflow_internal"
def task_pre_step(
self,
step_name,
task_datastore,
metadata,
run_id,
task_id,
flow,
graph,
retry_count,
max_user_code_retries,
ubf_context,
inputs,
):
meta = {}
meta["airflow-dag-run-id"] = os.environ["METAFLOW_AIRFLOW_DAG_RUN_ID"]
meta["airflow-job-id"] = os.environ["METAFLOW_AIRFLOW_JOB_ID"]
entries = [
MetaDatum(
field=k, value=v, type=k, tags=["attempt_id:{0}".format(retry_count)]
)
for k, v in meta.items()
]
# Register book-keeping metadata for debugging.
metadata.register_metadata(run_id, step_name, task_id, entries)
def task_finished(
self, step_name, flow, graph, is_task_ok, retry_count, max_user_code_retries
):
# This will pass the xcom when the task finishes.
xcom_values = {
TASK_ID_XCOM_KEY: os.environ["METAFLOW_AIRFLOW_TASK_ID"],
}
if graph[step_name].type == "foreach":
xcom_values[FOREACH_CARDINALITY_XCOM_KEY] = flow._foreach_num_splits
push_xcom_values(xcom_values)
| AirflowInternalDecorator |
python | ansible__ansible | test/units/module_utils/basic/test_exit_json.py | {
"start": 3933,
"end": 7480
} | class ____:
"""
Test that ExitJson and FailJson remove password-like values
"""
OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
DATA = (
(
dict(username='person', password='$ecret k3y'),
dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/',
not_secret='following the leader', msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
(
dict(username='person', password='password12345'),
dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/',
not_secret='following the leader', msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
(
dict(username='person', password='$ecret k3y'),
dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/',
not_secret='following the leader', msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
)
@pytest.mark.parametrize('am, stdin, return_val, expected',
(({'username': {}, 'password': {'no_log': True}, 'token': {'no_log': True}}, s, r, e)
for s, r, e in DATA),
indirect=['am', 'stdin'])
def test_exit_json_removes_values(self, am, capfd, return_val, expected):
with pytest.raises(SystemExit):
am.exit_json(**return_val)
out, err = capfd.readouterr()
assert json.loads(out) == expected
@pytest.mark.parametrize('am, stdin, return_val, expected',
(({'username': {}, 'password': {'no_log': True}, 'token': {'no_log': True}}, s, r, e)
for s, r, e in DATA),
indirect=['am', 'stdin'])
def test_fail_json_removes_values(self, am, capfd, return_val, expected):
expected['failed'] = True
with pytest.raises(SystemExit):
am.fail_json(**return_val)
out, err = capfd.readouterr()
assert json.loads(out) == expected
def test_record_module_result(self, mocker: pytest_mock.MockerFixture, stdin) -> None:
"""Ensure that the temporary _record_module_result hook is called correctly."""
recorded_result = None
expected_result = dict(changed=False, worked="yay")
def _record_module_result(_self, o: object) -> None:
assert isinstance(o, dict)
nonlocal recorded_result
recorded_result = o
from ansible.module_utils.basic import AnsibleModule
mocker.patch.object(AnsibleModule, '_record_module_result', _record_module_result)
am = AnsibleModule(argument_spec=dict())
with pytest.raises(SystemExit):
am.exit_json(**expected_result)
assert expected_result.items() <= recorded_result.items()
| TestAnsibleModuleExitValuesRemoved |
python | django-extensions__django-extensions | tests/templatetags/test_syntax_color.py | {
"start": 227,
"end": 3652
} | class ____(TestCase):
"""Tests for syntax_color tags."""
@classmethod
def setUpClass(cls):
cls.tmpdir = mkdtemp()
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdir)
def test_should_generate_pygments_css_file_in_temp_directory(self):
generate_pygments_css(self.tmpdir)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "pygments.css")))
def test_pygments_css_should_return_highlight_css(self):
content = """{% load syntax_color %}
{% pygments_css %}
"""
result = Template(content).render(Context())
self.assertIn(".highlight .hll", result)
def test_should_colorize_with_default_lexer(self):
ctx = Context({"code_string": "<h1>TEST</h1>"})
content = """{% load syntax_color %}
{{ code_string|colorize }}
"""
expected_result = """<div class="highlight"><pre><span></span><span class="nt"><h1></span>TEST<span class="nt"></h1></span>
</pre></div>"""
result = Template(content).render(ctx)
self.assertHTMLEqual(result, expected_result)
def test_colorize_should_return_value_if_lexer_class_not_found(self):
ctx = Context({"code_string": "<h1>TEST</h1>"})
content = """{% load syntax_color %}
{{ code_string|colorize:'invalid_lexer' }}
"""
expected_result = "<h1>TEST</h1>"
result = Template(content).render(ctx)
self.assertHTMLEqual(html.unescape(result), expected_result)
def test_should_colorize_table_with_default_lexer(self):
ctx = Context({"code_string": "<h1>TEST</h1>"})
content = """{% load syntax_color %}
{{ code_string|colorize_table }}
"""
result = Template(content).render(ctx)
self.assertIn('<table class="highlighttable">', result)
self.assertIn('<td class="linenos">', result)
self.assertIn(">1</", result)
self.assertIn(
'<span class="nt"><h1></span>TEST<span class="nt"></h1></span>',
result,
)
def test_colorize_table_should_return_value_if_lexer_class_not_found(self):
ctx = Context({"code_string": "<h1>TEST</h1>"})
content = """{% load syntax_color %}
{{ code_string|colorize_table:'invalid_lexer' }}
"""
expected_result = "<h1>TEST</h1>"
result = Template(content).render(ctx)
self.assertHTMLEqual(html.unescape(result), expected_result)
def test_should_colorize_noclasses_with_default_lexer(self):
ctx = Context({"code_string": "<h1>TEST</h1>"})
content = """{% load syntax_color %}
{{ code_string|colorize_noclasses }}
"""
expected_result = """<div class="highlight" style="background: #f8f8f8"><pre style="line-height: 125%;"><span></span><span style="color: #008000; font-weight: bold"><h1></span>TEST<span style="color: #008000; font-weight: bold"></h1></span>
</pre></div>"""
result = Template(content).render(ctx)
self.assertHTMLEqual(result, expected_result)
def test_colorize_noclasses_should_return_value_if_lexer_class_not_found(self):
ctx = Context({"code_string": "<h1>TEST</h1>"})
content = """{% load syntax_color %}
{{ code_string|colorize_noclasses:'invalid_lexer' }}
"""
expected_result = "<h1>TEST</h1>"
result = Template(content).render(ctx)
self.assertHTMLEqual(html.unescape(result), expected_result)
| SyntaxColorTagTests |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/quantization_test.py | {
"start": 988,
"end": 2075
} | class ____(op_bench.TorchBenchmarkBase):
r"""Benchmarks both quantization and dequantization."""
def init(self, C, M, N, dtype, mode):
assert mode in ("Q", "D")
self.input = torch.rand(C, M, N)
self.dtype = dtype
self.op = nnq.Quantize(scale=1.0, zero_point=0, dtype=dtype)
self.set_module_name("QuantizePerTensor")
if mode == "D":
self.input = self.op(self.input)
self.op = nnq.DeQuantize()
self.set_module_name("DequantizePerTensor")
self.inputs = {"input": self.input}
def forward(self, input):
return self.op(input)
op_bench.generate_pt_test(
quantize_per_tensor_configs_short + quantize_per_tensor_configs_long,
QuantizePerTensorBenchmark,
)
# === Per Channel quantization ===
quantize_per_channel_configs_short = op_bench.config_list(
cross_product_configs={"axis": (0,)}, **quantize_configs_short_dict
)
quantize_per_channel_configs_long = op_bench.cross_product_configs(
axis=(0, 1, 2), **quantize_configs_long_dict
)
| QuantizePerTensorBenchmark |
python | getsentry__sentry | src/sentry/users/services/user/model.py | {
"start": 540,
"end": 638
} | class ____(RpcModel):
id: int = 0
email: str = ""
is_verified: bool = False
| RpcUserEmail |
python | python-visualization__folium | folium/raster_layers.py | {
"start": 5597,
"end": 8143
} | class ____(Layer):
"""
Creates a Web Map Service (WMS) layer.
Parameters
----------
url : str
The url of the WMS server.
layers : str
Comma-separated list of WMS layers to show.
styles : str, optional
Comma-separated list of WMS styles.
fmt : str, default 'image/jpeg'
The format of the service output. Ex: 'image/png'
transparent: bool, default False
Whether the layer shall allow transparency.
version : str, default '1.1.1'
Version of the WMS service to use.
attr : str, default ''
The attribution of the service.
Will be displayed in the bottom right corner.
name : string, optional
The name of the Layer, as it will appear in LayerControls
overlay : bool, default True
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening.
**kwargs : additional keyword arguments
Passed through to the underlying tileLayer.wms object and can be used
for setting extra tileLayer.wms parameters or as extra parameters in
the WMS request.
See https://leafletjs.com/reference.html#tilelayer-wms
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = L.tileLayer.wms(
{{ this.url|tojson }},
{{ this.options|tojson }}
);
{% endmacro %}
"""
) # noqa
def __init__(
self,
url: str,
layers: str,
styles: str = "",
fmt: str = "image/jpeg",
transparent: bool = False,
version: str = "1.1.1",
attr: str = "",
name: Optional[str] = None,
overlay: bool = True,
control: bool = True,
show: bool = True,
**kwargs,
):
super().__init__(name=name, overlay=overlay, control=control, show=show)
self.url = url
kwargs["format"] = fmt
cql_filter = kwargs.pop("cql_filter", None)
self.options = parse_options(
layers=layers,
styles=styles,
transparent=transparent,
version=version,
attribution=attr,
**kwargs,
)
# special parameter that shouldn't be camelized
if cql_filter:
self.options["cql_filter"] = cql_filter
| WmsTileLayer |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 59425,
"end": 61945
} | class ____:
# sfx is sf(x). The values were computed with mpmath:
#
# from mpmath import mp
# mp.dps = 100
# def halfnorm_sf(x):
# return 2*(1 - mp.ncdf(x))
#
# E.g.
#
# >>> float(halfnorm_sf(1))
# 0.3173105078629141
#
@pytest.mark.parametrize('x, sfx', [(1, 0.3173105078629141),
(10, 1.523970604832105e-23)])
def test_sf_isf(self, x, sfx):
assert_allclose(stats.halfnorm.sf(x), sfx, rtol=1e-14)
assert_allclose(stats.halfnorm.isf(sfx), x, rtol=1e-14)
# reference values were computed via mpmath
# from mpmath import mp
# mp.dps = 100
# def halfnorm_cdf_mpmath(x):
# x = mp.mpf(x)
# return float(mp.erf(x/mp.sqrt(2.)))
@pytest.mark.parametrize('x, ref', [(1e-40, 7.978845608028653e-41),
(1e-18, 7.978845608028654e-19),
(8, 0.9999999999999988)])
def test_cdf(self, x, ref):
assert_allclose(stats.halfnorm.cdf(x), ref, rtol=1e-15)
@pytest.mark.parametrize("rvs_loc", [1e-5, 1e10])
@pytest.mark.parametrize("rvs_scale", [1e-2, 100, 1e8])
@pytest.mark.parametrize('fix_loc', [True, False])
@pytest.mark.parametrize('fix_scale', [True, False])
def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale,
fix_loc, fix_scale):
rng = np.random.default_rng(6762668991392531563)
data = stats.halfnorm.rvs(loc=rvs_loc, scale=rvs_scale, size=1000,
random_state=rng)
if fix_loc and fix_scale:
error_msg = ("All parameters fixed. There is nothing to "
"optimize.")
with pytest.raises(RuntimeError, match=error_msg):
stats.halflogistic.fit(data, floc=rvs_loc, fscale=rvs_scale)
return
kwds = {}
if fix_loc:
kwds['floc'] = rvs_loc
if fix_scale:
kwds['fscale'] = rvs_scale
# Numerical result may equal analytical result if the initial guess
# computed from moment condition is already optimal.
_assert_less_or_close_loglike(stats.halfnorm, data, **kwds,
maybe_identical=True)
def test_fit_error(self):
# `floc` bigger than the minimal data point
with pytest.raises(FitDataError):
stats.halfnorm.fit([1, 2, 3], floc=2)
| TestHalfNorm |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1587558,
"end": 1587740
} | class ____(sgqlc.types.Union):
"""Used for return value of Repository.issueOrPullRequest."""
__schema__ = github_schema
__types__ = (Issue, PullRequest)
| IssueOrPullRequest |
python | numba__numba | numba/core/types/containers.py | {
"start": 14754,
"end": 15956
} | class ____(Container):
"""
Type class for homogeneous sets.
"""
mutable = True
def __init__(self, dtype, reflected=False):
assert isinstance(dtype, (Hashable, Undefined))
self.dtype = dtype
self.reflected = reflected
cls_name = "reflected set" if reflected else "set"
name = "%s(%s)" % (cls_name, self.dtype)
super(Set, self).__init__(name=name)
@property
def key(self):
return self.dtype, self.reflected
@property
def iterator_type(self):
return SetIter(self)
def is_precise(self):
return self.dtype.is_precise()
def copy(self, dtype=None, reflected=None):
if dtype is None:
dtype = self.dtype
if reflected is None:
reflected = self.reflected
return Set(dtype, reflected)
def unify(self, typingctx, other):
if isinstance(other, Set):
dtype = typingctx.unify_pairs(self.dtype, other.dtype)
reflected = self.reflected or other.reflected
if dtype is not None:
return Set(dtype, reflected)
def __repr__(self):
return f"Set({self.dtype}, {self.reflected})"
| Set |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 173115,
"end": 173626
} | class ____(ZarrBase):
@contextlib.contextmanager
def create_zarr_target(self):
# TODO the zarr version would need to be >3.08 for the supports_consolidated_metadata property to have any effect
yield NoConsolidatedMetadataSupportStore(
zarr.storage.MemoryStore({}, read_only=False)
)
@requires_zarr
@pytest.mark.skipif(
ON_WINDOWS,
reason="Very flaky on Windows CI. Can re-enable assuming it starts consistently passing.",
)
| TestZarrNoConsolidatedMetadataSupport |
python | google__jax | tests/pallas/fusion_test.py | {
"start": 8419,
"end": 8507
} | class ____:
x0: jax.Array
x1: jax.Array
@dataclasses.dataclass(frozen=True)
| ArrayTuple |
python | numba__llvmlite | llvmlite/ir/types.py | {
"start": 10737,
"end": 11008
} | class ____(_BaseFloatType):
"""
The type for single-precision floats.
"""
null = '0.0'
intrinsic_name = 'f32'
def __str__(self):
return 'float'
def format_constant(self, value):
return _format_double(_as_float(value))
| FloatType |
python | pandas-dev__pandas | pandas/core/computation/expr.py | {
"start": 23476,
"end": 23893
} | class ____(BaseExprVisitor):
def __init__(
self,
env,
engine,
parser,
preparser=partial(
_preparse,
f=_compose(_replace_locals, _replace_booleans, clean_backtick_quoted_toks),
),
) -> None:
super().__init__(env, engine, parser, preparser)
@disallow(_unsupported_nodes | _python_not_supported | frozenset(["Not"]))
| PandasExprVisitor |
python | sqlalchemy__sqlalchemy | test/sql/test_labels.py | {
"start": 16901,
"end": 27183
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "DefaultDialect"
table1 = table(
"some_large_named_table",
column("this_is_the_primarykey_column"),
column("this_is_the_data_column"),
)
table2 = table(
"table_with_exactly_29_characs",
column("this_is_the_primarykey_column"),
column("this_is_the_data_column"),
)
def test_adjustable_1(self):
table1 = self.table1
q = (
table1.select()
.where(table1.c.this_is_the_primarykey_column == 4)
.alias("foo")
)
x = select(q)
compile_dialect = default.DefaultDialect(label_length=10)
self.assert_compile(
x,
"SELECT "
"foo.this_1, foo.this_2 "
"FROM ("
"SELECT "
"some_large_named_table.this_is_the_primarykey_column "
"AS this_1, "
"some_large_named_table.this_is_the_data_column "
"AS this_2 "
"FROM "
"some_large_named_table "
"WHERE "
"some_large_named_table.this_is_the_primarykey_column "
"= :this_1"
") "
"AS foo",
dialect=compile_dialect,
)
def test_adjustable_2(self):
table1 = self.table1
q = (
table1.select()
.where(table1.c.this_is_the_primarykey_column == 4)
.alias("foo")
)
x = select(q)
compile_dialect = default.DefaultDialect(label_length=10)
self.assert_compile(
x,
"SELECT "
"foo.this_1, foo.this_2 "
"FROM ("
"SELECT "
"some_large_named_table.this_is_the_primarykey_column "
"AS this_1, "
"some_large_named_table.this_is_the_data_column "
"AS this_2 "
"FROM "
"some_large_named_table "
"WHERE "
"some_large_named_table.this_is_the_primarykey_column "
"= :this_1"
") "
"AS foo",
dialect=compile_dialect,
)
def test_adjustable_3(self):
table1 = self.table1
compile_dialect = default.DefaultDialect(label_length=4)
q = (
table1.select()
.where(table1.c.this_is_the_primarykey_column == 4)
.alias("foo")
)
x = select(q)
self.assert_compile(
x,
"SELECT "
"foo._1, foo._2 "
"FROM ("
"SELECT "
"some_large_named_table.this_is_the_primarykey_column "
"AS _1, "
"some_large_named_table.this_is_the_data_column "
"AS _2 "
"FROM "
"some_large_named_table "
"WHERE "
"some_large_named_table.this_is_the_primarykey_column "
"= :_1"
") "
"AS foo",
dialect=compile_dialect,
)
def test_adjustable_4(self):
table1 = self.table1
q = (
table1.select()
.where(table1.c.this_is_the_primarykey_column == 4)
.alias()
)
x = select(q).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
compile_dialect = default.DefaultDialect(label_length=10)
self.assert_compile(
x,
"SELECT "
"anon_1.this_2 AS anon_1, "
"anon_1.this_4 AS anon_3 "
"FROM ("
"SELECT "
"some_large_named_table.this_is_the_primarykey_column "
"AS this_2, "
"some_large_named_table.this_is_the_data_column "
"AS this_4 "
"FROM "
"some_large_named_table "
"WHERE "
"some_large_named_table.this_is_the_primarykey_column "
"= :this_1"
") "
"AS anon_1",
dialect=compile_dialect,
)
def test_adjustable_5(self):
table1 = self.table1
q = (
table1.select()
.where(table1.c.this_is_the_primarykey_column == 4)
.alias()
)
x = select(q).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
compile_dialect = default.DefaultDialect(label_length=4)
self.assert_compile(
x,
"SELECT "
"_1._2 AS _1, "
"_1._4 AS _3 "
"FROM ("
"SELECT "
"some_large_named_table.this_is_the_primarykey_column "
"AS _2, "
"some_large_named_table.this_is_the_data_column "
"AS _4 "
"FROM "
"some_large_named_table "
"WHERE "
"some_large_named_table.this_is_the_primarykey_column "
"= :_1"
") "
"AS _1",
dialect=compile_dialect,
)
def test_adjustable_result_schema_column_1(self):
table1 = self.table1
q = (
table1.select()
.where(table1.c.this_is_the_primarykey_column == 4)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias("foo")
)
dialect = default.DefaultDialect(label_length=10)
compiled = q.compile(dialect=dialect)
assert set(compiled._create_result_map()["some_2"][1]).issuperset(
[
table1.c.this_is_the_data_column,
"some_large_named_table_this_is_the_data_column",
"some_2",
]
)
assert set(compiled._create_result_map()["some_1"][1]).issuperset(
[
table1.c.this_is_the_primarykey_column,
"some_large_named_table_this_is_the_primarykey_column",
"some_1",
]
)
def test_adjustable_result_schema_column_2(self):
table1 = self.table1
q = (
table1.select()
.where(table1.c.this_is_the_primarykey_column == 4)
.alias("foo")
)
x = select(q)
dialect = default.DefaultDialect(label_length=10)
compiled = x.compile(dialect=dialect)
assert set(compiled._create_result_map()["this_2"][1]).issuperset(
[
q.corresponding_column(table1.c.this_is_the_data_column),
"this_is_the_data_column",
"this_2",
]
)
assert set(compiled._create_result_map()["this_1"][1]).issuperset(
[
q.corresponding_column(table1.c.this_is_the_primarykey_column),
"this_is_the_primarykey_column",
"this_1",
]
)
def test_table_plus_column_exceeds_length(self):
"""test that the truncation only occurs when tablename + colname are
concatenated, if they are individually under the label length.
"""
compile_dialect = default.DefaultDialect(label_length=30)
a_table = table("thirty_characters_table_xxxxxx", column("id"))
other_table = table(
"other_thirty_characters_table_",
column("id"),
column("thirty_characters_table_id"),
)
anon = a_table.alias()
j1 = other_table.outerjoin(
anon, anon.c.id == other_table.c.thirty_characters_table_id
)
self.assert_compile(
select(other_table, anon)
.select_from(j1)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
"SELECT "
"other_thirty_characters_table_.id "
"AS other_thirty_characters__1, "
"other_thirty_characters_table_.thirty_characters_table_id "
"AS other_thirty_characters__2, "
"thirty_characters_table__1.id "
"AS thirty_characters_table__3 "
"FROM "
"other_thirty_characters_table_ "
"LEFT OUTER JOIN "
"thirty_characters_table_xxxxxx AS thirty_characters_table__1 "
"ON thirty_characters_table__1.id = "
"other_thirty_characters_table_.thirty_characters_table_id",
dialect=compile_dialect,
)
def test_colnames_longer_than_labels_lowercase(self):
t1 = table("a", column("abcde"))
self._test_colnames_longer_than_labels(t1)
def test_colnames_longer_than_labels_uppercase(self):
m = MetaData()
t1 = Table("a", m, Column("abcde", Integer))
self._test_colnames_longer_than_labels(t1)
def _test_colnames_longer_than_labels(self, t1):
dialect = default.DefaultDialect(label_length=4)
a1 = t1.alias(name="asdf")
# 'abcde' is longer than 4, but rendered as itself
# needs to have all characters
s = select(a1)
self.assert_compile(
select(a1), "SELECT asdf.abcde FROM a AS asdf", dialect=dialect
)
compiled = s.compile(dialect=dialect)
assert set(compiled._create_result_map()["abcde"][1]).issuperset(
["abcde", a1.c.abcde, "abcde"]
)
# column still there, but short label
s = select(a1).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
s, "SELECT asdf.abcde AS _1 FROM a AS asdf", dialect=dialect
)
compiled = s.compile(dialect=dialect)
assert set(compiled._create_result_map()["_1"][1]).issuperset(
["asdf_abcde", a1.c.abcde, "_1"]
)
def test_label_overlap_unlabeled(self):
"""test that an anon col can't overlap with a fixed name, #3396"""
table1 = table(
"tablename", column("columnname_one"), column("columnn_1")
)
stmt = select(table1).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
dialect = default.DefaultDialect(label_length=23)
self.assert_compile(
stmt,
"SELECT tablename.columnname_one AS tablename_columnn_1, "
"tablename.columnn_1 AS tablename_columnn_2 FROM tablename",
dialect=dialect,
)
compiled = stmt.compile(dialect=dialect)
eq_(
set(compiled._create_result_map()),
{"tablename_columnn_1", "tablename_columnn_2"},
)
| LabelLengthTest |
python | getsentry__sentry | tests/sentry/issues/auto_source_code_config/test_process_event.py | {
"start": 2470,
"end": 10792
} | class ____(TestCase):
# We may only want to change this for TestTaskBehavior when we add support
# for other providers
provider = "github"
domain_name = "github.com"
def setUp(self) -> None:
self.integration = self.create_integration(
organization=self.organization,
provider=self.provider,
external_id=self.organization.id,
metadata={"domain_name": f"{self.domain_name}/test-org"},
)
def create_event(self, frames: Sequence[Mapping[str, str | bool]], platform: str) -> GroupEvent:
"""Helper function to prevent creating an event without a platform."""
test_data = {"platform": platform, "stacktrace": {"frames": frames}}
# XXX: In the future fix store_event to return the correct type
return cast(GroupEvent, self.store_event(data=test_data, project_id=self.project.id))
def create_repo_and_code_mapping(
self,
repo_name: str,
stack_root: str,
source_root: str,
automatically_generated: bool = False,
default_branch: str = "master",
) -> None:
with assume_test_silo_mode_of(OrganizationIntegration):
organization_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration=self.integration
)
repository = Repository.objects.create(
name=repo_name,
organization_id=self.organization.id,
integration_id=self.integration.id,
)
RepositoryProjectPathConfig.objects.create(
project_id=self.project.id,
stack_root=stack_root,
source_root=source_root,
default_branch=default_branch,
repository=repository,
organization_integration_id=organization_integration.id,
integration_id=organization_integration.integration_id,
organization_id=organization_integration.organization_id,
automatically_generated=automatically_generated,
)
def _process_and_assert_configuration_changes(
self,
*, # Force keyword arguments
repo_trees: Mapping[str, Sequence[str]],
frames: Sequence[Mapping[str, str | bool | Any]],
platform: str,
expected_new_code_mappings: Sequence[ExpectedCodeMapping] | None = None,
expected_new_in_app_stack_trace_rules: list[str] | None = None,
) -> GroupEvent:
platform_config = PlatformConfig(platform)
dry_run = platform_config.is_dry_run_platform(self.organization)
tags = {"dry_run": dry_run, "platform": platform}
with (
patch(f"{CLIENT}.get_tree", side_effect=create_mock_get_tree(repo_trees)),
patch(f"{CLIENT}.get_remaining_api_requests", return_value=500),
patch(
f"{REPO_TREES_INTEGRATION}._populate_repositories",
return_value=mock_populate_repositories(),
),
patch("sentry.utils.metrics.incr") as mock_incr,
):
starting_enhancements = self.project.get_option(DERIVED_ENHANCEMENTS_OPTION_KEY)
starting_repositories_count = Repository.objects.all().count()
starting_code_mappings_count = RepositoryProjectPathConfig.objects.all().count()
event = self.create_event(frames, platform)
code_mappings, in_app_stack_trace_rules = process_event(
self.project.id, event.group_id, event.event_id
)
current_code_mappings = RepositoryProjectPathConfig.objects.all()
current_repositories = Repository.objects.all()
current_enhancements = self.project.get_option(DERIVED_ENHANCEMENTS_OPTION_KEY)
if dry_run:
# If dry run, no configurations should have been created
assert starting_code_mappings_count == current_code_mappings.count()
assert starting_repositories_count == current_repositories.count()
assert current_enhancements == starting_enhancements
if expected_new_code_mappings:
assert len(code_mappings) == len(expected_new_code_mappings)
for cm, expected_cm in zip(code_mappings, expected_new_code_mappings):
assert cm.stacktrace_root == expected_cm["stack_root"]
assert cm.source_path == expected_cm["source_root"]
assert cm.repo.name == expected_cm["repo_name"]
if expected_new_in_app_stack_trace_rules:
assert sorted(in_app_stack_trace_rules) == sorted(
expected_new_in_app_stack_trace_rules
)
assert (
"\n".join(expected_new_in_app_stack_trace_rules) not in current_enhancements
)
mock_incr.assert_any_call(
key=f"{METRIC_PREFIX}.in_app_stack_trace_rules.created",
amount=len(expected_new_in_app_stack_trace_rules),
tags=tags,
sample_rate=1.0,
)
else:
if expected_new_code_mappings:
assert current_code_mappings.count() == starting_code_mappings_count + len(
expected_new_code_mappings
)
for expected_cm in expected_new_code_mappings:
code_mapping = current_code_mappings.get(
project_id=self.project.id, stack_root=expected_cm["stack_root"]
)
assert code_mapping is not None
assert code_mapping.source_root == expected_cm["source_root"]
assert code_mapping.repository.name == expected_cm["repo_name"]
else:
assert current_code_mappings.count() == starting_code_mappings_count
if expected_new_in_app_stack_trace_rules:
rules = (
starting_enhancements.split("\n") + expected_new_in_app_stack_trace_rules
if starting_enhancements
else expected_new_in_app_stack_trace_rules
)
assert current_enhancements == "\n".join(sorted(rules))
mock_incr.assert_any_call(
key=f"{METRIC_PREFIX}.in_app_stack_trace_rules.created",
amount=len(expected_new_in_app_stack_trace_rules),
tags=tags,
sample_rate=1.0,
)
else:
assert current_enhancements == starting_enhancements
if (current_repositories.count() > starting_repositories_count) or dry_run:
mock_incr.assert_any_call(
key=f"{METRIC_PREFIX}.repository.created", tags=tags, sample_rate=1.0
)
if (current_code_mappings.count() > starting_code_mappings_count) or dry_run:
mock_incr.assert_any_call(
key=f"{METRIC_PREFIX}.code_mapping.created", tags=tags, sample_rate=1.0
)
# Returning this to inspect in tests
return event
def frame(
self,
filename: str,
in_app: bool = True,
) -> dict[str, str | bool]:
return {"filename": filename, "in_app": in_app}
def frame_from_module(
self,
module: str,
abs_path: str,
in_app: bool = False,
) -> dict[str, str | bool | Any]:
frame: dict[str, str | bool | Any] = {}
if module:
frame["module"] = module
if abs_path:
frame["abs_path"] = abs_path
if in_app and in_app is not None:
frame["in_app"] = in_app
return frame
def code_mapping(
self,
stack_root: str,
source_root: str,
repo_name: str = REPO1,
) -> ExpectedCodeMapping:
return {"stack_root": stack_root, "source_root": source_root, "repo_name": repo_name}
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
| BaseDeriveCodeMappings |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 3389,
"end": 4244
} | class ____(ConstraintViolationException):
"""Indicates that a column constraint has been violated."""
def __init__(self, constraint_name, constraint_description, column_name, offending_rows=None):
self.constraint_name = constraint_name
self.constraint_description = constraint_description
self.column_name = column_name
self.offending_rows = offending_rows
super().__init__(self.construct_message())
def construct_message(self):
base_message = f'Violated "{self.constraint_name}" for column "{self.column_name}" - {self.constraint_description}'
if self.offending_rows is not None:
base_message += (
f"The offending (index, row values) are the following: {self.offending_rows}"
)
return base_message
@beta
| ColumnConstraintViolationException |
python | django__django | django/utils/autoreload.py | {
"start": 13343,
"end": 14719
} | class ____(BaseReloader):
SLEEP_TIME = 1 # Check for changes once per second.
def tick(self):
mtimes = {}
while True:
for filepath, mtime in self.snapshot_files():
old_time = mtimes.get(filepath)
mtimes[filepath] = mtime
if old_time is None:
logger.debug("File %s first seen with mtime %s", filepath, mtime)
continue
elif mtime > old_time:
logger.debug(
"File %s previous mtime: %s, current mtime: %s",
filepath,
old_time,
mtime,
)
self.notify_file_changed(filepath)
time.sleep(self.SLEEP_TIME)
yield
def snapshot_files(self):
# watched_files may produce duplicate paths if globs overlap.
seen_files = set()
for file in self.watched_files():
if file in seen_files:
continue
try:
mtime = file.stat().st_mtime
except OSError:
# This is thrown when the file does not exist.
continue
seen_files.add(file)
yield file, mtime
@classmethod
def check_availability(cls):
return True
| StatReloader |
python | davidhalter__jedi | test/completion/decorators.py | {
"start": 2762,
"end": 3032
} | class ____:
@not_found_decorator2
def a(self):
return 1
#? ['__call__']
JustAClass().a.__call__
#? int()
JustAClass().a()
#? ['__call__']
JustAClass.a.__call__
#? int()
JustAClass.a()
# -----------------
# illegal decorators
# -----------------
| JustAClass |
python | django__django | django/db/models/functions/window.py | {
"start": 1567,
"end": 2204
} | class ____(Func):
function = "NTH_VALUE"
window_compatible = True
def __init__(self, expression, nth=1, **extra):
if expression is None:
raise ValueError(
"%s requires a non-null source expression." % self.__class__.__name__
)
if nth is None or nth <= 0:
raise ValueError(
"%s requires a positive integer as for nth." % self.__class__.__name__
)
super().__init__(expression, nth, **extra)
def _resolve_output_field(self):
sources = self.get_source_expressions()
return sources[0].output_field
| NthValue |
python | google__pytype | pytype/rewrite/abstract/functions_test.py | {
"start": 5375,
"end": 6407
} | class ____(test_utils.ContextfulTestBase):
def test_call(self):
f = functions.InterpreterFunction(
ctx=self.ctx, name='f', code=_get_const('def f(self): ...'),
enclosing_scope=(), parent_frame=FakeFrame(self.ctx))
callself = self.ctx.consts[42]
bound_f = f.bind_to(callself)
frame = bound_f.call(functions.Args())
assert_type(frame, FakeFrame)
argdict = frame.child_frames[0][1]
self.assertEqual(argdict, {'self': callself.to_variable()})
def test_analyze(self):
f = functions.InterpreterFunction(
ctx=self.ctx, name='f', code=_get_const('def f(self): ...'),
enclosing_scope=(), parent_frame=FakeFrame(self.ctx))
callself = self.ctx.consts[42]
bound_f = f.bind_to(callself)
frames = bound_f.analyze()
assert_type(frames, Sequence[FakeFrame])
self.assertEqual(len(frames), 1)
argdict = frames[0].child_frames[0][1]
self.assertEqual(argdict, {'self': callself.to_variable()})
if __name__ == '__main__':
unittest.main()
| BoundFunctionTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.