language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 61676,
"end": 61830
} | class ____(_PrintableStructure):
_fields_ = [
('referenceTime', c_ulonglong),
('violationTime', c_ulonglong),
]
| c_nvmlViolationTime_t |
python | sqlalchemy__sqlalchemy | test/orm/test_assorted_eager.py | {
"start": 26123,
"end": 29643
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"companies",
metadata,
Column(
"company_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("company_name", String(40)),
)
Table(
"addresses",
metadata,
Column(
"address_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("company_id", Integer, ForeignKey("companies.company_id")),
Column("address", String(40)),
)
Table(
"phone_numbers",
metadata,
Column(
"phone_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("address_id", Integer, ForeignKey("addresses.address_id")),
Column("type", String(20)),
Column("number", String(10)),
)
Table(
"invoices",
metadata,
Column(
"invoice_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("company_id", Integer, ForeignKey("companies.company_id")),
Column("date", sa.DateTime),
)
@classmethod
def setup_classes(cls):
class Company(cls.Comparable):
pass
class Address(cls.Comparable):
pass
class Phone(cls.Comparable):
pass
class Invoice(cls.Comparable):
pass
def test_load_m2o_attached_to_o2(self):
"""
Tests eager load of a many-to-one attached to a one-to-many. this
testcase illustrated the bug, which is that when the single Company is
loaded, no further processing of the rows occurred in order to load
the Company's second Address object.
"""
addresses, invoices, Company, companies, Invoice, Address = (
self.tables.addresses,
self.tables.invoices,
self.classes.Company,
self.tables.companies,
self.classes.Invoice,
self.classes.Address,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Company,
companies,
properties={"addresses": relationship(Address, lazy="joined")},
)
self.mapper_registry.map_imperatively(
Invoice,
invoices,
properties={"company": relationship(Company, lazy="joined")},
)
a1 = Address(address="a1 address")
a2 = Address(address="a2 address")
c1 = Company(company_name="company 1", addresses=[a1, a2])
i1 = Invoice(date=datetime.datetime.now(), company=c1)
session = fixture_session()
session.add(i1)
session.flush()
company_id = c1.company_id
invoice_id = i1.invoice_id
session.expunge_all()
c = session.get(Company, company_id)
session.expunge_all()
i = session.get(Invoice, invoice_id)
def go():
eq_(c, i.company)
eq_(c.addresses, i.company.addresses)
self.assert_sql_count(testing.db, go, 0)
| EagerTest7 |
python | google__pytype | pytype/tests/test_typing2.py | {
"start": 132,
"end": 22382
} | class ____(test_base.BaseTest):
"""Tests for typing.py."""
_TEMPLATE = """
import collections
import typing
def f(s: %(annotation)s):%(disables)s
return s
f(%(arg)s)
"""
def _test_match(self, arg, annotation, disables=""):
self.Check(self._TEMPLATE % locals())
def _test_no_match(self, arg, annotation, disables=""):
code = (self._TEMPLATE % locals()).rstrip() + " # wrong-arg-types"
self.InferWithErrors(code)
def test_list_match(self):
self._test_match("[1, 2, 3]", "typing.List")
self._test_match("[1, 2, 3]", "typing.List[int]")
self._test_match("[1, 2, 3.1]", "typing.List[typing.Union[int, float]]")
self._test_no_match("[1.1, 2.1, 3.1]", "typing.List[int]")
def test_sequence_match(self):
self._test_match("[1, 2, 3]", "typing.Sequence")
self._test_match("[1, 2, 3]", "typing.Sequence[int]")
self._test_match("(1, 2, 3.1)", "typing.Sequence[typing.Union[int, float]]")
self._test_no_match("[1.1, 2.1, 3.1]", "typing.Sequence[int]")
def test_generator(self):
self.Check("""
from typing import Generator
def f() -> Generator[int, None, None]:
for i in range(3):
yield i
""")
def test_type(self):
ty, errors = self.InferWithErrors("""
from typing import Type
class Foo:
x = 1
def f1(foo: Type[Foo]):
return foo.x
def f2(foo: Type[Foo]):
return foo.y # attribute-error[e]
def f3(foo: Type[Foo]):
return foo.mro()
def f4(foo: Type[Foo]):
return foo()
v1 = f1(Foo)
v2 = f2(Foo)
v3 = f3(Foo)
v4 = f4(Foo)
""")
self.assertErrorRegexes(errors, {"e": r"y.*Foo"})
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Type
class Foo:
x = ... # type: int
def f1(foo: Type[Foo]) -> int: ...
def f2(foo: Type[Foo]) -> Any: ...
def f3(foo: Type[Foo]) -> list: ...
def f4(foo: Type[Foo]) -> Foo: ...
v1 = ... # type: int
v2 = ... # type: Any
v3 = ... # type: list
v4 = ... # type: Foo
""",
)
def test_type_union(self):
errors = self.CheckWithErrors("""
from typing import Type, Union
class Foo:
bar = ... # type: int
def f1(x: Type[Union[int, Foo]]):
# Currently not an error, since attributes on Unions are retrieved
# differently. See get_attribute() in attribute.py.
x.bar
def f2(x: Union[Type[int], Type[Foo]]):
x.bar # attribute-error[e]
f1(x)
def f3(x: Type[Union[int, Foo]]):
f1(x)
f2(x)
""")
self.assertErrorRegexes(errors, {"e": r"bar.*int"})
def test_use_type_alias(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import List
MyType = List[str]
""",
)
self.Check(
"""
import foo
def f(x: foo.MyType):
pass
f([""])
""",
pythonpath=[d.path],
)
def test_callable(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Callable
def f() -> Callable: ...
""",
)
self.Check(
"""
from typing import Callable
import foo
def f() -> Callable:
return foo.f()
def g() -> Callable:
return int
""",
pythonpath=[d.path],
)
def test_callable_parameters(self):
ty, errors = self.InferWithErrors("""
from typing import Any, Callable
# The below are all valid.
def f1(x: Callable[[int, str], bool]): ...
def f2(x: Callable[..., bool]): ...
def f3(x: Callable[[], bool]): ...
def g1(x: Callable[int, bool]): ... # _ARGS not a list # invalid-annotation[e1]
lst = [int] if __random__ else [str]
def g2(x: Callable[lst, bool]): ... # _ARGS ambiguous # invalid-annotation[e2] # invalid-annotation[e3]
# bad: _RET ambiguous
def g3(x: Callable[[], bool if __random__ else str]): ... # invalid-annotation[e4]
# bad: _ARGS[0] ambiguous
def g4(x: Callable[[int if __random__ else str], bool]): ... # invalid-annotation[e5]
lst = None # type: list[int]
def g5(x: Callable[lst, bool]): ... # _ARGS not a constant # invalid-annotation[e6]
def g6(x: Callable[[42], bool]): ... # _ARGS[0] not a type # invalid-annotation[e7]
def g7(x: Callable[[], bool, int]): ... # Too many params # invalid-annotation[e8]
def g8(x: Callable[Any, bool]): ... # Any is not allowed # invalid-annotation[e9]
def g9(x: Callable[[]]) -> None: ... # invalid-annotation[e10]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Callable, List, Type
lst = ... # type: List[int]
def f1(x: Callable[[int, str], bool]) -> None: ...
def f2(x: Callable[Any, bool]) -> None: ...
def f3(x: Callable[[], bool]) -> None: ...
def g1(x: Callable[Any, bool]) -> None: ...
def g2(x: Callable[Any, bool]) -> None: ...
def g3(x: Callable[[], Any]) -> None: ...
def g4(x: Callable[[Any], bool]) -> None: ...
def g5(x: Callable[Any, bool]) -> None: ...
def g6(x: Callable[[Any], bool]) -> None: ...
def g7(x: Callable[[], bool]) -> None: ...
def g8(x: Callable[Any, bool]) -> None: ...
def g9(x: Callable[[], Any]) -> None: ...
""",
)
self.assertErrorRegexes(
errors,
{
"e1": r"'int'.*must be a list of argument types or ellipsis",
"e2": r"\[int\] or \[str\].*Must be constant",
"e3": r"'Any'.*must be a list of argument types or ellipsis",
"e4": r"bool or str.*Must be constant",
"e5": r"int or str.*Must be constant",
"e6": r"instance of list\[int\].*Must be constant",
"e7": r"instance of int",
"e8": r"Callable.*expected 2.*got 3",
"e9": r"'Any'.*must be a list of argument types or ellipsis",
"e10": r"Callable\[_ARGS, _RET].*2.*1",
},
)
def test_callable_bad_args(self):
ty, errors = self.InferWithErrors("""
from typing import Callable
lst1 = [str]
lst1[0] = int
def g1(x: Callable[lst1, bool]): ... # invalid-annotation[e1]
lst2 = [str]
while __random__:
lst2.append(int)
def g2(x: Callable[lst2, bool]): ... # invalid-annotation[e2]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, List, Type, Union
lst1 = ... # type: List[Type[Union[int, str]]]
lst2 = ... # type: List[Type[Union[int, str]]]
def g1(x: Callable[..., bool]) -> None: ...
def g2(x: Callable[..., bool]) -> None: ...
""",
)
# For the first error, it would be more precise to say [str or int], since
# the mutation is simple enough that we could keep track of the change to
# the constant, but we don't do that yet.
self.assertErrorRegexes(
errors,
{
"e1": (
r"instance of list\[type\[Union\[int, str\]\]\].*"
r"Must be constant"
),
"e2": (
r"instance of list\[type\[Union\[int, str\]\]\].*Must be"
r" constant"
),
},
)
def test_generics(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Dict
K = TypeVar("K")
V = TypeVar("V")
class CustomDict(Dict[K, V]): ...
""",
)
self.Check(
"""
import typing
import foo
def f(x: typing.Callable[..., int]): pass
def f(x: typing.Iterator[int]): pass
def f(x: typing.Iterable[int]): pass
def f(x: typing.Container[int]): pass
def f(x: typing.Sequence[int]): pass
def f(x: typing.Tuple[int, str]): pass
def f(x: typing.MutableSequence[int]): pass
def f(x: typing.List[int]): pass
def f(x: typing.Deque[int]): pass
def f(x: typing.IO[str]): pass
def f(x: typing.Collection[str]): pass
def f(x: typing.Mapping[int, str]): pass
def f(x: typing.MutableMapping[int, str]): pass
def f(x: typing.Dict[int, str]): pass
def f(x: typing.AbstractSet[int]): pass
def f(x: typing.FrozenSet[int]): pass
def f(x: typing.MutableSet[int]): pass
def f(x: typing.Set[int]): pass
def f(x: typing.Reversible[int]): pass
def f(x: typing.SupportsAbs[int]): pass
def f(x: typing.Optional[int]): pass
def f(x: typing.Generator[int, None, None]): pass
def f(x: typing.Type[int]): pass
def f(x: typing.Pattern[str]): pass
def f(x: typing.Match[str]): pass
def f(x: foo.CustomDict[int, str]): pass
""",
pythonpath=[d.path],
)
def test_generator_iterator_match(self):
self.Check("""
from typing import Iterator
def f(x: Iterator[int]):
pass
f(x for x in [42])
""")
def test_name_conflict(self):
ty = self.Infer("""
import typing
def f() -> typing.Any:
return __any_object__
class Any:
pass
def g() -> Any:
return __any_object__
""")
self.assertTypesMatchPytd(
ty,
"""
import typing
def f() -> typing.Any: ...
def g() -> Any: ...
class Any:
pass
""",
)
def test_callable_call(self):
ty, errors = self.InferWithErrors("""
from typing import Callable
f = ... # type: Callable[[int], str]
v1 = f() # wrong-arg-count[e1]
v2 = f(True) # ok
v3 = f(42.0) # wrong-arg-types[e2]
v4 = f(1, 2) # wrong-arg-count[e3]
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Callable
f = ... # type: Callable[[int], str]
v1 = ... # type: Any
v2 = ... # type: str
v3 = ... # type: Any
v4 = ... # type: Any
""",
)
self.assertErrorRegexes(
errors, {"e1": r"1.*0", "e2": r"int.*float", "e3": r"1.*2"}
)
def test_callable_call_with_type_parameters(self):
ty, errors = self.InferWithErrors("""
from typing import Callable, TypeVar
T = TypeVar("T")
def f(g: Callable[[T, T], T], y, z):
return g(y, z) # wrong-arg-types[e]
v1 = f(__any_object__, 42, 3.14) # ok
v2 = f(__any_object__, 42, "hello world")
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Callable, TypeVar, Union
T = TypeVar("T")
def f(g: Callable[[T, T], T], y, z): ...
v1 = ... # type: Union[int, float]
v2 = ... # type: Any
""",
)
self.assertErrorRegexes(errors, {"e": r"int.*str"})
def test_callable_call_with_return_only(self):
ty = self.Infer("""
from typing import Callable
f = ... # type: Callable[..., int]
v = f()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable
f = ... # type: Callable[..., int]
v = ... # type: int
""",
)
def test_callable_call_with_varargs_and_kwargs(self):
errors = self.CheckWithErrors("""
from typing import Callable
f = ... # type: Callable[[], int]
f(x=3) # wrong-keyword-args[e1]
f(*(42,)) # wrong-arg-count[e2]
f(**{"x": "hello", "y": "world"}) # wrong-keyword-args[e3]
f(*(42,), **{"hello": "world"}) # wrong-keyword-args[e4]
""")
self.assertErrorRegexes(
errors, {"e1": r"x", "e2": r"0.*1", "e3": r"x, y", "e4": r"hello"}
)
def test_callable_attribute(self):
self.Check("""
from typing import Any, Callable
def foo(fn: Callable[[Any], Any]):
fn.foo # pytype: disable=attribute-error
""")
def test_items_view(self):
self.Check("""
from typing import ItemsView
def f(x: ItemsView[str, int]): ...
""")
def test_new_type(self):
ty = self.Infer("""
from typing import NewType
MyInt = NewType('MyInt', int)
class A:
pass
MyA = NewType('MyA', A)
MySpecialA = NewType('MySpecialA', MyA)
MyStr1 = NewType(*('MyStr1', str))
MyStr2 = NewType(**{'tp':str, 'name':'MyStr2'})
MyAnyType = NewType('MyAnyType', tp=str if __random__ else int)
MyFunnyNameType = NewType('Foo' if __random__ else 'Bar', tp=str)
def func1(i: MyInt) -> MyInt:
return i
def func2(i: MyInt) -> int:
return i
def func3(a: MyA) -> MyA:
return a
def func4(a: MyA) -> A:
return a
def func5(a: MySpecialA) -> MySpecialA:
return a
def func6(a: MySpecialA) -> MyA:
return a
def func7(a: MySpecialA) -> A:
return a
v = 123
func1(MyInt(v))
func2(MyInt(v))
my_a = MyA(A())
func3(my_a)
func4(my_a)
my_special_a = MySpecialA(my_a)
func5(my_special_a)
func6(my_special_a)
func7(my_special_a)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
class A:
pass
class MyInt(int):
def __init__(self, val: int): ...
class MyA(A):
def __init__(self, val: A): ...
class MySpecialA(MyA):
def __init__(self, val: MyA): ...
class MyStr1(str):
def __init__(self, val: str): ...
class MyStr2(str):
def __init__(self, val: str): ...
MyAnyType = ... # type: Any
class MyFunnyNameType(str):
def __init__(self, val:str): ...
def func1(i: MyInt) -> MyInt: ...
def func2(i: MyInt) -> int: ...
def func3(a: MyA) -> MyA: ...
def func4(a: MyA) -> A: ...
def func5(a: MySpecialA) -> MySpecialA: ...
def func6(a: MySpecialA) -> MyA: ...
def func7(a: MySpecialA) -> A: ...
v = ... # type: int
my_a = ... # type: MyA
my_special_a = ... # type: MySpecialA
""",
)
def test_new_type_error(self):
errors = self.CheckWithErrors("""
from typing import NewType
MyInt = NewType('MyInt', int)
MyStr = NewType('MyStr', str)
def func1(i: MyInt) -> MyInt:
return i
def func2(i: int) -> MyInt:
return i # bad-return-type[e1]
def func3(s: MyStr) -> MyStr:
return s
func1(123) # wrong-arg-types[e2]
func3(MyStr(123)) # wrong-arg-types[e3]
""")
self.assertErrorRegexes(
errors,
{
"e1": r"Expected: MyInt\nActually returned: int",
"e2": r".*Expected: \(i: MyInt\)\nActually passed: \(i: int\)",
"e3": r".*Expected:.*val: str\)\nActually passed:.*val: int\)",
},
)
def test_new_type_not_abstract(self):
# At runtime, the 'class' created by NewType is simply an identity function,
# so it ignores abstract-ness.
self.Check("""
from typing import Mapping, NewType
X = NewType('X', Mapping)
def f() -> X:
return X({})
""")
def test_maybe_return(self):
self.Check("""
def f() -> int:
if __random__:
return 42
else:
raise ValueError()
""")
def test_no_return_against_str(self):
ty = self.Infer("""
def f() -> str:
raise ValueError()
def g():
return f()
""")
self.assertTypesMatchPytd(
ty,
"""
def f() -> str: ...
def g() -> str: ...
""",
)
def test_called_no_return_against_str(self):
self.Check("""
def f():
raise ValueError()
def g() -> str:
return f()
""")
def test_union_ellipsis(self):
errors = self.CheckWithErrors("""
from typing import Union
MyUnion = Union[int, ...] # invalid-annotation[e]
""")
self.assertErrorRegexes(errors, {"e": r"Ellipsis.*index 1.*Union"})
def test_list_ellipsis(self):
errors = self.CheckWithErrors("""
from typing import List
MyList = List[int, ...] # invalid-annotation[e]
""")
self.assertErrorRegexes(errors, {"e": r"Ellipsis.*index 1.*List"})
def test_multiple_ellipses(self):
errors = self.CheckWithErrors("""
from typing import Union
MyUnion = Union[..., int, ..., str, ...] # invalid-annotation[e]
""")
self.assertErrorRegexes(errors, {"e": r"Ellipsis.*indices 0, 2, 4.*Union"})
def test_bad_tuple_ellipsis(self):
errors = self.CheckWithErrors("""
from typing import Tuple
MyTuple1 = Tuple[..., ...] # invalid-annotation[e1]
MyTuple2 = Tuple[...] # invalid-annotation[e2]
""")
self.assertErrorRegexes(
errors,
{"e1": r"Ellipsis.*index 0.*Tuple", "e2": r"Ellipsis.*index 0.*Tuple"},
)
def test_bad_callable_ellipsis(self):
errors = self.CheckWithErrors("""
from typing import Callable
MyCallable1 = Callable[..., ...] # invalid-annotation[e1]
MyCallable2 = Callable[[int], ...] # invalid-annotation[e2]
MyCallable3 = Callable[[...], int] # invalid-annotation[e3]
MyCallable4 = Callable[[int], int, int] # invalid-annotation[e4]
""")
self.assertErrorRegexes(
errors,
{
"e1": r"Ellipsis.*index 1.*Callable",
"e2": r"Ellipsis.*index 1.*Callable",
"e3": r"Ellipsis.*index 0.*list",
"e4": r"Callable\[_ARGS, _RET].*2.*3",
},
)
def test_optional_parameters(self):
errors = self.CheckWithErrors("""
from typing import Optional
def func1(x: Optional[int]):
pass
def func2(x: Optional): # invalid-annotation[e1]
pass
def func3(x: Optional[int, float, str]): # invalid-annotation[e2]
pass
""")
self.assertErrorRegexes(
errors,
{
"e1": r"Not a type",
"e2": r"typing\.Optional can only contain one type parameter",
},
)
def test_noreturn_possible_return(self):
errors = self.CheckWithErrors("""
from typing import NoReturn
def func(x) -> NoReturn:
if x > 1:
raise ValueError() # bad-return-type[e]
""")
self.assertErrorSequences(
errors, {"e": ["Expected: Never", "Actually returned: None"]}
)
def test_noreturn(self):
errors = self.CheckWithErrors("""
from typing import Any, List, NoReturn
def func0() -> NoReturn:
raise ValueError()
def func1() -> List[NoReturn]:
return [None] # bad-return-type[e1]
def func2(x: NoReturn):
pass
func2(0) # wrong-arg-types[e2]
def func3(x: List[NoReturn]):
pass
func3([0]) # wrong-arg-types[e3]
def func4():
x: List[NoReturn] = []
x.append(0) # container-type-mismatch[e4]
""")
self.assertErrorSequences(
errors,
{
"e1": ["Expected: list[nothing]", "Actually returned: list[None]"],
"e2": ["Expected: (x: Never)", "Actually passed: (x: int)"],
"e3": [
"Expected: (x: list[nothing])",
"Actually passed: (x: list[int])",
],
"e4": ["Allowed", "_T: Never", "New", "_T: int"],
},
)
def test_noreturn_pyi(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import NoReturn
def f(x: NoReturn): ...
""",
)]):
errors = self.CheckWithErrors("""
import foo
foo.f(0) # wrong-arg-types[e]
""")
self.assertErrorSequences(
errors, {"e": ["Expected: (x: empty)", "Actually passed: (x: int)"]}
)
def test_noreturn_in_tuple(self):
self.Check("""
from typing import NoReturn
def _returns(annotations) -> bool:
return annotations["return"] not in (None, NoReturn)
""")
def test_SupportsComplex(self):
self.Check("""
from typing import SupportsComplex
def foo(x: SupportsComplex):
pass
foo(1j)
""")
def test_mutable_set_sub(self):
self.Check("""
from typing import MutableSet
def f(x: MutableSet) -> MutableSet:
return x - {0}
""")
def test_union_of_classes(self):
ty = self.Infer("""
from typing import Type, Union
class Foo:
def __getitem__(self, x) -> int:
return 0
class Bar:
def __getitem__(self, x) -> str:
return ''
def f(x: Union[Type[Foo], Type[Bar]]):
return x.__getitem__
def g(x: Type[Union[Foo, Bar]]):
return x.__getitem__
""")
# The inferred return type of `g` is technically incorrect: it is inferred
# from the type of abstract.Union.getitem_slot, which is a NativeFunction,
# so its type defaults to a plain Callable. We should instead look up
# Foo.__getitem__ and Bar.__getitem__ as we do for `f`, but it is currently
# not possible to distinguish between using Union.getitem_slot and accessing
# the actual __getitem__ method on a union's options. Inferring `Callable`
# should generally be safe, since __getitem__ is a method by convention.
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Callable, Type, Union
class Foo:
def __getitem__(self, x) -> int: ...
class Bar:
def __getitem__(self, x) -> str: ...
def f(x: Type[Union[Foo, Bar]]) -> Callable[[Any, Any], Union[int, str]]: ...
def g(x: Type[Union[Foo, Bar]]) -> Callable: ...
""",
)
def test_bytestring(self):
self.Check("""
from typing import ByteString, Union
def f(x: Union[bytes, bytearray, memoryview]):
pass
x = None # type: ByteString
f(x)
""")
def test_forwardref(self):
# From https://docs.python.org/3/library/typing.html#typing.ForwardRef:
# Class used for internal typing representation of string forward
# references. [...] ForwardRef should not be instantiated by a user
self.CheckWithErrors("""
from typing import ForwardRef
X = ForwardRef("Y") # not-callable
""")
| TypingTest |
python | keras-team__keras | keras/src/metrics/probabilistic_metrics.py | {
"start": 419,
"end": 1692
} | class ____(reduction_metrics.MeanMetricWrapper):
"""Computes Kullback-Leibler divergence metric between `y_true` and
`y_pred`.
Formula:
```python
metric = y_true * log(y_true / y_pred)
```
`y_true` and `y_pred` are expected to be probability
distributions, with values between 0 and 1. They will get
clipped to the `[0, 1]` range.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Examples:
>>> m = keras.metrics.KLDivergence()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
>>> m.result()
0.45814306
>>> m.reset_state()
>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
... sample_weight=[1, 0])
>>> m.result()
0.9162892
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[keras.metrics.KLDivergence()])
```
"""
def __init__(self, name="kl_divergence", dtype=None):
super().__init__(fn=kl_divergence, name=name, dtype=dtype)
def get_config(self):
return {"name": self.name, "dtype": self.dtype}
@keras_export("keras.metrics.Poisson")
| KLDivergence |
python | python__mypy | mypy/plugins/attrs.py | {
"start": 2908,
"end": 39102
} | class ____:
"""The value of an attr.ib() call."""
def __init__(
self,
name: str,
alias: str | None,
info: TypeInfo,
has_default: bool,
init: bool,
kw_only: bool,
converter: Converter | None,
context: Context,
init_type: Type | None,
) -> None:
self.name = name
self.alias = alias
self.info = info
self.has_default = has_default
self.init = init
self.kw_only = kw_only
self.converter = converter
self.context = context
self.init_type = init_type
def argument(self, ctx: mypy.plugin.ClassDefContext) -> Argument:
"""Return this attribute as an argument to __init__."""
assert self.init
init_type: Type | None = None
if self.converter:
if self.converter.init_type:
init_type = self.converter.init_type
if init_type and self.init_type and self.converter.ret_type:
# The converter return type should be the same type as the attribute type.
# Copy type vars from attr type to converter.
converter_vars = get_type_vars(self.converter.ret_type)
init_vars = get_type_vars(self.init_type)
if converter_vars and len(converter_vars) == len(init_vars):
variables = {
binder.id: arg for binder, arg in zip(converter_vars, init_vars)
}
init_type = expand_type(init_type, variables)
else:
ctx.api.fail("Cannot determine __init__ type from converter", self.context)
init_type = AnyType(TypeOfAny.from_error)
else: # There is no converter, the init type is the normal type.
init_type = self.init_type or self.info[self.name].type
unannotated = False
if init_type is None:
unannotated = True
# Convert type not set to Any.
init_type = AnyType(TypeOfAny.unannotated)
else:
proper_type = get_proper_type(init_type)
if isinstance(proper_type, AnyType):
if proper_type.type_of_any == TypeOfAny.unannotated:
unannotated = True
if unannotated and ctx.api.options.disallow_untyped_defs:
# This is a compromise. If you don't have a type here then the
# __init__ will be untyped. But since the __init__ is added it's
# pointing at the decorator. So instead we also show the error in the
# assignment, which is where you would fix the issue.
node = self.info[self.name].node
assert node is not None
ctx.api.msg.need_annotation_for_var(node, self.context)
if self.kw_only:
arg_kind = ARG_NAMED_OPT if self.has_default else ARG_NAMED
else:
arg_kind = ARG_OPT if self.has_default else ARG_POS
# Attrs removes leading underscores when creating the __init__ arguments.
name = self.alias or self.name.lstrip("_")
return Argument(Var(name, init_type), init_type, None, arg_kind)
def serialize(self) -> JsonDict:
"""Serialize this object so it can be saved and restored."""
return {
"name": self.name,
"alias": self.alias,
"has_default": self.has_default,
"init": self.init,
"kw_only": self.kw_only,
"has_converter": self.converter is not None,
"converter_init_type": (
self.converter.init_type.serialize()
if self.converter and self.converter.init_type
else None
),
"context_line": self.context.line,
"context_column": self.context.column,
"init_type": self.init_type.serialize() if self.init_type else None,
}
@classmethod
def deserialize(
cls, info: TypeInfo, data: JsonDict, api: SemanticAnalyzerPluginInterface
) -> Attribute:
"""Return the Attribute that was serialized."""
raw_init_type = data["init_type"]
init_type = deserialize_and_fixup_type(raw_init_type, api) if raw_init_type else None
raw_converter_init_type = data["converter_init_type"]
converter_init_type = (
deserialize_and_fixup_type(raw_converter_init_type, api)
if raw_converter_init_type
else None
)
return Attribute(
data["name"],
data["alias"],
info,
data["has_default"],
data["init"],
data["kw_only"],
Converter(converter_init_type) if data["has_converter"] else None,
Context(line=data["context_line"], column=data["context_column"]),
init_type,
)
def expand_typevar_from_subtype(self, sub_type: TypeInfo) -> None:
"""Expands type vars in the context of a subtype when an attribute is inherited
from a generic super type."""
if self.init_type:
self.init_type = map_type_from_supertype(self.init_type, sub_type, self.info)
else:
self.init_type = None
def _determine_eq_order(ctx: mypy.plugin.ClassDefContext) -> bool:
"""
Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
value of order.
"""
cmp = _get_decorator_optional_bool_argument(ctx, "cmp")
eq = _get_decorator_optional_bool_argument(ctx, "eq")
order = _get_decorator_optional_bool_argument(ctx, "order")
if cmp is not None and any((eq is not None, order is not None)):
ctx.api.fail('Don\'t mix "cmp" with "eq" and "order"', ctx.reason)
# cmp takes precedence due to bw-compatibility.
if cmp is not None:
return cmp
# If left None, equality is on and ordering mirrors equality.
if eq is None:
eq = True
if order is None:
order = eq
if eq is False and order is True:
ctx.api.fail("eq must be True if order is True", ctx.reason)
return order
def _get_decorator_optional_bool_argument(
ctx: mypy.plugin.ClassDefContext, name: str, default: bool | None = None
) -> bool | None:
"""Return the Optional[bool] argument for the decorator.
This handles both @decorator(...) and @decorator.
"""
if isinstance(ctx.reason, CallExpr):
attr_value = _get_argument(ctx.reason, name)
if attr_value:
if isinstance(attr_value, NameExpr):
if attr_value.fullname == "builtins.True":
return True
if attr_value.fullname == "builtins.False":
return False
if attr_value.fullname == "builtins.None":
return None
ctx.api.fail(
f'"{name}" argument must be a True, False, or None literal',
ctx.reason,
code=LITERAL_REQ,
)
return default
return default
else:
return default
def attr_tag_callback(ctx: mypy.plugin.ClassDefContext) -> None:
"""Record that we have an attrs class in the main semantic analysis pass.
The later pass implemented by attr_class_maker_callback will use this
to detect attrs classes in base classes.
"""
# The value is ignored, only the existence matters.
ctx.cls.info.metadata["attrs_tag"] = {}
def attr_class_maker_callback(
ctx: mypy.plugin.ClassDefContext,
auto_attribs_default: bool | None = False,
frozen_default: bool = False,
slots_default: bool = False,
) -> bool:
"""Add necessary dunder methods to classes decorated with attr.s.
attrs is a package that lets you define classes without writing dull boilerplate code.
At a quick glance, the decorator searches the class body for assignments of `attr.ib`s (or
annotated variables if auto_attribs=True), then depending on how the decorator is called,
it will add an __init__ or all the compare methods.
For frozen=True it will turn the attrs into properties.
Hashability will be set according to https://www.attrs.org/en/stable/hashing.html.
See https://www.attrs.org/en/stable/how-does-it-work.html for information on how attrs works.
If this returns False, some required metadata was not ready yet, and we need another
pass.
"""
with state.strict_optional_set(ctx.api.options.strict_optional):
# This hook is called during semantic analysis, but it uses a bunch of
# type-checking ops, so it needs the strict optional set properly.
return attr_class_maker_callback_impl(
ctx, auto_attribs_default, frozen_default, slots_default
)
def attr_class_maker_callback_impl(
ctx: mypy.plugin.ClassDefContext,
auto_attribs_default: bool | None,
frozen_default: bool,
slots_default: bool,
) -> bool:
info = ctx.cls.info
init = _get_decorator_bool_argument(ctx, "init", True)
frozen = _get_frozen(ctx, frozen_default)
order = _determine_eq_order(ctx)
slots = _get_decorator_bool_argument(ctx, "slots", slots_default)
auto_attribs = _get_decorator_optional_bool_argument(ctx, "auto_attribs", auto_attribs_default)
kw_only = _get_decorator_bool_argument(ctx, "kw_only", False)
match_args = _get_decorator_bool_argument(ctx, "match_args", True)
for super_info in ctx.cls.info.mro[1:-1]:
if "attrs_tag" in super_info.metadata and "attrs" not in super_info.metadata:
# Super class is not ready yet. Request another pass.
return False
attributes = _analyze_class(ctx, auto_attribs, kw_only)
# Check if attribute types are ready.
for attr in attributes:
node = info.get(attr.name)
if node is None:
# This name is likely blocked by some semantic analysis error that
# should have been reported already.
_add_empty_metadata(info)
return True
_add_attrs_magic_attribute(ctx, [(attr.name, info[attr.name].type) for attr in attributes])
if slots:
_add_slots(ctx, attributes)
if match_args and ctx.api.options.python_version[:2] >= (3, 10):
# `.__match_args__` is only added for python3.10+, but the argument
# exists for earlier versions as well.
_add_match_args(ctx, attributes)
# Save the attributes so that subclasses can reuse them.
ctx.cls.info.metadata["attrs"] = {
"attributes": [attr.serialize() for attr in attributes],
"frozen": frozen,
}
adder = MethodAdder(ctx)
# If __init__ is not being generated, attrs still generates it as __attrs_init__ instead.
_add_init(ctx, attributes, adder, "__init__" if init else ATTRS_INIT_NAME)
if order:
_add_order(ctx, adder)
if frozen:
_make_frozen(ctx, attributes)
# Frozen classes are hashable by default, even if inheriting from non-frozen ones.
hashable: bool | None = _get_decorator_bool_argument(
ctx, "hash", True
) and _get_decorator_bool_argument(ctx, "unsafe_hash", True)
else:
hashable = _get_decorator_optional_bool_argument(ctx, "unsafe_hash")
if hashable is None: # unspecified
hashable = _get_decorator_optional_bool_argument(ctx, "hash")
eq = _get_decorator_optional_bool_argument(ctx, "eq")
has_own_hash = "__hash__" in ctx.cls.info.names
if has_own_hash or (hashable is None and eq is False):
pass # Do nothing.
elif hashable:
# We copy the `__hash__` signature from `object` to make them hashable.
ctx.cls.info.names["__hash__"] = ctx.cls.info.mro[-1].names["__hash__"]
else:
_remove_hashability(ctx)
return True
def _get_frozen(ctx: mypy.plugin.ClassDefContext, frozen_default: bool) -> bool:
    """Return whether this class is frozen.

    A class is frozen if the decorator says so (explicitly or via
    *frozen_default*), or if any attrs ancestor in its MRO was frozen.
    """
    if _get_decorator_bool_argument(ctx, "frozen", frozen_default):
        return True
    # Frozen-ness is inherited: scan the ancestors (excluding the class
    # itself and `object`) for an attrs class already recorded as frozen.
    return any(
        "attrs" in ancestor.metadata and ancestor.metadata["attrs"]["frozen"]
        for ancestor in ctx.cls.info.mro[1:-1]
    )
def _analyze_class(
    ctx: mypy.plugin.ClassDefContext, auto_attribs: bool | None, kw_only: bool
) -> list[Attribute]:
    """Analyze the class body of an attr maker, its parents, and return the Attributes found.

    auto_attribs=True means we'll generate attributes from type annotations also.
    auto_attribs=None means we'll detect which mode to use.
    kw_only=True means that all attributes created here will be keyword only args in __init__.
    """
    own_attrs: dict[str, Attribute] = {}
    if auto_attribs is None:
        auto_attribs = _detect_auto_attribs(ctx)
    # Walk the body looking for assignments and decorators.
    for stmt in ctx.cls.defs.body:
        if isinstance(stmt, AssignmentStmt):
            for attr in _attributes_from_assignment(ctx, stmt, auto_attribs, kw_only):
                # When attrs are defined twice in the same body we want to use the 2nd definition
                # in the 2nd location. So remove it from the OrderedDict.
                # Unless it's auto_attribs in which case we want the 2nd definition in the
                # 1st location.
                if not auto_attribs and attr.name in own_attrs:
                    del own_attrs[attr.name]
                own_attrs[attr.name] = attr
        elif isinstance(stmt, Decorator):
            # `x.default` / `x.validator` decorators update the matching
            # Attribute and are stripped from the node.
            _cleanup_decorator(stmt, own_attrs)
    for attribute in own_attrs.values():
        # Even though these look like class level assignments we want them to look like
        # instance level assignments.
        if attribute.name in ctx.cls.info.names:
            node = ctx.cls.info.names[attribute.name].node
            if isinstance(node, PlaceholderNode):
                # This node is not ready yet.
                continue
            assert isinstance(node, Var), node
            node.is_initialized_in_class = False
    # Traverse the MRO and collect attributes from the parents.
    taken_attr_names = set(own_attrs)
    super_attrs = []
    for super_info in ctx.cls.info.mro[1:-1]:
        if "attrs" in super_info.metadata:
            # Each class depends on the set of attributes in its attrs ancestors.
            ctx.api.add_plugin_dependency(make_wildcard_trigger(super_info.fullname))
            for data in super_info.metadata["attrs"]["attributes"]:
                # Only add an attribute if it hasn't been defined before. This
                # allows for overwriting attribute definitions by subclassing.
                if data["name"] not in taken_attr_names:
                    a = Attribute.deserialize(super_info, data, ctx.api)
                    a.expand_typevar_from_subtype(ctx.cls.info)
                    super_attrs.append(a)
                    taken_attr_names.add(a.name)
    # Parent attributes come first, mirroring the runtime __init__ order.
    attributes = super_attrs + list(own_attrs.values())
    # Check the init args for correct default-ness. Note: This has to be done after all the
    # attributes for all classes have been read, because subclasses can override parents.
    last_default = False
    for i, attribute in enumerate(attributes):
        if not attribute.init:
            continue
        if attribute.kw_only:
            # Keyword-only attributes don't care whether they are default or not.
            continue
        # If the issue comes from merging different classes, report it
        # at the class definition point.
        context = attribute.context if i >= len(super_attrs) else ctx.cls
        if not attribute.has_default and last_default:
            ctx.api.fail("Non-default attributes not allowed after default attributes.", context)
        last_default |= attribute.has_default
    return attributes
def _add_empty_metadata(info: TypeInfo) -> None:
    """Add empty metadata to mark that we've finished processing this class.

    Subclasses look for the "attrs" key to know their base is ready, so even
    a class that could not be fully analyzed must record a placeholder.
    """
    placeholder = {"attributes": [], "frozen": False}
    info.metadata["attrs"] = placeholder
def _detect_auto_attribs(ctx: mypy.plugin.ClassDefContext) -> bool:
    """Return whether auto_attribs should be enabled or disabled.

    It's disabled if there are any unannotated attrib() assignments in the
    class body; otherwise it is enabled.
    """
    for stmt in ctx.cls.defs.body:
        if not isinstance(stmt, AssignmentStmt):
            continue
        for lvalue in stmt.lvalues:
            lvalues, rvalues = _parse_assignments(lvalue, stmt)
            if len(lvalues) != len(rvalues):
                # Not a 1:1 assignment, so it can't be an attrib.
                continue
            for _, rvalue in zip(lvalues, rvalues):
                is_attrib_call = (
                    isinstance(rvalue, CallExpr)
                    and isinstance(rvalue.callee, RefExpr)
                    and rvalue.callee.fullname in attr_attrib_makers
                )
                if is_attrib_call and not stmt.new_syntax:
                    # An attrib() without an annotation: auto_attribs=True
                    # would be invalid for this class.
                    return False
    return True
def _attributes_from_assignment(
    ctx: mypy.plugin.ClassDefContext, stmt: AssignmentStmt, auto_attribs: bool, kw_only: bool
) -> Iterable[Attribute]:
    """Yield the Attribute objects created by this assignment statement.

    The assignments can look like this:
        x = attr.ib()
        x = y = attr.ib()
        x, y = attr.ib(), attr.ib()
    or if auto_attribs is enabled also like this:
        x: type
        x: type = default_value
        x: type = attr.ib(...)
    """
    for lvalue in stmt.lvalues:
        lvalues, rvalues = _parse_assignments(lvalue, stmt)
        if len(lvalues) != len(rvalues):
            # Not a 1:1 assignment, so it can't be an attrib.
            continue
        for lhs, rvalue in zip(lvalues, rvalues):
            # Is the right hand side a call to an attribute maker?
            is_attrib_call = (
                isinstance(rvalue, CallExpr)
                and isinstance(rvalue.callee, RefExpr)
                and rvalue.callee.fullname in attr_attrib_makers
            )
            if is_attrib_call:
                attr = _attribute_from_attrib_maker(ctx, auto_attribs, kw_only, lhs, rvalue, stmt)
                if attr is not None:
                    yield attr
            elif auto_attribs and stmt.type and stmt.new_syntax and not is_class_var(lhs):
                # A plain annotated assignment, picked up via auto_attribs.
                yield _attribute_from_auto_attrib(ctx, kw_only, lhs, rvalue, stmt)
def _cleanup_decorator(stmt: Decorator, attr_map: dict[str, Attribute]) -> None:
    """Handle decorators in class bodies.

    `x.default` will set a default value on x
    `x.validator` and `x.default` will get removed to avoid throwing a type error.
    """
    kept = []
    for func_decorator in stmt.decorators:
        refers_to_attr = (
            isinstance(func_decorator, MemberExpr)
            and isinstance(func_decorator.expr, NameExpr)
            and func_decorator.expr.name in attr_map
        )
        if not refers_to_attr:
            kept.append(func_decorator)
            continue
        if func_decorator.name == "default":
            attr_map[func_decorator.expr.name].has_default = True
        if func_decorator.name not in ("default", "validator"):
            kept.append(func_decorator)
            continue
        # `default`/`validator` are decorators on the attrib object that only
        # exist during class creation time. In order to not trigger a type
        # error later we just drop them. This might leave us with a Decorator
        # with no decorators (Emperor's new clothes?)
        # TODO: It would be nice to type-check these rather than remove them.
        #   default should be Callable[[], T]
        #   validator should be Callable[[Any, 'Attribute', T], Any]
        #   where T is the type of the attribute.
    stmt.decorators[:] = kept
def _attribute_from_auto_attrib(
    ctx: mypy.plugin.ClassDefContext,
    kw_only: bool,
    lhs: NameExpr,
    rvalue: Expression,
    stmt: AssignmentStmt,
) -> Attribute:
    """Return an Attribute for a new type assignment."""
    name = unmangle(lhs.name)
    # A bare annotation (`x: int`, no equal sign) is parsed with a
    # TempNode(AnyType()) rvalue, so a non-Temp rvalue means a default exists.
    has_rhs = not isinstance(rvalue, TempNode)
    sym = ctx.cls.info.names.get(name)
    init_type = None if sym is None else sym.type
    return Attribute(name, None, ctx.cls.info, has_rhs, True, kw_only, None, stmt, init_type)
def _attribute_from_attrib_maker(
    ctx: mypy.plugin.ClassDefContext,
    auto_attribs: bool,
    kw_only: bool,
    lhs: NameExpr,
    rvalue: CallExpr,
    stmt: AssignmentStmt,
) -> Attribute | None:
    """Return an Attribute from the assignment or None if you can't make one.

    *rvalue* is the attr.ib()/attrib() call; its keyword arguments
    (init, kw_only, default, factory, type, converter/convert, alias)
    are read to build the Attribute. Errors are reported via ctx.api.fail.
    """
    if auto_attribs and not stmt.new_syntax:
        # auto_attribs requires an annotation on *every* attr.ib.
        assert lhs.node is not None
        ctx.api.msg.need_annotation_for_var(lhs.node, stmt)
        return None
    if len(stmt.lvalues) > 1:
        ctx.api.fail("Too many names for one attribute", stmt)
        return None
    # This is the type that belongs in the __init__ method for this attrib.
    init_type = stmt.type
    # Read all the arguments from the call.
    init = _get_bool_argument(ctx, rvalue, "init", True)
    # Note: If the class decorator says kw_only=True the attribute is ignored.
    # See https://github.com/python-attrs/attrs/issues/481 for explanation.
    kw_only |= _get_bool_argument(ctx, rvalue, "kw_only", False)
    # TODO: Check for attr.NOTHING
    attr_has_default = bool(_get_argument(rvalue, "default"))
    attr_has_factory = bool(_get_argument(rvalue, "factory"))
    if attr_has_default and attr_has_factory:
        ctx.api.fail('Can\'t pass both "default" and "factory".', rvalue)
    elif attr_has_factory:
        # A factory is just another way of supplying a default.
        attr_has_default = True
    # If the type isn't set through annotation but is passed through `type=` use that.
    type_arg = _get_argument(rvalue, "type")
    if type_arg and not init_type:
        try:
            un_type = expr_to_unanalyzed_type(type_arg, ctx.api.options, ctx.api.is_stub_file)
        except TypeTranslationError:
            ctx.api.fail("Invalid argument to type", type_arg)
        else:
            init_type = ctx.api.anal_type(un_type)
    if init_type and isinstance(lhs.node, Var) and not lhs.node.type:
        # If there is no annotation, add one.
        lhs.node.type = init_type
        lhs.is_inferred_def = False
    # Note: convert is deprecated but works the same as converter.
    converter = _get_argument(rvalue, "converter")
    convert = _get_argument(rvalue, "convert")
    if convert and converter:
        ctx.api.fail('Can\'t pass both "convert" and "converter".', rvalue)
    elif convert:
        ctx.api.fail("convert is deprecated, use converter", rvalue)
        converter = convert
    converter_info = _parse_converter(ctx, converter)
    # Custom alias might be defined:
    alias = None
    alias_expr = _get_argument(rvalue, "alias")
    if alias_expr:
        alias = ctx.api.parse_str_literal(alias_expr)
        if alias is None:
            ctx.api.fail(
                '"alias" argument to attrs field must be a string literal',
                rvalue,
                code=LITERAL_REQ,
            )
    name = unmangle(lhs.name)
    return Attribute(
        name, alias, ctx.cls.info, attr_has_default, init, kw_only, converter_info, stmt, init_type
    )
def _parse_converter(
    ctx: mypy.plugin.ClassDefContext, converter_expr: Expression | None
) -> Converter | None:
    """Return the Converter object from an Expression.

    Supported converter forms: references to annotated/unannotated functions,
    overloaded functions, classes (including generic type applications), and
    lambdas. attr.converters.optional(type) is unwrapped and makes the
    resulting init type Optional. Unsupported forms report an error.
    """
    # TODO: Support complex converters, e.g. lambdas, calls, etc.
    if not converter_expr:
        return None
    converter_info = Converter()
    if (
        isinstance(converter_expr, CallExpr)
        and isinstance(converter_expr.callee, RefExpr)
        and converter_expr.callee.fullname in attr_optional_converters
        and converter_expr.args
        and converter_expr.args[0]
    ):
        # Special handling for attr.converters.optional(type)
        # We extract the type and add make the init_args Optional in Attribute.argument
        converter_expr = converter_expr.args[0]
        is_attr_converters_optional = True
    else:
        is_attr_converters_optional = False
    converter_type: Type | None = None
    if isinstance(converter_expr, RefExpr) and converter_expr.node:
        if isinstance(converter_expr.node, FuncDef):
            if converter_expr.node.type and isinstance(converter_expr.node.type, FunctionLike):
                converter_type = converter_expr.node.type
            else:  # The converter is an unannotated function.
                converter_info.init_type = AnyType(TypeOfAny.unannotated)
                return converter_info
        elif isinstance(converter_expr.node, OverloadedFuncDef) and is_valid_overloaded_converter(
            converter_expr.node
        ):
            converter_type = converter_expr.node.type
        elif isinstance(converter_expr.node, TypeInfo):
            # A class used as a converter: its __init__/__new__ signature applies.
            converter_type = type_object_type(converter_expr.node, ctx.api.named_type)
    elif (
        isinstance(converter_expr, IndexExpr)
        and isinstance(converter_expr.analyzed, TypeApplication)
        and isinstance(converter_expr.base, RefExpr)
        and isinstance(converter_expr.base.node, TypeInfo)
    ):
        # The converter is a generic type.
        converter_type = type_object_type(converter_expr.base.node, ctx.api.named_type)
        if isinstance(converter_type, CallableType):
            converter_type = apply_generic_arguments(
                converter_type,
                converter_expr.analyzed.types,
                ctx.api.msg.incompatible_typevar_value,
                converter_type,
            )
        else:
            converter_type = None
    if isinstance(converter_expr, LambdaExpr):
        # TODO: should we send a fail if converter_expr.min_args > 1?
        converter_info.init_type = AnyType(TypeOfAny.unannotated)
        return converter_info
    if not converter_type:
        # Signal that we have an unsupported converter.
        ctx.api.fail(
            "Unsupported converter, only named functions, types and lambdas are currently "
            "supported",
            converter_expr,
        )
        converter_info.init_type = AnyType(TypeOfAny.from_error)
        return converter_info
    converter_type = get_proper_type(converter_type)
    if isinstance(converter_type, CallableType) and converter_type.arg_types:
        # The __init__ argument accepts whatever the converter's first parameter does.
        converter_info.init_type = converter_type.arg_types[0]
        if not is_attr_converters_optional:
            converter_info.ret_type = converter_type.ret_type
    elif isinstance(converter_type, Overloaded):
        types: list[Type] = []
        for item in converter_type.items:
            # Walk the overloads looking for methods that can accept one argument.
            num_arg_types = len(item.arg_types)
            if not num_arg_types:
                continue
            if num_arg_types > 1 and any(kind == ARG_POS for kind in item.arg_kinds[1:]):
                continue
            types.append(item.arg_types[0])
        # Make a union of all the valid types.
        if types:
            converter_info.init_type = make_simplified_union(types)
    if is_attr_converters_optional and converter_info.init_type:
        # If the converter was attr.converter.optional(type) then add None to
        # the allowed init_type.
        converter_info.init_type = UnionType.make_union([converter_info.init_type, NoneType()])
    return converter_info
def is_valid_overloaded_converter(defn: OverloadedFuncDef) -> bool:
    """Return True when every decorated overload item has a function-like type.

    Undecorated items are always acceptable.
    """
    for item in defn.items:
        if isinstance(item, Decorator) and not isinstance(item.func.type, FunctionLike):
            return False
    return True
def _parse_assignments(
    lvalue: Expression, stmt: AssignmentStmt
) -> tuple[list[NameExpr], list[Expression]]:
    """Convert a possibly complex assignment expression into lists of lvalues and rvalues."""
    lvalues: list[NameExpr] = []
    rvalues: list[Expression] = []
    if isinstance(lvalue, NameExpr):
        # Simple `x = ...` (or one target of a chained assignment).
        lvalues = [lvalue]
        rvalues = [stmt.rvalue]
    elif isinstance(lvalue, (TupleExpr, ListExpr)):
        # Unpacking like `x, y = a, b`: targets are only usable when every
        # one is a plain name, and rvalues only when the right-hand side is
        # itself a literal tuple/list.
        if all(isinstance(item, NameExpr) for item in lvalue.items):
            lvalues = cast("list[NameExpr]", lvalue.items)
        if isinstance(stmt.rvalue, (TupleExpr, ListExpr)):
            rvalues = stmt.rvalue.items
    return lvalues, rvalues
def _add_order(ctx: mypy.plugin.ClassDefContext, adder: MethodAdder) -> None:
    """Generate all the ordering methods (__lt__/__le__/__gt__/__ge__) for this class."""
    bool_type = ctx.api.named_type("builtins.bool")
    object_type = ctx.api.named_type("builtins.object")
    # Make the types be:
    # AT = TypeVar('AT')
    # def __lt__(self: AT, other: AT) -> bool
    # This way comparisons with subclasses will work correctly.
    fullname = f"{ctx.cls.info.fullname}.{SELF_TVAR_NAME}"
    tvd = TypeVarType(
        SELF_TVAR_NAME,
        fullname,
        # Namespace is patched per-method below.
        id=TypeVarId(-1, namespace=""),
        values=[],
        upper_bound=object_type,
        default=AnyType(TypeOfAny.from_omitted_generics),
    )
    self_tvar_expr = TypeVarExpr(
        SELF_TVAR_NAME, fullname, [], object_type, AnyType(TypeOfAny.from_omitted_generics)
    )
    ctx.cls.info.names[SELF_TVAR_NAME] = SymbolTableNode(MDEF, self_tvar_expr)
    for method in ["__lt__", "__le__", "__gt__", "__ge__"]:
        # Give each method its own copy of the type variable, namespaced to
        # that method, so type inference treats them independently.
        namespace = f"{ctx.cls.info.fullname}.{method}"
        tvd = tvd.copy_modified(id=TypeVarId(tvd.id.raw_id, namespace=namespace))
        args = [Argument(Var("other", tvd), tvd, None, ARG_POS)]
        adder.add_method(method, args, bool_type, self_type=tvd, tvd=tvd)
def _make_frozen(ctx: mypy.plugin.ClassDefContext, attributes: list[Attribute]) -> None:
    """Turn all the attributes into properties to simulate frozen classes."""
    info = ctx.cls.info
    for attribute in attributes:
        if attribute.name in info.names:
            # The attribute is declared on this class, so its Var can be
            # marked read-only in place.
            node = info.names[attribute.name].node
            if not isinstance(node, Var):
                # The superclass attribute was overridden with a non-variable;
                # the override will be verified during type checking, so
                # there is nothing to do here.
                continue
            node.is_property = True
        else:
            # Inherited attribute: shadow it with a fresh read-only Var on
            # this class so the superclass definition stays untouched.
            var = Var(attribute.name, attribute.init_type)
            var.info = info
            var._fullname = f"{info.fullname}.{var.name}"
            var.is_property = True
            info.names[var.name] = SymbolTableNode(MDEF, var)
def _add_init(
    ctx: mypy.plugin.ClassDefContext,
    attributes: list[Attribute],
    adder: MethodAdder,
    method_name: Literal["__init__", "__attrs_init__"],
) -> None:
    """Generate an __init__ method for the attributes and add it to the class.

    When init=False was passed to the class decorator, the caller passes
    "__attrs_init__" as method_name, mirroring attrs' runtime behavior.
    """
    # Convert attributes to arguments with kw_only arguments at the end of
    # the argument list
    pos_args = []
    kw_only_args = []
    sym_table = ctx.cls.info.names
    for attribute in attributes:
        if not attribute.init:
            continue
        if attribute.kw_only:
            kw_only_args.append(attribute.argument(ctx))
        else:
            pos_args.append(attribute.argument(ctx))
        # If the attribute is Final, present in `__init__` and has
        # no default, make sure it doesn't error later.
        if not attribute.has_default and attribute.name in sym_table:
            sym_node = sym_table[attribute.name].node
            if isinstance(sym_node, Var) and sym_node.is_final:
                sym_node.final_set_in_init = True
    args = pos_args + kw_only_args
    if all(
        # We use getattr rather than instance checks because the variable.type
        # might be wrapped into a Union or some other type, but even non-Any
        # types reliably track the fact that the argument was not annotated.
        getattr(arg.variable.type, "type_of_any", None) == TypeOfAny.unannotated
        for arg in args
    ):
        # This workaround makes --disallow-incomplete-defs usable with attrs,
        # but is definitely suboptimal as a long-term solution.
        # See https://github.com/python/mypy/issues/5954 for discussion.
        for a in args:
            a.variable.type = AnyType(TypeOfAny.implementation_artifact)
            a.type_annotation = AnyType(TypeOfAny.implementation_artifact)
    adder.add_method(method_name, args, NoneType())
def _add_attrs_magic_attribute(
    ctx: mypy.plugin.ClassDefContext, attrs: list[tuple[str, Type | None]]
) -> None:
    """Add the attrs magic attribute (MAGIC_ATTR_NAME) to the class.

    Its type is a tuple of `attr.Attribute` instances, one per (name, type)
    pair in *attrs*, backed by a synthesized named-tuple-like TypeInfo.
    NOTE(review): MAGIC_ATTR_NAME is presumably "__attrs_attrs__" — the
    constant is defined outside this chunk; confirm before relying on it.
    """
    any_type = AnyType(TypeOfAny.explicit)
    # Untyped attributes fall back to attr.Attribute[Any] (or plain Any when
    # attr.Attribute itself can't be resolved).
    attributes_types: list[Type] = [
        ctx.api.named_type_or_none("attr.Attribute", [attr_type or any_type]) or any_type
        for _, attr_type in attrs
    ]
    fallback_type = ctx.api.named_type(
        "builtins.tuple", [ctx.api.named_type_or_none("attr.Attribute", [any_type]) or any_type]
    )
    attr_name = MAGIC_ATTR_CLS_NAME_TEMPLATE.format(ctx.cls.fullname.replace(".", "_"))
    ti = ctx.api.basic_new_typeinfo(attr_name, fallback_type, 0)
    for (name, _), attr_type in zip(attrs, attributes_types):
        var = Var(name, attr_type)
        var._fullname = name
        var.is_property = True
        proper_type = get_proper_type(attr_type)
        if isinstance(proper_type, Instance):
            var.info = proper_type.type
        ti.names[name] = SymbolTableNode(MDEF, var, plugin_generated=True)
    attributes_type = Instance(ti, [])
    # We need to stash the type of the magic attribute so it can be
    # loaded on cached runs.
    ctx.cls.info.names[attr_name] = SymbolTableNode(MDEF, ti, plugin_generated=True)
    add_attribute_to_class(
        ctx.api,
        ctx.cls,
        MAGIC_ATTR_NAME,
        TupleType(attributes_types, fallback=attributes_type),
        fullname=f"{ctx.cls.fullname}.{MAGIC_ATTR_NAME}",
        override_allow_incompatible=True,
        is_classvar=True,
    )
def _add_slots(ctx: mypy.plugin.ClassDefContext, attributes: list[Attribute]) -> None:
    """Set `__slots__` for the class from its attrs attributes."""
    for ancestor in ctx.cls.info.mro[1:-1]:
        if ancestor.slots is None:
            # Some type in the MRO (excluding `self` and `object`) lacks
            # concrete `__slots__`, so adding slots here would be pointless.
            return
    # Unlike `@dataclasses.dataclass`, `__slots__` is rewritten here.
    ctx.cls.info.slots = {attr.name for attr in attributes}
    # Also, inject a `__slots__` attribute into the class namespace, typed as
    # a tuple of str with one element per attribute.
    slots_type = TupleType(
        [ctx.api.named_type("builtins.str") for _ in attributes],
        fallback=ctx.api.named_type("builtins.tuple"),
    )
    add_attribute_to_class(api=ctx.api, cls=ctx.cls, name="__slots__", typ=slots_type)
def _add_match_args(ctx: mypy.plugin.ClassDefContext, attributes: list[Attribute]) -> None:
    """Generate `__match_args__` unless the user defined their own."""
    existing = ctx.cls.info.names.get("__match_args__")
    if existing is not None and not existing.plugin_generated:
        # The user wrote `__match_args__` explicitly; leave it alone.
        return
    str_type = ctx.api.named_type("builtins.str")
    literal_items: list[Type] = []
    for attr in attributes:
        if attr.kw_only or not attr.init:
            # Only positional __init__ arguments participate in matching.
            continue
        literal_items.append(
            str_type.copy_modified(last_known_value=LiteralType(attr.name, fallback=str_type))
        )
    match_args = TupleType(literal_items, fallback=ctx.api.named_type("builtins.tuple"))
    add_attribute_to_class(api=ctx.api, cls=ctx.cls, name="__match_args__", typ=match_args)
def _remove_hashability(ctx: mypy.plugin.ClassDefContext) -> None:
    """Remove hashability from a class."""
    # Overwrite `__hash__` with a None-typed ClassVar, mirroring the runtime
    # `__hash__ = None` idiom used to make instances unhashable.
    add_attribute_to_class(
        api=ctx.api,
        cls=ctx.cls,
        name="__hash__",
        typ=NoneType(),
        is_classvar=True,
        overwrite_existing=True,
    )
| Attribute |
python | agronholm__apscheduler | src/apscheduler/_events.py | {
"start": 3170,
"end": 3575
} | class ____(DataStoreEvent):
"""
Signals that a new job was added to the store.
:ivar job_id: ID of the job that was added
:ivar task_id: ID of the task the job would run
:ivar schedule_id: ID of the schedule the job was created from
"""
job_id: UUID = attrs.field(converter=as_uuid)
task_id: str
schedule_id: str | None
@attrs.define(kw_only=True, frozen=True)
| JobAdded |
python | zarr-developers__zarr-python | src/zarr/codecs/numcodecs/_codecs.py | {
"start": 2339,
"end": 4878
} | class ____(Metadata):
codec_name: str
codec_config: dict[str, JSON]
def __init_subclass__(cls, *, codec_name: str | None = None, **kwargs: Any) -> None:
"""To be used only when creating the actual public-facing codec class."""
super().__init_subclass__(**kwargs)
if codec_name is not None:
namespace = codec_name
cls_name = f"{CODEC_PREFIX}{namespace}.{cls.__name__}"
cls.codec_name = f"{CODEC_PREFIX}{namespace}"
cls.__doc__ = f"""
See [{cls_name}][] for more details and parameters.
"""
def __init__(self, **codec_config: JSON) -> None:
if not self.codec_name:
raise ValueError(
"The codec name needs to be supplied through the `codec_name` attribute."
) # pragma: no cover
unprefixed_codec_name = _expect_name_prefix(self.codec_name)
if "id" not in codec_config:
codec_config = {"id": unprefixed_codec_name, **codec_config}
elif codec_config["id"] != unprefixed_codec_name:
raise ValueError(
f"Codec id does not match {unprefixed_codec_name}. Got: {codec_config['id']}."
) # pragma: no cover
object.__setattr__(self, "codec_config", codec_config)
warn(
"Numcodecs codecs are not in the Zarr version 3 specification and "
"may not be supported by other zarr implementations.",
category=ZarrUserWarning,
stacklevel=2,
)
@cached_property
def _codec(self) -> Numcodec:
return get_numcodec(self.codec_config) # type: ignore[arg-type]
@classmethod
def from_dict(cls, data: dict[str, JSON]) -> Self:
codec_config = _parse_codec_configuration(data)
return cls(**codec_config)
def to_dict(self) -> dict[str, JSON]:
codec_config = self.codec_config.copy()
codec_config.pop("id", None)
return {
"name": self.codec_name,
"configuration": codec_config,
}
def compute_encoded_size(self, input_byte_length: int, chunk_spec: ArraySpec) -> int:
raise NotImplementedError # pragma: no cover
# Override __repr__ because dynamically constructed classes don't seem to work otherwise
def __repr__(self) -> str:
codec_config = self.codec_config.copy()
codec_config.pop("id", None)
return f"{self.__class__.__name__}(codec_name={self.codec_name!r}, codec_config={codec_config!r})"
| _NumcodecsCodec |
python | sphinx-doc__sphinx | sphinx/directives/other.py | {
"start": 8862,
"end": 9495
} | class ____(SphinxDirective):
"""Directive for a list of names."""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec: ClassVar[OptionSpec] = {}
def run(self) -> list[Node]:
children = self.parse_content_to_nodes()
if len(children) != 1 or not isinstance(children[0], nodes.bullet_list):
logger.warning(
__('.. acks content is not a list'),
location=(self.env.current_document.docname, self.lineno),
)
return []
return [addnodes.acks('', *children)]
| Acks |
python | pytorch__pytorch | test/test_tensor_creation_ops.py | {
"start": 3397,
"end": 154551
} | class ____(TestCase):
exact_dtype = True
@onlyCPU
@dtypes(torch.float)
def test_diag_embed(self, device, dtype):
x = torch.arange(3 * 4, dtype=dtype, device=device).view(3, 4)
result = torch.diag_embed(x)
expected = torch.stack([torch.diag(r) for r in x], 0)
self.assertEqual(result, expected)
result = torch.diag_embed(x, offset=1, dim1=0, dim2=2)
expected = torch.stack([torch.diag(r, 1) for r in x], 1)
self.assertEqual(result, expected)
def test_cat_mem_overlap(self, device):
x = torch.rand((1, 3), device=device).expand((6, 3))
y = torch.rand((3, 3), device=device)
with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
torch.cat([y, y], out=x)
@onlyNativeDeviceTypes
def test_vander(self, device):
x = torch.tensor([1, 2, 3, 5], device=device)
self.assertEqual((0, 0), torch.vander(torch.tensor([]), 0).shape)
with self.assertRaisesRegex(RuntimeError, "N must be non-negative."):
torch.vander(x, N=-1)
with self.assertRaisesRegex(RuntimeError, "x must be a one-dimensional tensor."):
torch.vander(torch.stack((x, x)))
@onlyNativeDeviceTypes
@dtypes(torch.bool, torch.uint8, torch.int8, torch.short, torch.int, torch.long,
torch.float, torch.double,
torch.cfloat, torch.cdouble)
def test_vander_types(self, device, dtype):
if dtype is torch.uint8:
# Note: no negative uint8 values
X = [[1, 2, 3, 5], [0, 1 / 3, 1, math.pi, 3 / 7]]
elif dtype is torch.bool:
# Note: see https://github.com/pytorch/pytorch/issues/37398
# for why this is necessary.
X = [[True, True, True, True], [False, True, True, True, True]]
elif dtype in [torch.cfloat, torch.cdouble]:
X = [[1 + 1j, 1 + 0j, 0 + 1j, 0 + 0j],
[2 + 2j, 3 + 2j, 4 + 3j, 5 + 4j]]
else:
X = [[1, 2, 3, 5], [-math.pi, 0, 1 / 3, 1, math.pi, 3 / 7]]
N = [None, 0, 1, 3]
increasing = [False, True]
for x, n, inc in product(X, N, increasing):
numpy_dtype = torch_to_numpy_dtype_dict[dtype]
pt_x = torch.tensor(x, device=device, dtype=dtype)
np_x = np.array(x, dtype=numpy_dtype)
pt_res = torch.vander(pt_x, increasing=inc) if n is None else torch.vander(pt_x, n, inc)
np_res = np.vander(np_x, n, inc)
self.assertEqual(
pt_res,
torch.from_numpy(np_res),
atol=1e-3,
rtol=0,
exact_dtype=False)
def test_cat_all_dtypes_and_devices(self, device):
for dt in all_types_and_complex_and(
torch.half,
torch.bool,
torch.bfloat16,
torch.chalf,
torch.float8_e4m3fn,
torch.float8_e4m3fnuz,
torch.float8_e5m2,
torch.float8_e5m2fnuz,
):
x = torch.tensor([[1, 2], [3, 4]], dtype=dt, device=device)
expected1 = torch.tensor([[1, 2], [3, 4], [1, 2], [3, 4]], dtype=dt, device=device)
self.assertEqual(torch.cat((x, x), 0), expected1)
expected2 = torch.tensor([[1, 2, 1, 2], [3, 4, 3, 4]], dtype=dt, device=device)
self.assertEqual(torch.cat((x, x), 1), expected2)
def test_fill_all_dtypes_and_devices(self, device):
for dt in all_types_complex_float8_and(torch.half, torch.bool, torch.bfloat16, torch.chalf):
for x in [torch.tensor((10, 10), dtype=dt, device=device),
torch.empty(10000, dtype=dt, device=device)]: # large tensor
numel = x.numel()
bound_dtypes = (torch.uint8, torch.int8, torch.float8_e4m3fn,
torch.float8_e4m3fnuz, torch.float8_e5m2, torch.float8_e5m2fnuz)
bound = 100 if dt in bound_dtypes else 2000
for n in range(-bound, bound, bound // 10):
x.fill_(n)
self.assertEqual(x, torch.tensor([n] * numel, dtype=dt, device=device))
self.assertEqual(dt, x.dtype)
def test_roll(self, device):
numbers = torch.arange(1, 9, device=device)
single_roll = numbers.roll(1, 0)
expected = torch.tensor([8, 1, 2, 3, 4, 5, 6, 7], device=device)
self.assertEqual(single_roll, expected, msg=f"{single_roll} did not equal expected result")
roll_backwards = numbers.roll(-2, 0)
expected = torch.tensor([3, 4, 5, 6, 7, 8, 1, 2], device=device)
self.assertEqual(roll_backwards, expected, msg=f"{roll_backwards} did not equal expected result")
data = numbers.view(2, 2, 2)
rolled = data.roll(1, 0)
expected = torch.tensor([5, 6, 7, 8, 1, 2, 3, 4], device=device).view(2, 2, 2)
self.assertEqual(expected, rolled, msg=f"{rolled} did not equal expected result: {expected}")
data = data.view(2, 4)
# roll a loop until back where started
loop_rolled = data.roll(2, 0).roll(4, 1)
self.assertEqual(data, loop_rolled, msg=f"{loop_rolled} did not equal the original: {data}")
# multiple inverse loops
self.assertEqual(data, data.roll(-20, 0).roll(-40, 1))
self.assertEqual(torch.tensor([8, 1, 2, 3, 4, 5, 6, 7], device=device), numbers.roll(1, 0))
# test non-contiguous
# strided equivalent to numbers.as_strided(size=(4, 2), stride=(1, 4))
strided = numbers.view(2, 4).transpose(0, 1)
self.assertFalse(strided.is_contiguous(), "this test needs a non-contiguous tensor")
expected = torch.tensor([4, 8, 1, 5, 2, 6, 3, 7]).view(4, 2)
rolled = strided.roll(1, 0)
self.assertEqual(expected, rolled,
msg=f"non contiguous tensor rolled to {rolled} instead of {expected} ")
# test roll with no dimension specified
expected = numbers.roll(1, 0).view(2, 4)
self.assertEqual(expected, data.roll(1), msg="roll with no dims should flatten and roll.")
self.assertEqual(expected, data.roll(1, dims=None), msg="roll with no dims should flatten and roll.")
# test roll over multiple dimensions
expected = torch.tensor([[7, 8, 5, 6], [3, 4, 1, 2]], device=device)
double_rolled = data.roll(shifts=(2, -1), dims=(1, 0))
self.assertEqual(double_rolled, expected,
msg=f"should be able to roll over two dimensions, got {double_rolled}")
self.assertRaisesRegex(RuntimeError, "required", lambda: data.roll(shifts=(), dims=()))
self.assertRaisesRegex(RuntimeError, "required", lambda: data.roll(shifts=(), dims=1))
# shifts/dims should align
self.assertRaisesRegex(RuntimeError, "align", lambda: data.roll(shifts=(1, 2), dims=(1,)))
self.assertRaisesRegex(RuntimeError, "align", lambda: data.roll(shifts=(1,), dims=(1, 2)))
# test bool tensor
t = torch.zeros(6, dtype=torch.bool, device=device)
t[0] = True
t[3] = True
self.assertEqual(torch.tensor([False, True, False, False, True, False]), t.roll(1, 0))
# test complex tensor
t = torch.tensor([1, 2 + 1j, 3.5, 4. + 2j, 5j, 6.], device=device)
t[0] = 1 + 0.5j
t[3] = 4.
expected = torch.tensor([6., 1 + 0.5j, 2 + 1j, 3.5, 4., 5j], device=device)
self.assertEqual(expected, t.roll(1, 0))
def test_diagflat(self, device):
dtype = torch.float32
# Basic sanity test
x = torch.randn((100,), dtype=dtype, device=device)
result = torch.diagflat(x)
expected = torch.diag(x)
self.assertEqual(result, expected)
# Test offset
x = torch.randn((100,), dtype=dtype, device=device)
result = torch.diagflat(x, 17)
expected = torch.diag(x, 17)
self.assertEqual(result, expected)
# Test where input has more than one dimension
x = torch.randn((2, 3, 4), dtype=dtype, device=device)
result = torch.diagflat(x)
expected = torch.diag(x.contiguous().view(-1))
self.assertEqual(result, expected)
# Noncontig input
x = torch.randn((2, 3, 4), dtype=dtype, device=device).transpose(2, 0)
self.assertFalse(x.is_contiguous())
result = torch.diagflat(x)
expected = torch.diag(x.contiguous().view(-1))
self.assertEqual(result, expected)
# Complex number support
result = torch.diagflat(torch.ones(4, dtype=torch.complex128))
expected = torch.eye(4, dtype=torch.complex128)
self.assertEqual(result, expected)
def test_block_diag(self, device):
def block_diag_workaround(*arrs):
arrs_expanded = []
for a in arrs:
if a.dim() == 2:
arrs_expanded.append(a)
elif a.dim() == 1:
arrs_expanded.append(a.expand(1, a.size(0)))
elif a.dim() == 0:
arrs_expanded.append(a.expand(1, 1))
shapes = torch.tensor([a.shape for a in arrs_expanded], device=device)
out = torch.zeros(
torch.sum(shapes, dim=0).tolist(),
dtype=arrs_expanded[0].dtype,
device=device
)
r, c = 0, 0
for i, (rr, cc) in enumerate(shapes):
out[r:r + rr, c:c + cc] = arrs_expanded[i]
r += rr
c += cc
return out
tensors = [
torch.rand((2, 2), device=device),
torch.rand((2, 3), device=device),
torch.rand(10, device=device),
torch.rand((8, 1), device=device),
torch.rand(1, device=device)[0]
]
result = torch.block_diag(*tensors)
result_check = block_diag_workaround(*tensors)
self.assertEqual(result, result_check)
tensor = torch.rand(1, device=device)[0]
result = torch.block_diag(tensor)
result_check = tensor.expand(1, 1)
self.assertEqual(result, result_check)
tensor = torch.rand(10, device=device)
result = torch.block_diag(tensor)
result_check = tensor.expand(1, tensor.size(0))
self.assertEqual(result, result_check)
result = torch.block_diag()
result_check = torch.empty(1, 0, device=device)
self.assertEqual(result, result_check)
self.assertEqual(result.device.type, 'cpu')
test_dtypes = [
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
torch.float32,
torch.float64,
torch.complex64,
torch.complex128
]
# Test pairs of different dtypes
for dtype1 in test_dtypes:
for dtype2 in test_dtypes:
a = torch.tensor(1, device=device, dtype=dtype1)
b = torch.tensor(2, device=device, dtype=dtype2)
result = torch.block_diag(a, b)
result_dtype = torch.result_type(a, b)
result_check = torch.tensor([[1, 0], [0, 2]], device=device, dtype=result_dtype)
self.assertEqual(result, result_check)
with self.assertRaisesRegex(
RuntimeError,
"torch.block_diag: Input tensors must have 2 or fewer dimensions. Input 1 has 3 dimensions"
):
torch.block_diag(torch.tensor(5), torch.tensor([[[6]]]))
with self.assertRaisesRegex(
RuntimeError,
"torch.block_diag: Input tensors must have 2 or fewer dimensions. Input 0 has 4 dimensions"
):
torch.block_diag(torch.tensor([[[[6]]]]))
if device != 'cpu':
with self.assertRaisesRegex(
RuntimeError,
(
"torch.block_diag: input tensors must all be on the same device."
" Input 0 is on device cpu and input 1 is on device "
)
):
torch.block_diag(torch.ones(2, 2).cpu(), torch.ones(2, 2, device=device))
@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
def test_block_diag_scipy(self, device):
"""Compare torch.block_diag against scipy.linalg.block_diag.

torch promotes the inputs jointly (mixed int/float lists become
float32), while scipy promotes to 64-bit types, so the scipy result
is cast to the torch dtype before the value comparison.
"""
import scipy.linalg
# Each entry mixes scalars, empty lists and nested lists of varying
# shapes/dtypes; the final entry is the zero-input case.
scipy_tensors_list = [
[
1,
[2],
[],
[3, 4, 5],
[[], []],
[[6], [7.3]]
],
[
[[1, 2], [3, 4]],
[1]
],
[
[[4, 9], [7, 10]],
[4.6, 9.12],
[1j + 3]
],
[]
]
# torch's joint type promotion for each input group above.
expected_torch_types = [
torch.float32,
torch.int64,
torch.complex64,
torch.float32
]
# scipy promotes to 64-bit dtypes instead.
expected_scipy_types = [
torch.float64,
# windows scipy block_diag returns int32 types
torch.int32 if IS_WINDOWS else torch.int64,
torch.complex128,
torch.float64
]
for scipy_tensors, torch_type, scipy_type in zip(scipy_tensors_list, expected_torch_types, expected_scipy_types):
torch_tensors = [torch.tensor(t, device=device) for t in scipy_tensors]
torch_result = torch.block_diag(*torch_tensors)
self.assertEqual(torch_result.dtype, torch_type)
scipy_result = torch.tensor(
scipy.linalg.block_diag(*scipy_tensors),
device=device
)
self.assertEqual(scipy_result.dtype, scipy_type)
# Cast to a common dtype before comparing values.
scipy_result = scipy_result.to(torch_type)
self.assertEqual(torch_result, scipy_result)
@onlyNativeDeviceTypes
@dtypes(torch.half, torch.float32, torch.float64)
def test_torch_complex(self, device, dtype):
    """torch.complex combines real/imag parts into the matching complex dtype."""
    re_part = torch.tensor([1, 2], device=device, dtype=dtype)
    im_part = torch.tensor([3, 4], device=device, dtype=dtype)
    combined = torch.complex(re_part, im_part)
    expected_dtype = float_to_corresponding_complex_type_map[dtype]
    expected = torch.tensor([1.0 + 3.0j, 2.0 + 4.0j], dtype=expected_dtype)
    self.assertEqual(expected, combined)
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_torch_polar(self, device, dtype):
    """torch.polar builds complex numbers from magnitude/angle pairs."""
    magnitudes = torch.tensor([1, 2, -3, -4.5, 1, 1], device=device, dtype=dtype)
    angles = torch.tensor(
        [math.pi / 2, 5 * math.pi / 4, 0, -11 * math.pi / 6, math.pi, -math.pi],
        device=device, dtype=dtype)
    out = torch.polar(magnitudes, angles)
    expected_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
    expected = torch.tensor(
        [1j, -1.41421356237 - 1.41421356237j, -3, -3.89711431703 - 2.25j, -1, -1],
        dtype=expected_dtype)
    self.assertEqual(expected, out, atol=1e-5, rtol=1e-5)
@onlyNativeDeviceTypes
@dtypes(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64,
        torch.complex64, torch.complex128, torch.bool)
def test_torch_complex_floating_dtype_error(self, device, dtype):
    """complex/polar reject non-floating inputs with a descriptive error."""
    expected_msg = (r"Expected both inputs to be Half, Float or Double tensors but "
                    r"got [A-Za-z]+ and [A-Za-z]+")
    lhs = torch.tensor([1, 2], device=device, dtype=dtype)
    rhs = torch.tensor([3, 4], device=device, dtype=dtype)
    for op in (torch.complex, torch.polar):
        with self.assertRaisesRegex(RuntimeError, expected_msg):
            op(lhs, rhs)
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_torch_complex_same_dtype_error(self, device, dtype):
    """complex/polar require both inputs to share one floating dtype."""
    def name_of(dt):
        return 'Float' if dt == torch.float32 else 'Double'
    mismatched = torch.float64 if dtype == torch.float32 else torch.float32
    first = torch.tensor([1, 2], device=device, dtype=dtype)
    second = torch.tensor([3, 4], device=device, dtype=mismatched)
    expected_msg = (f"Expected object of scalar type {name_of(dtype)} but got scalar type "
                    f"{name_of(mismatched)} for second argument")
    for op in (torch.complex, torch.polar):
        with self.assertRaisesRegex(RuntimeError, expected_msg):
            op(first, second)
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_torch_complex_out_dtype_error(self, device, dtype):
    """complex/polar demand a complex-typed out= tensor."""
    real_name = 'Float' if dtype == torch.float32 else 'Double'
    expected_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
    complex_name = 'ComplexFloat' if expected_dtype == torch.complex64 else 'ComplexDouble'
    a = torch.tensor([1, 2], device=device, dtype=dtype)
    b = torch.tensor([3, 4], device=device, dtype=dtype)
    bad_out = torch.zeros(2, device=device, dtype=dtype)
    expected_msg = (f"Expected object of scalar type {complex_name} but got scalar type "
                    f"{real_name} for argument 'out'")
    for op in (torch.complex, torch.polar):
        with self.assertRaisesRegex(RuntimeError, expected_msg):
            op(a, b, out=bad_out)
def test_cat_empty_legacy(self, device):
    """Concatenating a shape-(0,) tensor along dim=1 is tolerated.

    FIXME: legacy behavior — remove once empty tensors support
    arbitrary sizes.
    """
    hollow = torch.randn((0,), dtype=torch.float32, device=device)
    full = torch.randn((4, 3, 32, 32), dtype=torch.float32, device=device)
    # Operand order must not matter.
    self.assertEqual(torch.cat([full, hollow], dim=1),
                     torch.cat([hollow, full], dim=1))
    # Two empties concatenate to an empty.
    self.assertEqual(torch.cat([hollow, hollow], dim=1), hollow)
def test_cat_empty(self, device):
    """cat with a zero-sized (along the cat dim) tensor is a no-op."""
    hollow = torch.randn((4, 0, 32, 32), dtype=torch.float32, device=device)
    full = torch.randn((4, 3, 32, 32), dtype=torch.float32, device=device)
    # Operand order must not matter.
    self.assertEqual(torch.cat([full, hollow], dim=1),
                     torch.cat([hollow, full], dim=1))
    # Two empties concatenate to an empty.
    self.assertEqual(torch.cat([hollow, hollow], dim=1), hollow)
def test_concat_empty_list_error(self, device):
    """concat/concatenate on an empty list raise a clear ValueError.

    Regression test for https://github.com/pytorch/pytorch/issues/155306
    """
    expected_msg = "expected a non-empty list of Tensors"
    for fn in (torch.concat, torch.concatenate):
        with self.assertRaisesRegex(ValueError, expected_msg):
            fn([], dim='N')
def test_cat_out(self, device):
"""torch.cat into an out= tensor, including out= views that alias the
inputs' storage.

The second part (issue #49878) writes into a narrowed view of a
larger tensor and checks that only the narrowed region is modified.
"""
# NOTE(review): this x is never used before being reassigned below.
x = torch.zeros((0), device=device)
y = torch.randn((4, 6), device=device)
w = y.view(-1).clone()
a = torch.cat([w[:2], w[4:6]])
# out= is a slice of the same buffer the inputs come from.
b = torch.cat([w[:2], w[4:6]], out=w[6:10])
self.assertEqual(a, b)
self.assertEqual(a, w[6:10])
# Elements before the out= slice must be untouched.
self.assertEqual(w[:6], y.view(-1)[:6])
# Case:
# Reference: https://github.com/pytorch/pytorch/issues/49878
for dim in [0, 1]:
x = torch.zeros((10, 5, 2), device=device)
random_length = random.randint(1, 4)
# out= is a narrowed view of x; contiguous only when dim == 0.
y = x.narrow(dim, 0, x.shape[dim] - random_length)
val = torch.full_like(y[0], 3., device=device)
if dim == 0:
self.assertTrue(y.is_contiguous())
else:
self.assertFalse(y.is_contiguous())
torch.cat((val[None],) * y.shape[0], dim=0, out=y)
expected_y = torch.cat((val[None],) * y.shape[0], dim=0)
expected_x = torch.zeros((10, 5, 2), device=device)
if dim == 0:
expected_x[:x.shape[dim] - random_length, :, :] = expected_y
elif dim == 1:
expected_x[:, :x.shape[dim] - random_length, :] = expected_y
# Both the view and the regions of x outside it must be correct.
self.assertEqual(y, expected_y)
self.assertEqual(x, expected_x)
@dtypes(*all_types_and_complex(), torch.uint16, torch.uint32, torch.uint64)
def test_cat_out_fast_path_dim0_dim1(self, device, dtype):
"""Exercise the contiguous fast-concat paths on dims 0 and 1.

Covers out= aliasing the inputs' buffer, contiguous vs. possibly
non-contiguous slices, and (for float/complex dtypes) that autograd
still flows through the fast path.
"""
int_types = integral_types_and(torch.uint16, torch.uint32, torch.uint64)
x = torch.zeros((0), device=device, dtype=dtype)
if dtype in int_types:
y = torch.randint(low=0, high=100, size=(4, 6), device=device, dtype=dtype)
else:
y = torch.randn((4, 6), device=device, dtype=dtype)
# Test concat on dimension 0
w = y.view(-1).clone()
a = torch.cat([w[:2], w[4:6]])
# out= aliases the same buffer as the inputs.
b = torch.cat([w[:2], w[4:6]], out=w[6:10])
# Note that there is no guarantee that slicing here will result in
# contiguous tensors
self.assertEqual(a, b)
self.assertEqual(a, w[6:10])
self.assertEqual(w[:6], y.view(-1)[:6])
# If inputs are contiguous tensors, then fast concat paths will be invoked
a_fastcat = torch.cat([w[:2].contiguous(), w[4:6].contiguous()])
self.assertEqual(a_fastcat, a)
# Test concat on dimension 1
w = y.clone()
w_slices = torch.tensor_split(w, (2, 4), dim=1)
# Note that the tensor in w_slices[] here may not be a contiguous
# tensor and we need to make sure this is not broken by fast concat
b = torch.cat([w_slices[0], w_slices[1]], dim=1)
# Re-joining the first two splits recovers columns 0..3.
expected_b = torch.index_select(w, 1, torch.tensor([0, 1, 2, 3], device=device))
self.assertEqual(b, expected_b)
# If inputs are contiguous tensors, then fast concat paths will be invoked
b_fastcat = torch.cat([w_slices[0].contiguous(), w_slices[1].contiguous()], dim=1)
self.assertEqual(b_fastcat, expected_b)
# Finally, we need to make sure backward is not broken
# Integral types will not have grad
if dtype not in int_types:
a = torch.randn((4, 3), device=device, dtype=dtype, requires_grad=True)
b = torch.randn((2, 3), device=device, dtype=dtype, requires_grad=True)
c = torch.randn((5, 3), device=device, dtype=dtype, requires_grad=True)
d = torch.randn((5, 2), device=device, dtype=dtype, requires_grad=True)
# cat's backward just routes gradients through, so sum() gives all-ones grads.
expected_a_grad = torch.ones((4, 3), device=device, dtype=dtype)
expected_b_grad = torch.ones((2, 3), device=device, dtype=dtype)
expected_c_grad = torch.ones((5, 3), device=device, dtype=dtype)
expected_d_grad = torch.ones((5, 2), device=device, dtype=dtype)
# All the new tensors should be contiguous here. Let us make sure
# to explicitly set them contiguous to enforce fast cat
dim0_cat = torch.cat([a.contiguous(), b.contiguous()], dim=0)
if dtype in complex_types():
# backward() needs a real scalar, hence the abs() for complex dtypes.
dim0_cat.sum().abs().backward()
self.assertEqual(a.grad.abs(), expected_a_grad.abs())
self.assertEqual(b.grad.abs(), expected_b_grad.abs())
else:
dim0_cat.sum().backward()
self.assertEqual(a.grad, expected_a_grad)
self.assertEqual(b.grad, expected_b_grad)
dim1_cat = torch.cat([c.contiguous(), d.contiguous()], dim=1)
if dtype in complex_types():
dim1_cat.sum().abs().backward()
self.assertEqual(c.grad.abs(), expected_c_grad.abs())
self.assertEqual(d.grad.abs(), expected_d_grad.abs())
else:
dim1_cat.sum().backward()
self.assertEqual(c.grad, expected_c_grad)
self.assertEqual(d.grad, expected_d_grad)
def test_cat_out_channels_last(self, device):
    """cat into a channels_last out= tensor matches the plain cat result."""
    first = torch.randn((4, 3, 8, 8))
    second = torch.randn(first.shape)
    reference = torch.cat((first, second))
    cl_out = reference.clone().contiguous(memory_format=torch.channels_last)
    written = torch.cat((first, second), out=cl_out)
    self.assertEqual(reference, written)
@onlyNativeDeviceTypes
def test_cat_in_channels_last(self, device):
    """cat of channels_last inputs stays channels_last and matches the
    contiguous-input result, for small and grain-size-exceeding tensors."""
    for dim in range(4):
        # Second shape is larger than the parallelization grain size.
        for shape in ((4, 15, 8, 8), (4, 15, 256, 256)):
            first = torch.randn(shape, device=device)
            second = torch.randn(first.shape, device=device)
            reference = torch.cat((first, second), dim=dim)
            first = first.clone().contiguous(memory_format=torch.channels_last)
            second = second.clone().contiguous(memory_format=torch.channels_last)
            result = torch.cat((first, second), dim=dim)
            self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
            self.assertEqual(reference, result)
@onlyNativeDeviceTypes
def test_cat_preserve_channels_last(self, device):
    """cat propagates channels_last when every input carries it."""
    first = torch.randn((4, 3, 8, 8), device=device)
    second = torch.randn(first.shape, device=device)
    reference = torch.cat((first, second))
    result = torch.cat((first.contiguous(memory_format=torch.channels_last),
                        second.contiguous(memory_format=torch.channels_last)))
    self.assertEqual(reference, result)
    self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
    # Discontiguous channels-last inputs must also produce a correct,
    # channels_last result.
    base = torch.arange(24, dtype=torch.float, device=device) \
        .reshape(2, 2, 3, 2).to(memory_format=torch.channels_last)
    left = base[:, :, :2]
    right = base[:, :, 1:]
    got = torch.cat((left, right), dim=-1)
    want = torch.cat((left.contiguous(), right.contiguous()), dim=-1)
    self.assertEqual(got, want)
    self.assertTrue(got.is_contiguous(memory_format=torch.channels_last))
@onlyCUDA
def test_cat_channels_last_large_inputs(self, device):
    """cat over many channels_last inputs matches CPU and keeps the format."""
    # NOTE(review): 130 inputs — appears chosen to exceed a per-launch
    # input-batching limit; confirm against the CUDA cat kernel.
    count = 130
    gpu_inputs = [
        torch.randn((2, 3, 4, 4), device=device).contiguous(memory_format=torch.channels_last)
        for _ in range(count)
    ]
    cpu_inputs = [t.cpu() for t in gpu_inputs]
    got = torch.cat(gpu_inputs, dim=1)
    want = torch.cat(cpu_inputs, dim=1)
    self.assertEqual(got.cpu(), want)
    self.assertTrue(got.is_contiguous(memory_format=torch.channels_last))
@onlyCUDA
def test_cat_out_memory_format(self, device):
"""Memory-format rules for cat with an out= argument.

Case 1: a correctly-shaped out= keeps its own memory format.
Case 2: an out= that must be resized inherits the inputs' format
only when all inputs share one; otherwise contiguous_format is used.
"""
inp_size = (4, 4, 4, 4)
expected_size = (8, 4, 4, 4)
a_cuda = torch.randn(inp_size, device=device).contiguous(memory_format=torch.channels_last)
a_cpu = torch.randn(inp_size, device='cpu').contiguous(memory_format=torch.channels_last)
b_cuda = torch.randn(inp_size, device=device).contiguous(memory_format=torch.contiguous_format)
b_cpu = torch.randn(inp_size, device='cpu').contiguous(memory_format=torch.contiguous_format)
c_cuda = torch.randn(inp_size, device=device).contiguous(memory_format=torch.channels_last)
# Case 1: if out= is the correct shape then the memory format of out= is respected
out_cuda = torch.empty(expected_size, device=device).contiguous(memory_format=torch.contiguous_format)
res1_cuda = torch.cat((a_cuda, b_cuda), out=out_cuda)
out_cpu = torch.empty(expected_size, device='cpu').contiguous(memory_format=torch.contiguous_format)
res1_cpu = torch.cat((a_cpu, b_cpu), out=out_cpu)
self.assertTrue(res1_cuda.is_contiguous(memory_format=torch.contiguous_format))
self.assertTrue(res1_cpu.is_contiguous(memory_format=torch.contiguous_format))
# Case 2: if out= is not the correct shape then the output it is resized internally
# - For both CPU and CUDA variants, it only propagates memory format if all the tensors have
# the same memory format, otherwise it just uses contiguous_format as a default
out_cuda = torch.empty((0), device=device).contiguous(memory_format=torch.contiguous_format)
# a_cuda and b_cuda have different memory_format
res2_cuda = torch.cat((a_cuda, b_cuda), out=out_cuda)
out_cpu = torch.empty((0), device='cpu').contiguous(memory_format=torch.contiguous_format)
res2_cpu = torch.cat((a_cpu, b_cpu), out=out_cpu)
self.assertTrue(res2_cuda.is_contiguous(memory_format=torch.contiguous_format))
self.assertTrue(res2_cpu.is_contiguous(memory_format=torch.contiguous_format))
out_cuda = torch.empty((0), device=device).contiguous(memory_format=torch.contiguous_format)
# a_cuda and c_cuda have same memory_format
res3_cuda = torch.cat((a_cuda, c_cuda), out=out_cuda)
self.assertTrue(res3_cuda.is_contiguous(memory_format=torch.channels_last))
@onlyCUDA
def test_cat_stack_cross_devices(self, device):
    """torch.stack rejects operands living on different devices."""
    on_gpu = torch.randn((3, 3), device=device)
    on_cpu = torch.randn((3, 3), device='cpu')
    expected_msg = "Expected all tensors to be on the same device"
    # Either operand order must raise.
    for pair in ((on_gpu, on_cpu), (on_cpu, on_gpu)):
        with self.assertRaisesRegex(RuntimeError, expected_msg):
            torch.stack(pair)
# TODO: reconcile with other cat tests
# TODO: Compare with a NumPy reference instead of CPU
@onlyCUDA
def test_cat(self, device):
"""cat along every (possibly negative) dim places each operand in a
contiguous slice; cat also inverts split/chunk."""
SIZE = 10
for dim in range(-3, 3):
pos_dim = dim if dim >= 0 else 3 + dim
# transpose moves the concatenation axis away from dim 0.
x = torch.rand(13, SIZE, SIZE, device=device).transpose(0, pos_dim)
y = torch.rand(17, SIZE, SIZE, device=device).transpose(0, pos_dim)
z = torch.rand(19, SIZE, SIZE, device=device).transpose(0, pos_dim)
res1 = torch.cat((x, y, z), dim)
# Each operand occupies a contiguous slice along the cat dimension.
self.assertEqual(res1.narrow(pos_dim, 0, 13), x, atol=0, rtol=0)
self.assertEqual(res1.narrow(pos_dim, 13, 17), y, atol=0, rtol=0)
self.assertEqual(res1.narrow(pos_dim, 30, 19), z, atol=0, rtol=0)
# cat round-trips split and chunk.
x = torch.randn(20, SIZE, SIZE, device=device)
self.assertEqual(torch.cat(torch.split(x, 7)), x)
self.assertEqual(torch.cat(torch.chunk(x, 7)), x)
y = torch.randn(1, SIZE, SIZE, device=device)
z = torch.cat([x, y])
self.assertEqual(z.size(), (21, SIZE, SIZE))
# TODO: update this test to compare against NumPy instead of CPU
@onlyCUDA
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_device_rounding(self, device, dtype):
    """torch.round uses round-half-to-even on every floating dtype.

    Bug fix: the parametrized ``dtype`` was previously ignored — both
    tensors were built with the default dtype, so the half/double
    variants silently re-tested float32.
    """
    # test half-to-even
    a = [-5.8, -3.5, -2.3, -1.5, -0.5, 0.5, 1.5, 2.3, 3.5, 5.8]
    res = [-6., -4., -2., -2., 0., 0., 2., 2., 4., 6.]
    # All inputs and expected results are exactly representable in half,
    # so the same reference values hold for every parametrized dtype.
    a_tensor = torch.tensor(a, device=device, dtype=dtype).round()
    res_tensor = torch.tensor(res, device='cpu', dtype=dtype)
    self.assertEqual(a_tensor, res_tensor)
# Note: This test failed on XLA since its test cases are created by empty_strided which
# doesn't support overlapping sizes/strides in XLA impl
@onlyNativeDeviceTypes
def test_like_fn_stride_proparation_vs_tensoriterator_unary_op(self, device):
"""*_like factory functions must propagate strides the same way a
TensorIterator unary op (exp) does.

NOTE(review): "proparation" in the method name is a typo, but renaming
would change the test ID — left as-is.
"""
# Test like functions against a tensoriterator-based unary operator (exp) to
# make sure the returned tensor from the like function follows the same stride
# propagation rule as what tensoriterator does for a unary operator. The like
# function's output strides are computed on the CPU side always, no need to
# test GPU here.
def compare_helper_(like_fn, t):
# Both results must agree on strides and sizes.
te = torch.exp(t)
tl = like_fn(t)
self.assertEqual(te.stride(), tl.stride())
self.assertEqual(te.size(), tl.size())
like_fns = [
lambda t, **kwargs: torch.zeros_like(t, **kwargs),
lambda t, **kwargs: torch.ones_like(t, **kwargs),
lambda t, **kwargs: torch.randint_like(t, 10, 100, **kwargs),
lambda t, **kwargs: torch.randint_like(t, 100, **kwargs),
lambda t, **kwargs: torch.randn_like(t, **kwargs),
lambda t, **kwargs: torch.rand_like(t, **kwargs),
lambda t, **kwargs: torch.full_like(t, 7, **kwargs),
lambda t, **kwargs: torch.empty_like(t, **kwargs)]
# dense non-overlapping tensor,
# non-dense non-overlapping sliced tensor
# non-dense non-overlapping gapped tensor
# non-dense non-overlapping 0 strided tensor
# non-dense overlapping general tensor
# non-dense overlapping sliced tensor
# non-dense overlapping gapped tensor
# non-dense overlapping 0 strided tensor
# non-dense overlapping equal strides
tset = (
torch.randn(4, 3, 2, device=device),
torch.randn(4, 3, 2, device=device)[:, :, ::2],
torch.empty_strided((4, 3, 2), (10, 3, 1), device=device).fill_(1.0),
torch.empty_strided((4, 3, 2), (10, 0, 3), device=device).fill_(1.0),
torch.empty_strided((4, 3, 2), (10, 1, 2), device=device).fill_(1.0),
torch.empty_strided((4, 3, 2), (4, 2, 1), device=device)[:, :, ::2].fill_(1.0),
torch.empty_strided((4, 3, 2), (10, 1, 1), device=device).fill_(1.0),
torch.empty_strided((4, 1, 1, 2), (10, 0, 0, 2), device=device).fill_(1.0),
torch.empty_strided((4, 2, 3), (10, 3, 3), device=device).fill_(1.0))
# Exercise every permutation of each base layout.
for like_fn in like_fns:
for t in tset:
for p in permutations(range(t.dim())):
tp = t.permute(p)
compare_helper_(like_fn, tp)
def _hvd_split_helper(self, torch_fn, np_fn, op_name, inputs, device, dtype, dim):
"""Drive torch.{h,v,d}split against its NumPy twin.

Each ``inputs`` entry is ``(shape, arg)`` where ``arg`` is either a
section count (must divide evenly) or a list of split indices
(always accepted). ``dim`` is 1 for hsplit, 0 for vsplit, 2 for
dsplit. Error cases are detected up front and both the exception
type and the message prefix are checked.
"""
dimension_error_message = op_name + " requires a tensor with at least "
divisibiliy_error_message = op_name + " attempted to split along dimension "
for shape, arg in inputs:
# hsplit on a 1-D tensor actually splits along dim 0, not dim 1.
direction = dim - (len(shape) == 1 and dim == 1)
# Minimum rank the op accepts: 1 (hsplit), 2 (vsplit), 3 (dsplit).
bound = dim + 2 * (dim == 0) + (dim == 2)
error_expected = len(shape) < bound or (not isinstance(arg, list) and shape[direction] % arg != 0)
t = make_tensor(shape, dtype=dtype, device=device)
t_np = t.cpu().numpy()
if not error_expected:
self.assertEqual(torch_fn(t, arg), np_fn(t_np, arg))
else:
self.assertRaises(RuntimeError, lambda: torch_fn(t, arg))
# NOTE(review): this passes the torch tensor (not t_np) to the NumPy
# function — NumPy accepts it via __array__; confirm it's intended.
self.assertRaises(ValueError, lambda: np_fn(t, arg))
expected_error_message = dimension_error_message if len(shape) < bound else divisibiliy_error_message
self.assertRaisesRegex(RuntimeError, expected_error_message, lambda: torch_fn(t, arg))
@onlyNativeDeviceTypes
@dtypes(torch.long, torch.float32, torch.complex64)
def test_hsplit(self, device, dtype):
    """torch.hsplit agrees with np.hsplit on valid and invalid cases."""
    # (shape, sections-or-indices) — int args that don't divide evenly
    # and sub-minimum-rank shapes are expected to raise.
    cases = [
        ((), 3),
        ((), [2, 4, 6]),
        ((6,), 2),
        ((6,), 4),
        ((6,), [2, 5]),
        ((6,), [7, 9]),
        ((3, 8), 4),
        ((3, 8), 5),
        ((3, 8), [1, 5]),
        ((3, 8), [3, 8]),
        ((5, 5, 5), 2),
        ((5, 5, 5), [1, 4]),
        ((5, 0, 5), 3),
        ((5, 5, 0), [2, 6]),
    ]
    self._hvd_split_helper(torch.hsplit, np.hsplit, "torch.hsplit", cases, device, dtype, 1)
@onlyNativeDeviceTypes
@dtypes(torch.long, torch.float32, torch.complex64)
def test_vsplit(self, device, dtype):
    """torch.vsplit agrees with np.vsplit on valid and invalid cases."""
    # (shape, sections-or-indices) — 1-D shapes are below vsplit's
    # minimum rank and are expected to raise.
    cases = [
        ((6,), 2),
        ((6,), 4),
        ((6, 5), 2),
        ((6, 5), 4),
        ((6, 5), [1, 2, 3]),
        ((6, 5), [1, 5, 9]),
        ((6, 5, 5), 2),
        ((6, 0, 5), 2),
        ((5, 0, 5), [1, 5]),
    ]
    self._hvd_split_helper(torch.vsplit, np.vsplit, "torch.vsplit", cases, device, dtype, 0)
@onlyNativeDeviceTypes
@dtypes(torch.long, torch.float32, torch.complex64)
def test_dsplit(self, device, dtype):
    """torch.dsplit agrees with np.dsplit on valid and invalid cases."""
    # (shape, sections-or-indices) — shapes below rank 3 are expected
    # to raise. The final case duplicates an earlier one; kept as-is.
    cases = [
        ((6,), 4),
        ((6, 6), 3),
        ((5, 5, 6), 2),
        ((5, 5, 6), 4),
        ((5, 5, 6), [1, 2, 3]),
        ((5, 5, 6), [1, 5, 9]),
        ((5, 5, 0), 2),
        ((5, 0, 6), 4),
        ((5, 0, 6), [1, 2, 3]),
        ((5, 5, 6), [1, 5, 9]),
    ]
    self._hvd_split_helper(torch.dsplit, np.dsplit, "torch.dsplit", cases, device, dtype, 2)
def _test_special_stacks(self, dim, at_least_dim, torch_fn, np_fn, device, dtype):
"""Drive a torch stacking op (hstack/vstack/dstack/...) against its
NumPy counterpart.

``dim`` is the dimension the op concatenates along (after rank
promotion); ``at_least_dim`` is the rank the op promotes inputs to.
Random input groups vary one axis at a time; groups whose shapes are
incompatible are checked for the expected errors instead.
"""
# Test error for non-tuple argument
t = torch.randn(10)
with self.assertRaisesRegex(TypeError, "must be tuple of Tensors, not Tensor"):
torch_fn(t)
# Test error for a single array
# NOTE(review): identical to the previous check — looks like a
# duplicated case rather than a distinct "single array" scenario.
with self.assertRaisesRegex(TypeError, "must be tuple of Tensors, not Tensor"):
torch_fn(t)
# Test 0-D
num_tensors = random.randint(1, 5)
input_t = [torch.tensor(random.uniform(0, 10), device=device, dtype=dtype) for i in range(num_tensors)]
actual = torch_fn(input_t)
expected = np_fn([input.cpu().numpy() for input in input_t])
self.assertEqual(actual, expected)
for ndims in range(1, 5):
base_shape = list(_rand_shape(ndims, min_size=1, max_size=5))
for i in range(ndims):
shape = list(base_shape)
num_tensors = random.randint(1, 5)
torch_input = []
# Create tensors with shape being different along one axis only
for _ in range(num_tensors):
shape[i] = random.randint(1, 5)
torch_input.append(_generate_input(tuple(shape), dtype, device, with_extremal=False))
# Determine if input tensors have valid dimensions.
valid_dim = True
for k in range(len(torch_input) - 1):
for tdim in range(ndims):
# Test whether all tensors have the same shape except in concatenating dimension
# Unless the number of dimensions is less than the corresponding at_least function dimension
# Since the original concatenating dimension would shift after applying at_least and would no
# longer be the concatenating dimension
if (ndims < at_least_dim or tdim != dim) and torch_input[k].size()[tdim] != torch_input[k + 1].size()[tdim]:
valid_dim = False
# Special case for hstack is needed since hstack works differently when ndims is 1
if valid_dim or (torch_fn is torch.hstack and ndims == 1):
# Valid dimensions, test against numpy
np_input = [input.cpu().numpy() for input in torch_input]
actual = torch_fn(torch_input)
expected = np_fn(np_input)
self.assertEqual(actual, expected)
else:
# Invalid dimensions, test for error
with self.assertRaisesRegex(RuntimeError, "Sizes of tensors must match except in dimension"):
torch_fn(torch_input)
with self.assertRaises(ValueError):
np_input = [input.cpu().numpy() for input in torch_input]
np_fn(np_input)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half))
def test_hstack_column_stack(self, device, dtype):
    """hstack/column_stack agree with their NumPy counterparts."""
    for torch_op, np_op in ((torch.hstack, np.hstack),
                            (torch.column_stack, np.column_stack)):
        self._test_special_stacks(1, 1, torch_op, np_op, device, dtype)
    # column_stack must also handle an interleaved mix of 1D and 2D inputs.
    vec = torch.arange(0, 10).to(dtype=dtype, device=device)
    mat = torch.arange(0, 100).to(dtype=dtype, device=device).reshape(10, 10)
    mixed = (mat, vec, mat, vec)
    got = torch.column_stack(mixed)
    want = np.column_stack([t.cpu().numpy() for t in mixed])
    self.assertEqual(want, got)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half))
def test_vstack_row_stack(self, device, dtype):
    """vstack/row_stack agree with np.vstack, including (N) vs (1, N) mixes."""
    for torch_op, np_op in ((torch.vstack, np.vstack), (torch.row_stack, np.vstack)):
        self._test_special_stacks(0, 2, torch_op, np_op, device, dtype)
        for _ in range(5):
            # A 1-D tensor of size (N) stacks with a 2-D tensor of size (1, N).
            n = random.randint(1, 10)
            flat = _generate_input((n,), dtype, device, with_extremal=False)
            row = _generate_input((1, n), dtype, device, with_extremal=False)
            pair = [flat, row]
            got = torch_op(pair)
            want = np_op([t.cpu().numpy() for t in pair])
            self.assertEqual(got, want)
@onlyNativeDeviceTypes
@dtypes(*all_types_and_complex_and(torch.half))
def test_dstack(self, device, dtype):
    """torch.dstack agrees with np.dstack, including rank-promoting mixes."""
    self._test_special_stacks(2, 3, torch.dstack, np.dstack, device, dtype)

    def check(group):
        as_np = [t.cpu().numpy() for t in group]
        self.assertEqual(torch.dstack(group), np.dstack(as_np))

    for _ in range(5):
        # (N), (1, N) and (1, N, 1) promote into one depth-stack.
        n = random.randint(1, 10)
        check([_generate_input((n,), dtype, device, with_extremal=False),
               _generate_input((1, n), dtype, device, with_extremal=False),
               _generate_input((1, n, 1), dtype, device, with_extremal=False)])
        # (M, N) stacks with (M, N, 1).
        m = random.randint(1, 10)
        n = random.randint(1, 10)
        check([_generate_input((m, n), dtype, device, with_extremal=False),
               _generate_input((m, n, 1), dtype, device, with_extremal=False)])
@dtypes(torch.int32, torch.int64)
def test_large_linspace(self, device, dtype):
    """Integer linspace over a near-full-range span keeps distinct steps."""
    lo = torch.iinfo(dtype).min
    # Low 12 bits cleared — presumably to sidestep endpoint rounding;
    # TODO confirm against the linspace kernel.
    hi = torch.iinfo(dtype).max & ~0xfff
    steps = 15
    seq = torch.linspace(lo, hi, steps, dtype=dtype, device=device)
    self.assertGreater(seq[1] - seq[0], (hi - lo) / steps)
@dtypes(torch.float32, torch.float64)
def test_unpack_double(self, device, dtype):
"""Values at the edges of float precision convert exactly as NumPy does."""
# Reference: https://github.com/pytorch/pytorch/issues/33111
# 2**24 + 1 / 2**53 + 1 are the first integers not exactly representable
# in float32 / float64; +-1e500 overflow to +-inf as Python floats.
vals = (2 ** 24 + 1, 2 ** 53 + 1,
np.iinfo(np.int64).max, np.iinfo(np.uint64).max, np.iinfo(np.uint64).max + 1,
-1e500, 1e500)
for val in vals:
t = torch.tensor(val, dtype=dtype, device=device)
a = np.array(val, dtype=torch_to_numpy_dtype_dict[dtype])
self.assertEqual(t, torch.from_numpy(a))
def _float_to_int_conversion_helper(self, vals, device, dtype, refs=None):
    """Cast ``vals`` from float32 to ``dtype`` and compare with ``refs``.

    When ``refs`` is omitted, NumPy's float32 -> dtype cast supplies
    the reference values.
    """
    if refs is None:
        np_cast = np.array(vals, dtype=np.float32).astype(torch_to_numpy_dtype_dict[dtype])
        refs = torch.from_numpy(np_cast)
    converted = torch.tensor(vals, device=device, dtype=torch.float).to(dtype)
    self.assertEqual(refs, converted.cpu())
# Checks that float->integer casts don't produce undefined behavior errors.
# Note: In C++, casting from a floating value to an integral dtype
# is undefined if the floating point value is not within the integral
# dtype's dynamic range. This can (and should) cause undefined behavior
# errors with UBSAN. These casts are deliberate in PyTorch, however, and
# NumPy may have the same behavior.
@onlyNativeDeviceTypes
@unittest.skipIf(IS_PPC, "Test is broken on PowerPC, see https://github.com/pytorch/pytorch/issues/39671")
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_float_to_int_conversion_finite(self, device, dtype):
"""Finite float -> int casts agree with the NumPy reference, modulo
the per-platform divergences handled by the branches below."""
# NOTE(review): min/max shadow the builtins here.
min = torch.finfo(torch.float).min
max = torch.finfo(torch.float).max
# Note: CUDA max float -> integer conversion is divergent on some dtypes
vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2, max)
# refs=None lets the helper derive references from NumPy.
refs = None
if self.device_type == 'cuda':
if torch.version.hip:
# HIP min float -> int64 conversion is divergent
vals = (-2, -1.5, -.5, 0, .5, 1.5, 2)
else:
vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2)
elif dtype == torch.uint8:
# Note: CPU max float -> uint8 conversion is divergent
vals = (min, -2, -1.5, -.5, 0, .5, 1.5, 2)
# Note: numpy -2.0 or -1.5 -> uint8 conversion is undefined
# see https://github.com/pytorch/pytorch/issues/97794
refs = (0, 254, 255, 0, 0, 0, 1, 2)
elif dtype == torch.int16:
# CPU min and max float -> int16 conversion is divergent.
vals = (-2, -1.5, -.5, 0, .5, 1.5, 2)
self._float_to_int_conversion_helper(vals, device, dtype, refs)
# Note: CUDA will fail this test on most dtypes, often dramatically.
# Note: This test validates undefined behavior consistency in float-to-ints casts
# NB: torch.uint16, torch.uint32, torch.uint64 excluded as this
# nondeterministically fails, warning "invalid value encountered in cast"
@onlyCPU
@unittest.skipIf(IS_S390X, "Test fails for int16 on s390x. Needs investigation.")
@dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
def test_float_to_int_conversion_nonfinite(self, device, dtype):
"""-inf/inf/nan -> integer casts produce the per-platform expected values."""
vals = (float('-inf'), float('inf'), float('nan'))
if dtype == torch.bool:
# Any nonzero float (including nan) converts to True.
refs = (True, True, True)
elif IS_ARM64:
# aarch64 appears to saturate to the type's min/max and map nan to 0...
refs = (torch.iinfo(dtype).min, torch.iinfo(dtype).max, 0)
if dtype in (torch.int8, torch.int16):
# ...except for the narrow types, which get these values instead.
refs = (0, -1, 0)
else:
# Non-ARM default; wider types take the min-saturating branch below.
refs = (0, 0, 0)
if dtype in (torch.int32, torch.int64):
refs = (torch.iinfo(dtype).min, ) * 3
self._float_to_int_conversion_helper(vals, device, dtype, refs)
@onlyNativeDeviceTypes
def test_complex_type_conversions(self, device):
    """.to() between real and complex dtypes keeps/drops parts correctly."""
    candidates = [torch.float, torch.complex64, torch.complex128]
    for src in candidates:
        for dst in candidates:
            original = torch.randn(4, dtype=src, device=device)
            converted = original.to(dst)
            if src.is_complex and not dst.is_complex:
                # complex -> real keeps only the real part.
                self.assertEqual(torch.real(original), converted, exact_dtype=False)
            elif not src.is_complex and dst.is_complex:
                # real -> complex: real part copied, imaginary part zeroed.
                self.assertEqual(original, torch.real(converted), exact_dtype=False)
                self.assertEqual(torch.zeros_like(torch.imag(converted)),
                                 torch.imag(converted), exact_dtype=False)
            else:
                # real -> real or complex -> complex: values preserved.
                self.assertEqual(original, converted, exact_dtype=False)
@slowTest
@onlyCPU
def test_cat_big(self, device):
    """cat of multi-gigabyte inputs yields the summed leading dimension."""
    rows_a = 6500
    rows_b = 4500
    pieces = [
        torch.ones((rows_a, 1024 * 512), dtype=torch.uint8, device=device),
        torch.ones((rows_b, 1024 * 512), dtype=torch.uint8, device=device),
    ]
    merged = torch.cat(pieces)
    self.assertEqual(merged.size(0), rows_a + rows_b)
@onlyCPU
@dtypes(torch.half, torch.double, torch.int)
def test_cat2(self, device, dtype):
    """cat along every (possibly negative) dim; cat inverts split/chunk."""
    SIZE = 10

    def rand_block(k):
        # Integer source cast to the target dtype so half/int both work.
        return torch.randint(low=-100, high=100, size=(k, SIZE, SIZE), device=device).to(dtype)

    for dim in range(-3, 3):
        pos_dim = dim if dim >= 0 else 3 + dim
        # transpose moves the concatenation axis away from dim 0.
        x = rand_block(13).transpose(0, pos_dim)
        y = rand_block(17).transpose(0, pos_dim)
        z = rand_block(19).transpose(0, pos_dim)
        joined = torch.cat((x, y, z), dim)
        # Each operand occupies a contiguous slice along the cat dimension.
        self.assertEqual(joined.narrow(pos_dim, 0, 13), x, atol=0, rtol=0)
        self.assertEqual(joined.narrow(pos_dim, 13, 17), y, atol=0, rtol=0)
        self.assertEqual(joined.narrow(pos_dim, 30, 19), z, atol=0, rtol=0)
    # cat round-trips split and chunk.
    x = rand_block(20)
    self.assertEqual(torch.cat(torch.split(x, 7)), x)
    self.assertEqual(torch.cat(torch.chunk(x, 7)), x)
    y = rand_block(1)
    z = torch.cat([x, y])
    self.assertEqual(z.size(), (21, SIZE, SIZE))
@dtypes(torch.float)
def test_cat_size1(self, device, dtype):
    """cat of a size-1 slice whose stride is aligned but slice size isn't."""
    # The slice keeps the parent's aligned stride along dim -1, while the
    # catted slice itself is not aligned.
    sliced = torch.randn(16, 16, device=device, dtype=dtype)[:1, :1]
    dense = sliced.clone().view(-1).view(sliced.shape)
    # Repeat four times so the output has enough elements to be aligned.
    got = torch.cat([sliced] * 4, dim=-1)
    want = torch.cat([dense] * 4, dim=-1)
    self.assertEqual(got, want)
@dtypes(torch.float)
def test_cat_trailing_dim(self, device, dtype):
    """cat along a middle dim with an odd-sized trailing dim matches CPU."""
    first = torch.randn(16, 16, 23, device=device, dtype=dtype)
    second = torch.rand_like(first)
    got = torch.cat([first, second], dim=1)
    want = torch.cat([first.cpu(), second.cpu()], dim=1)
    self.assertEqual(got, want)
@dtypes(torch.float)
def test_cat_misaligned(self, device, dtype):
    """cat of inputs whose storage offset breaks alignment matches CPU."""
    # Slicing off the first two elements misaligns the data pointer.
    first = torch.randn(14, device=device, dtype=dtype)[2:]
    second = torch.rand_like(first)
    got = torch.cat([first, second], dim=-1)
    want = torch.cat([first.cpu(), second.cpu()], dim=-1)
    self.assertEqual(got, want)
@dtypes(torch.float)
def test_cat_multi_batch(self, device, dtype):
    """cat over 130 inputs matches CPU.

    NOTE(review): 130 appears chosen to require more than one input
    batch in the kernel — confirm against the cat implementation.
    """
    many = [torch.randn(16, 16, device=device, dtype=dtype) for _ in range(130)]
    self.assertEqual(torch.cat(many, dim=-1),
                     torch.cat([t.cpu() for t in many], dim=-1))
    many = [torch.randn(16, 15, 15, device=device, dtype=dtype) for _ in range(130)]
    # Give the inputs near index 128 different leading sizes.
    many[128] = torch.randn(15, 15, 15, device=device, dtype=dtype)
    many[129] = torch.randn(17, 15, 15, device=device, dtype=dtype)
    self.assertEqual(torch.cat(many, dim=0),
                     torch.cat([t.cpu() for t in many], dim=0))
@dtypes(torch.float)
@largeTensorTest("16GB")
def test_cat_large_tensor(self, device, dtype):
"""cat where one input spans 2**32 bytes matches the CPU result.

N * itemsize == 2**32 — presumably chosen to exercise 64-bit
indexing paths; confirm against the cat kernel.
"""
N = 2 ** 32 // dtype.itemsize
inps = [torch.randn(N, device=device, dtype=dtype), torch.randn(N // 128, device=device, dtype=dtype)]
res = torch.cat(inps, dim=0)
ref = torch.cat([x.cpu() for x in inps])
self.assertEqual(res, ref)
# FIXME: Create an OpInfo-based tensor creation method test that verifies this for all tensor
# creation methods and verify all dtypes and layouts
@dtypes(torch.bool, torch.uint8, torch.int16, torch.int64, torch.float16, torch.float32, torch.complex64)
def test_zeros_dtype_layout_device_match(self, device, dtype):
    """torch.zeros honors the requested dtype, layout, and device."""
    made = torch.zeros((2, 3), device=device, dtype=dtype, layout=torch.strided)
    self.assertIs(dtype, made.dtype)
    self.assertIs(torch.strided, made.layout)
    self.assertEqual(torch.device(device), made.device)
    def test_stack(self, device):
        """stack at every dim (positive and its negative alias) yields the expected shape and slices."""
        for dtype in (torch.half, torch.double, torch.int):
            x = torch.randint(low=-100, high=100, size=(2, 3, 4), device=device, dtype=dtype)
            y = torch.randint(low=-100, high=100, size=(2, 3, 4), device=device, dtype=dtype)
            z = torch.randint(low=-100, high=100, size=(2, 3, 4), device=device, dtype=dtype)
            for dim in range(4):
                res = torch.stack((x, y, z), dim)
                # dim - 4 is the negative alias of the same dim for the 4-d result
                res_neg = torch.stack((x, y, z), dim - 4)
                # stacking 3 tensors inserts a new axis of length 3 at `dim`
                expected_size = x.size()[:dim] + (3,) + x.size()[dim:]
                self.assertEqual(res, res_neg)
                self.assertEqual(res.size(), expected_size)
                self.assertEqual(res.select(dim, 0), x, atol=0, rtol=0)
                self.assertEqual(res.select(dim, 1), y, atol=0, rtol=0)
                self.assertEqual(res.select(dim, 2), z, atol=0, rtol=0)
    def test_stack_out(self, device):
        """stack with out=: correct result, and the out tensor's storage is reused (data_ptr unchanged)."""
        for dtype in (torch.half, torch.double, torch.int):
            x = torch.randint(low=-100, high=100, size=(2, 3, 4), device=device, dtype=dtype)
            y = torch.randint(low=-100, high=100, size=(2, 3, 4), device=device, dtype=dtype)
            z = torch.randint(low=-100, high=100, size=(2, 3, 4), device=device, dtype=dtype)
            for dim in range(4):
                expected_size = x.size()[:dim] + (3,) + x.size()[dim:]
                res_out = x.new(expected_size)
                res_neg_out = x.new(expected_size)
                # record the data pointers so we can verify no reallocation happened
                res_out_dp = res_out.data_ptr()
                res_out_neg_dp = res_neg_out.data_ptr()
                torch.stack((x, y, z), dim, out=res_out)
                torch.stack((x, y, z), dim - 4, out=res_neg_out)
                self.assertEqual(res_out, res_neg_out)
                self.assertEqual(res_out.size(), expected_size)
                self.assertEqual(res_out_dp, res_out.data_ptr())
                self.assertEqual(res_out_neg_dp, res_neg_out.data_ptr())
                self.assertEqual(res_out.select(dim, 0), x, atol=0, rtol=0)
                self.assertEqual(res_out.select(dim, 1), y, atol=0, rtol=0)
                self.assertEqual(res_out.select(dim, 2), z, atol=0, rtol=0)
    def test_repeat_interleave(self, device):
        """repeat_interleave: implicit repeats, scalar/tensor repeats, dim=, error cases, zero-sized inputs."""
        # with no `repeats` arg, each element i is repeated x[i] times
        x = torch.tensor([0, 1, 2, 3], device=device)
        expected = torch.tensor([1, 2, 2, 3, 3, 3], device=device)
        self.assertEqual(torch.repeat_interleave(x), expected)
        # non-1D input without dim is an error
        with self.assertRaises(RuntimeError):
            torch.repeat_interleave(torch.arange(4, device=device).reshape(2, 2))
        # floating-point repeats are an error
        with self.assertRaises(RuntimeError):
            torch.repeat_interleave(torch.arange(4.0, device=device))
        # negative repeats are an error
        with self.assertRaises(RuntimeError):
            torch.repeat_interleave(torch.tensor([1, 2, -1, 3, 4], device=device))
        y = torch.tensor([[1, 2], [3, 4]], device=device)
        # int, 0-d tensor and 1-element tensor repeats must all behave the same
        y1_v1 = torch.repeat_interleave(y, 2)
        y1_v2 = torch.repeat_interleave(y, torch.tensor(2, device=device))
        y1_v3 = torch.repeat_interleave(y, torch.tensor([2], device=device))
        y1_expect = torch.tensor([1, 1, 2, 2, 3, 3, 4, 4], device=device)
        self.assertEqual(y1_v1, y1_expect)
        self.assertEqual(y1_v2, y1_expect)
        self.assertEqual(y1_v3, y1_expect)
        y2 = torch.repeat_interleave(y, 3, dim=1)
        y2_expect = torch.tensor([[1, 1, 1, 2, 2, 2],
                                  [3, 3, 3, 4, 4, 4]], device=device)
        self.assertEqual(y2, y2_expect)
        # per-row repeats along dim=0
        y3 = torch.repeat_interleave(y, torch.tensor([1, 2], device=device), dim=0)
        y3_expect = torch.tensor([[1, 2],
                                  [3, 4],
                                  [3, 4]], device=device)
        self.assertEqual(y3, y3_expect)
        # repeats length must match the size of the given dim
        with self.assertRaises(RuntimeError):
            torch.repeat_interleave(y, torch.tensor([1, 2, 3], device=device), dim=0)
        # repeats tensor must be 0-d or 1-d
        with self.assertRaises(RuntimeError):
            torch.repeat_interleave(y, torch.arange(9, device=device).reshape(3, 3), dim=0)
        # test zero sized dimension
        x = torch.zeros((5, 0), device=device)
        y = torch.repeat_interleave(x, repeats=3, dim=1)
        self.assertEqual(y, x.new_zeros(5, 0, device=device))
        x = torch.tensor([], dtype=torch.int64, device=device)
        y = torch.repeat_interleave(x, x)
        self.assertEqual(y, x)
    def test_new_methods_requires_grad(self, device):
        """new_* factory methods honor requires_grad=, and reject it for integer tensors."""
        size = (10,)
        test_cases = [
            # method name, args
            ('new_full', [size, 1]),
            ('new_empty', [size]),
            ('new_zeros', [size]),
            ('new_ones', [size]),
        ]
        for method_name, args in test_cases:
            x = torch.randn(size, device=device)
            for requires_grad in [True, False]:
                x_new = x.__getattribute__(method_name)(*args, requires_grad=requires_grad)
                self.assertEqual(x_new.requires_grad, requires_grad)
            # integer tensors cannot require gradients
            x = torch.randint(10, size, device=device)
            with self.assertRaisesRegex(
                    RuntimeError,
                    r'Only Tensors of floating point and complex dtype can require gradients'):
                x_new = x.__getattribute__(method_name)(*args, requires_grad=True)
    def test_tensor_from_sequence(self, device):
        """torch.tensor on a sequence-like object: __getitem__ must work, not just __len__."""
        class MockSequence:
            # has a length but raises on item access, so shape inference must fail
            def __init__(self, lst):
                self.lst = lst
            def __len__(self):
                return len(self.lst)
            def __getitem__(self, item):
                raise TypeError
        class GoodMockSequence(MockSequence):
            # proper item access makes the sequence consumable by torch.tensor
            def __getitem__(self, item):
                return self.lst[item]
        bad_mock_seq = MockSequence([1.0, 2.0, 3.0])
        good_mock_seq = GoodMockSequence([1.0, 2.0, 3.0])
        with self.assertRaisesRegex(ValueError, 'could not determine the shape'):
            torch.tensor(bad_mock_seq, device=device)
        self.assertEqual(torch.tensor([1.0, 2.0, 3.0], device=device), torch.tensor(good_mock_seq, device=device))
    def test_simple_scalar_cast(self, device):
        """int()/float()/complex() work on 1-element tensors only; complex tensors reject int()/float()."""
        ok = [torch.tensor([1.5], device=device), torch.zeros(1, 1, 1, 1, device=device)]
        ok_values = [1.5, 0]
        # 0-element and multi-element tensors cannot be cast to a Python scalar
        not_ok = map(torch.Tensor, [[], [1, 2], [[1, 2], [3, 4]]])
        for tensor, value in zip(ok, ok_values):
            self.assertEqual(int(tensor), int(value))
            self.assertEqual(float(tensor), float(value))
            self.assertEqual(complex(tensor), complex(value))
        self.assertEqual(complex(torch.tensor(1.5j)), 1.5j)
        for tensor in not_ok:
            self.assertRaises(ValueError, lambda: int(tensor))
            self.assertRaises(ValueError, lambda: float(tensor))
            self.assertRaises(ValueError, lambda: complex(tensor))
        # complex values cannot be narrowed to float/int
        self.assertRaises(RuntimeError, lambda: float(torch.tensor(1.5j)))
        self.assertRaises(RuntimeError, lambda: int(torch.tensor(1.5j)))
def test_offset_scalar_cast(self, device):
x = torch.tensor([1., 2., 3.], device=device)
y = x[2:]
self.assertEqual(int(y), 3)
def test_meshgrid_empty(self):
with self.assertRaisesRegex(RuntimeError,
'expects a non-empty TensorList'):
torch.meshgrid()
def test_meshgrid_unsupported_indexing(self):
with self.assertRaisesRegex(RuntimeError,
'indexing must be one of "xy" or "ij"'):
torch.meshgrid(torch.tensor([1, 2]), indexing='')
def test_meshgrid_non_1d_tensor(self):
with self.assertRaisesRegex(RuntimeError,
'Expected 0D or 1D tensor'):
torch.meshgrid(torch.tensor([[1, 2], [3, 4]]))
def test_meshgrid_inconsistent_dtype(self):
with self.assertRaisesRegex(
RuntimeError, 'expects all tensors to have the same dtype'):
torch.meshgrid(torch.tensor([1], dtype=torch.int),
torch.tensor([2], dtype=torch.float))
def test_meshgrid_inconsistent_device(self):
with self.assertRaisesRegex(
RuntimeError, 'expects all tensors to have the same device'):
torch.meshgrid(torch.tensor([1], device='cpu'),
torch.tensor([2], device='meta'))
    def test_meshgrid_warns_if_no_indexing(self):
        """Calling meshgrid without indexing= emits the forward-compatibility warning."""
        with self.assertWarnsOnceRegex(
                UserWarning, '.*will be required to pass the indexing arg.*'):
            torch.meshgrid(torch.tensor([1, 2]))
    def test_meshgrid_default_indexing(self, device):
        """meshgrid with no indexing= (list and varargs forms) produces the expected grids."""
        a = torch.tensor(1, device=device)
        b = torch.tensor([1, 2, 3], device=device)
        c = torch.tensor([1, 2], device=device)
        # list form
        grid_a, grid_b, grid_c = torch.meshgrid([a, b, c])
        self.assertEqual(grid_a.shape, torch.Size([1, 3, 2]))
        self.assertEqual(grid_b.shape, torch.Size([1, 3, 2]))
        self.assertEqual(grid_c.shape, torch.Size([1, 3, 2]))
        # varargs form must match
        grid_a2, grid_b2, grid_c2 = torch.meshgrid(a, b, c)
        self.assertEqual(grid_a2.shape, torch.Size([1, 3, 2]))
        self.assertEqual(grid_b2.shape, torch.Size([1, 3, 2]))
        self.assertEqual(grid_c2.shape, torch.Size([1, 3, 2]))
        expected_grid_a = torch.ones(1, 3, 2, dtype=torch.int64, device=device)
        expected_grid_b = torch.tensor([[[1, 1],
                                         [2, 2],
                                         [3, 3]]], device=device)
        expected_grid_c = torch.tensor([[[1, 2],
                                         [1, 2],
                                         [1, 2]]], device=device)
        self.assertTrue(grid_a.equal(expected_grid_a))
        self.assertTrue(grid_b.equal(expected_grid_b))
        self.assertTrue(grid_c.equal(expected_grid_c))
        self.assertTrue(grid_a2.equal(expected_grid_a))
        self.assertTrue(grid_b2.equal(expected_grid_b))
        self.assertTrue(grid_c2.equal(expected_grid_c))
    def test_meshgrid_xy_indexing(self, device):
        """meshgrid with indexing='xy' (list and varargs forms): first two dims are swapped vs 'ij'."""
        a = torch.tensor(1, device=device)
        b = torch.tensor([1, 2, 3], device=device)
        c = torch.tensor([1, 2], device=device)
        grid_a, grid_b, grid_c = torch.meshgrid([a, b, c], indexing='xy')
        self.assertEqual(grid_a.shape, torch.Size([3, 1, 2]))
        self.assertEqual(grid_b.shape, torch.Size([3, 1, 2]))
        self.assertEqual(grid_c.shape, torch.Size([3, 1, 2]))
        grid_a2, grid_b2, grid_c2 = torch.meshgrid(a, b, c, indexing='xy')
        self.assertEqual(grid_a2.shape, torch.Size([3, 1, 2]))
        self.assertEqual(grid_b2.shape, torch.Size([3, 1, 2]))
        self.assertEqual(grid_c2.shape, torch.Size([3, 1, 2]))
        expected_grid_a = torch.ones(3, 1, 2, dtype=torch.int64, device=device)
        expected_grid_b = torch.tensor([[[1, 1]],
                                        [[2, 2]],
                                        [[3, 3]]], device=device)
        expected_grid_c = torch.tensor([[[1, 2]],
                                        [[1, 2]],
                                        [[1, 2]]], device=device)
        self.assertTrue(grid_a.equal(expected_grid_a))
        self.assertTrue(grid_b.equal(expected_grid_b))
        self.assertTrue(grid_c.equal(expected_grid_c))
        self.assertTrue(grid_a2.equal(expected_grid_a))
        self.assertTrue(grid_b2.equal(expected_grid_b))
        self.assertTrue(grid_c2.equal(expected_grid_c))
    def test_meshgrid_ij_indexing(self, device):
        """meshgrid with indexing='ij' (list and varargs forms) produces the expected grids."""
        a = torch.tensor(1, device=device)
        b = torch.tensor([1, 2, 3], device=device)
        c = torch.tensor([1, 2], device=device)
        grid_a, grid_b, grid_c = torch.meshgrid([a, b, c], indexing='ij')
        self.assertEqual(grid_a.shape, torch.Size([1, 3, 2]))
        self.assertEqual(grid_b.shape, torch.Size([1, 3, 2]))
        self.assertEqual(grid_c.shape, torch.Size([1, 3, 2]))
        grid_a2, grid_b2, grid_c2 = torch.meshgrid(a, b, c, indexing='ij')
        self.assertEqual(grid_a2.shape, torch.Size([1, 3, 2]))
        self.assertEqual(grid_b2.shape, torch.Size([1, 3, 2]))
        self.assertEqual(grid_c2.shape, torch.Size([1, 3, 2]))
        expected_grid_a = torch.ones(1, 3, 2, dtype=torch.int64, device=device)
        expected_grid_b = torch.tensor([[[1, 1],
                                         [2, 2],
                                         [3, 3]]], device=device)
        expected_grid_c = torch.tensor([[[1, 2],
                                         [1, 2],
                                         [1, 2]]], device=device)
        self.assertTrue(grid_a.equal(expected_grid_a))
        self.assertTrue(grid_b.equal(expected_grid_b))
        self.assertTrue(grid_c.equal(expected_grid_c))
        self.assertTrue(grid_a2.equal(expected_grid_a))
        self.assertTrue(grid_b2.equal(expected_grid_b))
        self.assertTrue(grid_c2.equal(expected_grid_c))
def test_meshgrid_ij_indexing_is_default(self, device):
a = torch.tensor(1, device=device)
b = torch.tensor([1, 2, 3], device=device)
c = torch.tensor([1, 2], device=device)
grid_a, grid_b, grid_c = torch.meshgrid(a, b, c, indexing='ij')
grid_a2, grid_b2, grid_c2 = torch.meshgrid(a, b, c)
self.assertTrue(grid_a.equal(grid_a2))
self.assertTrue(grid_b.equal(grid_b2))
self.assertTrue(grid_c.equal(grid_c2))
    @skipMeta
    def test_meshgrid_vs_numpy(self, device):
        """Cross-check torch.meshgrid against np.meshgrid over 0D/1D shapes and matching indexing modes."""
        # Shapes to the random tensors. Each line is a test case, and
        # each list within that line is the shape of a single
        # tensor. The shapes are restricted to 0D (represented by [])
        # and 1D tensors.
        cases = [
            [[]],
            [[1], [1], [1]],
            [[], [], []],
            [[3], [5], [7]],
            [[3], [], [7]],
            [[11], [13]],
            [[15]],
        ]
        # We also need to test the different indexing modes. We can't
        # just enumerate them because we don't presently support the
        # same modes as numpy.meshgrid, nor does our default
        # correspond to their default.
        #
        # TODO Eliminate this and replace it with a list of all
        # supported indexing modes when we have full compatibility.
        indexing_correspondence = [
            # No indexing in PyTorch corresponds to "ij" indexing in
            # NumPy.
            ({}, {'indexing': 'ij'}),
            # No indexing in NumPy corresponds to "xy" indexing in
            # PyTorch.
            ({'indexing': 'xy'}, {}),
            # "ij" and "xy" are implemented identically in both.
            ({'indexing': 'ij'}, {'indexing': 'ij'}),
            ({'indexing': 'xy'}, {'indexing': 'xy'}),
        ]
        for shapes, (torch_kwargs, numpy_kwargs) in product(cases, indexing_correspondence):
            with self.subTest(shapes=shapes, torch_kwargs=torch_kwargs, numpy_kwargs=numpy_kwargs):
                tensors = [make_tensor(shape, device=device, dtype=torch.int) for shape in shapes]
                torch_grids = torch.meshgrid(*tensors, **torch_kwargs)
                numpy_grids = np.meshgrid(*(tensor.cpu().numpy() for tensor in tensors), **numpy_kwargs)
                self.assertEqual(torch_grids, numpy_grids)
    def test_cartesian_prod(self, device):
        """cartesian_prod matches itertools.product; handles empty and single-tensor inputs."""
        a = torch.tensor([1], device=device)
        b = torch.tensor([1, 2, 3], device=device)
        c = torch.tensor([1, 2], device=device)
        prod = torch.cartesian_prod(a, b, c)
        expected = torch.tensor(list(product([a], b, c)), device=device)
        self.assertEqual(expected, prod)
        # test 0 size input
        d = torch.empty(0, dtype=b.dtype, device=device)
        prod = torch.cartesian_prod(a, b, c, d)
        expected = torch.empty(0, 4, dtype=b.dtype, device=device)
        self.assertEqual(expected, prod)
        # test single input
        prod = torch.cartesian_prod(b)
        self.assertEqual(b, prod)
    def test_combinations(self, device):
        """combinations matches itertools for r=0..5, with and without replacement, and on empty input."""
        a = torch.tensor([1, 2, 3], device=device)
        c = torch.combinations(a, r=0)
        expected = torch.empty(0, dtype=a.dtype, device=device)
        self.assertEqual(c, expected)
        c = torch.combinations(a, r=1)
        expected = torch.tensor(list(combinations(a, r=1)), device=device)
        self.assertEqual(c, expected)
        c = torch.combinations(a, r=1, with_replacement=True)
        expected = torch.tensor(list(combinations_with_replacement(a, r=1)), device=device)
        self.assertEqual(c, expected)
        # default r is 2
        c = torch.combinations(a)
        expected = torch.tensor(list(combinations(a, r=2)), device=device)
        self.assertEqual(c, expected)
        c = torch.combinations(a, with_replacement=True)
        expected = torch.tensor(list(combinations_with_replacement(a, r=2)), device=device)
        self.assertEqual(c, expected)
        c = torch.combinations(a, r=3)
        expected = torch.tensor(list(combinations(a, r=3)), device=device)
        self.assertEqual(c, expected)
        # r larger than the input yields an empty (0, r) result
        c = torch.combinations(a, r=4)
        expected = torch.empty(0, 4, dtype=a.dtype, device=device)
        self.assertEqual(c, expected)
        c = torch.combinations(a, r=5)
        expected = torch.empty(0, 5, dtype=a.dtype, device=device)
        self.assertEqual(c, expected)
        # test empty input
        a = torch.empty(0, device=device)
        c1 = torch.combinations(a)
        c2 = torch.combinations(a, with_replacement=True)
        expected = torch.empty(0, 2, dtype=a.dtype, device=device)
        self.assertEqual(c1, expected)
        self.assertEqual(c2, expected)
    @skipMeta
    def test_linlogspace_mem_overlap(self, device):
        """linspace/logspace must reject an out= tensor with internally overlapping memory."""
        # expand() creates a view where all 10 elements alias one storage element
        x = torch.rand(1, device=device).expand(10)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.linspace(1, 10, 10, out=x)
        with self.assertRaisesRegex(RuntimeError, 'unsupported operation'):
            torch.logspace(1, 10, 10, out=x)
    def test_ctor_with_numpy_array(self, device):
        """Legacy *Tensor constructors accept numpy arrays of various dtypes; values survive up/downcast."""
        correct_dtypes = [
            np.double,
            float,
            np.float16,
            np.int64,
            np.int32,
            np.int16,
            np.int8,
            np.uint8,
            bool,
        ]
        # wrong-endian dtypes are only listed for reference; they are not exercised below
        incorrect_byteorder = '>' if sys.byteorder == 'little' else '<'
        incorrect_dtypes = [incorrect_byteorder + t for t in ['d', 'f']]
        for dtype in correct_dtypes:
            array = np.array([1, 2, 3, 4], dtype=dtype)
            # Upcast
            tensor = torch.DoubleTensor(array).to(device)
            for i in range(len(array)):
                self.assertEqual(tensor[i], array[i])
            # Downcast (sometimes)
            tensor = torch.FloatTensor(array).to(device)
            for i in range(len(array)):
                self.assertEqual(tensor[i], array[i])
            tensor = torch.HalfTensor(array).to(device)
            for i in range(len(array)):
                self.assertEqual(tensor[i], array[i])
    @dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
    def test_random(self, device, dtype):
        """random_(lb, ub) and random_(ub) fill within [lb, ub); min/max are hit with overwhelming probability."""
        # This test is flaky with p<=(2/(ub-lb))^200=6e-36
        t = torch.empty(200, dtype=dtype, device=device)
        lb = 1
        ub = 4
        t.fill_(-1)
        t.random_(lb, ub)
        self.assertEqual(t.min(), lb)
        self.assertEqual(t.max(), ub - 1)  # upper bound is exclusive
        t.fill_(-1)
        t.random_(ub)  # single-arg form: range is [0, ub)
        self.assertEqual(t.min(), 0)
        self.assertEqual(t.max(), ub - 1)
    def test_random_bool(self, device):
        """random_() on bool tensors yields both values, roughly balanced (40-60%)."""
        size = 2000
        t = torch.empty(size, dtype=torch.bool, device=device)
        t.fill_(False)
        t.random_()
        self.assertEqual(t.min(), False)
        self.assertEqual(t.max(), True)
        self.assertTrue(0.4 < (t.eq(True)).to(torch.int).sum().item() / size < 0.6)
        # repeat starting from all-True to rule out fill-value bias
        t.fill_(True)
        t.random_()
        self.assertEqual(t.min(), False)
        self.assertEqual(t.max(), True)
        self.assertTrue(0.4 < (t.eq(True)).to(torch.int).sum().item() / size < 0.6)
    def test_random_from_to_bool(self, device):
        """random_(from, to) on bool: valid [0, 2) windows fill correctly; out-of-range or empty windows raise."""
        size = 2000
        int64_min_val = torch.iinfo(torch.int64).min
        int64_max_val = torch.iinfo(torch.int64).max
        # bool's representable range is [0, 1]
        min_val = 0
        max_val = 1
        froms = [int64_min_val, -42, min_val - 1, min_val, max_val, max_val + 1, 42]
        tos = [-42, min_val - 1, min_val, max_val, max_val + 1, 42, int64_max_val]
        for from_ in froms:
            for to_ in tos:
                t = torch.empty(size, dtype=torch.bool, device=device)
                if to_ > from_:
                    if not (min_val <= from_ <= max_val):
                        self.assertRaisesRegex(
                            RuntimeError,
                            "from is out of bounds",
                            lambda: t.random_(from_, to_)
                        )
                    elif not (min_val <= (to_ - 1) <= max_val):
                        # `to` is exclusive, so to_ - 1 is the largest generated value
                        self.assertRaisesRegex(
                            RuntimeError,
                            "to - 1 is out of bounds",
                            lambda: t.random_(from_, to_)
                        )
                    else:
                        t.random_(from_, to_)
                        range_ = to_ - from_
                        delta = 1
                        self.assertTrue(from_ <= t.to(torch.int).min() < (from_ + delta))
                        self.assertTrue((to_ - delta) <= t.to(torch.int).max() < to_)
                else:
                    # empty or inverted window is always an error
                    self.assertRaisesRegex(
                        RuntimeError,
                        "random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
                        lambda: t.random_(from_, to_)
                    )
    # NB: uint64 is broken because its max value is not representable in
    # int64_t, but this is what random expects
    @dtypes(*all_types_and(torch.bfloat16, torch.half, torch.uint16, torch.uint32))
    def test_random_full_range(self, device, dtype):
        """random_(from, None) spans the dtype's full (precision-limited for fp) representable range."""
        size = 2000
        alpha = 0.1  # fraction of the range the observed min/max may deviate by
        int64_min_val = torch.iinfo(torch.int64).min
        int64_max_val = torch.iinfo(torch.int64).max
        # fp_limit: largest integer magnitude exactly representable in the fp dtype (2**mantissa_bits)
        if dtype == torch.double:
            fp_limit = 2**53
        elif dtype == torch.float:
            fp_limit = 2**24
        elif dtype == torch.half:
            fp_limit = 2**11
        elif dtype == torch.bfloat16:
            fp_limit = 2**8
        else:
            fp_limit = 0
        t = torch.empty(size, dtype=dtype, device=device)
        if dtype in [torch.float, torch.double, torch.half, torch.bfloat16]:
            from_ = int(max(-fp_limit, int64_min_val))
            to_inc_ = int(min(fp_limit, int64_max_val))
        else:
            from_ = int(max(torch.iinfo(dtype).min, int64_min_val))
            to_inc_ = int(min(torch.iinfo(dtype).max, int64_max_val))
        range_ = to_inc_ - from_ + 1
        t.random_(from_, None)  # None means "up to the dtype's max", inclusive
        delta = max(1, alpha * range_)
        self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
        self.assertTrue((to_inc_ - delta) < t.to(torch.double).max() <= to_inc_)
# NB: uint64 is broken because its max value is not representable in
# int64_t, but this is what random expects
@dtypes(*all_types_and(torch.bfloat16, torch.half, torch .uint16, torch.uint32))
def test_random_from_to(self, device, dtype):
size = 2000
alpha = 0.1
int64_min_val = torch.iinfo(torch.int64).min
int64_max_val = torch.iinfo(torch.int64).max
if dtype in [torch.float, torch.double, torch.half]:
min_val = int(max(torch.finfo(dtype).min, int64_min_val))
max_val = int(min(torch.finfo(dtype).max, int64_max_val))
froms = [min_val, -42, 0, 42]
tos = [-42, 0, 42, max_val >> 1]
elif dtype == torch.bfloat16:
min_val = int64_min_val
max_val = int64_max_val
froms = [min_val, -42, 0, 42]
tos = [-42, 0, 42, max_val >> 1]
elif dtype == torch.uint8:
min_val = torch.iinfo(dtype).min
max_val = torch.iinfo(dtype).max
froms = [int64_min_val, -42, min_val - 1, min_val, 42, max_val, max_val + 1]
tos = [-42, min_val - 1, min_val, 42, max_val, max_val + 1, int64_max_val]
elif dtype == torch.int64:
min_val = int64_min_val
max_val = int64_max_val
froms = [min_val, -42, 0, 42]
tos = [-42, 0, 42, max_val]
else:
min_val = torch.iinfo(dtype).min
max_val = torch.iinfo(dtype).max
froms = [int64_min_val, min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1]
tos = [min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1, int64_max_val]
if dtype == torch.double:
fp_limit = 2**53
elif dtype == torch.float:
fp_limit = 2**24
elif dtype == torch.half:
fp_limit = 2**11
elif dtype == torch.bfloat16:
fp_limit = 2**8
else:
fp_limit = 0
for from_ in froms:
for to_ in tos:
t = torch.empty(size, dtype=dtype, device=device)
if to_ > from_:
if not (min_val <= from_ <= max_val):
self.assertRaisesRegex(
RuntimeError,
"from is out of bounds",
lambda: t.random_(from_, to_)
)
elif not (min_val <= (to_ - 1) <= max_val):
self.assertRaisesRegex(
RuntimeError,
"to - 1 is out of bounds",
lambda: t.random_(from_, to_)
)
else:
if dtype.is_floating_point and (
not (-fp_limit <= from_ <= fp_limit) or not (-fp_limit <= (to_ - 1) <= fp_limit)):
if not (-fp_limit <= from_ <= fp_limit):
self.assertWarnsRegex(UserWarning, "from is out of bounds",
lambda: t.random_(from_, to_))
if not (-fp_limit <= (to_ - 1) <= fp_limit):
self.assertWarnsRegex(UserWarning, "to - 1 is out of bounds",
lambda: t.random_(from_, to_))
else:
t.random_(from_, to_)
range_ = to_ - from_
delta = max(1, alpha * range_)
if dtype == torch.bfloat16:
# Less strict checks because of rounding errors
# TODO investigate rounding errors
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_ - delta) < t.to(torch.double).max() <= to_)
else:
self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
self.assertTrue((to_ - delta) <= t.to(torch.double).max() < to_)
else:
self.assertRaisesRegex(
RuntimeError,
"random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
lambda: t.random_(from_, to_)
)
    @dtypes(*all_types_and(torch.bfloat16, torch.half, torch.uint16, torch.uint32))
    def test_random_to(self, device, dtype):
        """Single-arg random_(to): fills [0, to); out-of-range `to` raises; to <= 0 is an error."""
        size = 2000
        alpha = 0.1  # fraction of the range the observed min/max may deviate by
        int64_min_val = torch.iinfo(torch.int64).min
        int64_max_val = torch.iinfo(torch.int64).max
        # choose candidate upper bounds per dtype family
        if dtype in [torch.float, torch.double, torch.half]:
            min_val = int(max(torch.finfo(dtype).min, int64_min_val))
            max_val = int(min(torch.finfo(dtype).max, int64_max_val))
            tos = [-42, 0, 42, max_val >> 1]
        elif dtype == torch.bfloat16:
            min_val = int64_min_val
            max_val = int64_max_val
            tos = [-42, 0, 42, max_val >> 1]
        elif dtype == torch.uint8:
            min_val = torch.iinfo(dtype).min
            max_val = torch.iinfo(dtype).max
            tos = [-42, min_val - 1, min_val, 42, max_val, max_val + 1, int64_max_val]
        elif dtype == torch.int64:
            min_val = int64_min_val
            max_val = int64_max_val
            tos = [-42, 0, 42, max_val]
        else:
            min_val = torch.iinfo(dtype).min
            max_val = torch.iinfo(dtype).max
            tos = [min_val - 1, min_val, -42, 0, 42, max_val, max_val + 1, int64_max_val]
        from_ = 0  # single-arg form always starts at 0
        for to_ in tos:
            t = torch.empty(size, dtype=dtype, device=device)
            if to_ > from_:
                if not (min_val <= (to_ - 1) <= max_val):
                    # `to` is exclusive, so to_ - 1 is the largest generated value
                    self.assertRaisesRegex(
                        RuntimeError,
                        "to - 1 is out of bounds",
                        lambda: t.random_(from_, to_)
                    )
                else:
                    t.random_(to_)
                    range_ = to_ - from_
                    delta = max(1, alpha * range_)
                    if dtype == torch.bfloat16:
                        # Less strict checks because of rounding errors
                        # TODO investigate rounding errors
                        self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
                        self.assertTrue((to_ - delta) < t.to(torch.double).max() <= to_)
                    else:
                        self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
                        self.assertTrue((to_ - delta) <= t.to(torch.double).max() < to_)
            else:
                # to_ <= 0 yields an empty window
                self.assertRaisesRegex(
                    RuntimeError,
                    "random_ expects 'from' to be less than 'to', but got from=" + str(from_) + " >= to=" + str(to_),
                    lambda: t.random_(from_, to_)
                )
    @dtypes(*all_types_and(torch.bfloat16, torch.half))
    def test_random_default(self, device, dtype):
        """random_() with no args spans [0, 2**mantissa_bits] for fp dtypes, [0, iinfo.max] for ints."""
        size = 2000
        alpha = 0.1  # fraction of the range the observed min/max may deviate by
        if dtype == torch.float:
            to_inc = 1 << 24
        elif dtype == torch.double:
            to_inc = 1 << 53
        elif dtype == torch.half:
            to_inc = 1 << 11
        elif dtype == torch.bfloat16:
            to_inc = 1 << 8
        else:
            to_inc = torch.iinfo(dtype).max
        t = torch.empty(size, dtype=dtype, device=device)
        t.random_()
        self.assertTrue(0 <= t.to(torch.double).min() < alpha * to_inc)
        self.assertTrue((to_inc - alpha * to_inc) < t.to(torch.double).max() <= to_inc)
    # TODO: this test should be updated
    @onlyNativeDeviceTypes
    def test_empty_full(self, device):
        """Run the shared do_test_empty_full helper for strided layout on this device (and with device=None on cuda)."""
        torch_device = torch.device(device)
        device_type = torch_device.type
        dtypes = get_all_dtypes(include_half=False, include_bfloat16=False, include_complex32=True)
        if device_type == 'cpu':
            do_test_empty_full(self, dtypes, torch.strided, torch_device)
        if device_type == 'cuda':
            # device=None exercises the default-device path
            do_test_empty_full(self, dtypes, torch.strided, None)
            do_test_empty_full(self, dtypes, torch.strided, torch_device)
    # TODO: this test should be updated
    @suppress_warnings
    @onlyNativeDeviceTypes
    @deviceCountAtLeast(1)
    def test_tensor_device(self, devices):
        """Tensors land on the device requested via string, index, .cuda(), or the current-device context."""
        device_type = torch.device(devices[0]).type
        if device_type == 'cpu':
            self.assertEqual('cpu', torch.tensor(5).device.type)
            self.assertEqual('cpu',
                             torch.ones((2, 3), dtype=torch.float32, device='cpu').device.type)
            # 'cpu:0' is accepted and normalizes to device type 'cpu'
            self.assertEqual('cpu',
                             torch.ones((2, 3), dtype=torch.float32, device='cpu:0').device.type)
            self.assertEqual('cpu',
                             torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cpu:0').device.type)
            self.assertEqual('cpu', torch.tensor(np.random.randn(2, 3), device='cpu').device.type)
        if device_type == 'cuda':
            self.assertEqual('cuda:0', str(torch.tensor(5).cuda(0).device))
            self.assertEqual('cuda:0', str(torch.tensor(5).cuda('cuda:0').device))
            # a bare integer device index means that cuda device
            self.assertEqual('cuda:0',
                             str(torch.tensor(5, dtype=torch.int64, device=0).device))
            self.assertEqual('cuda:0',
                             str(torch.tensor(5, dtype=torch.int64, device='cuda:0').device))
            self.assertEqual('cuda:0',
                             str(torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cuda:0').device))
            self.assertEqual('cuda:0', str(torch.tensor(np.random.randn(2, 3), device='cuda:0').device))
            for device in devices:
                with torch.cuda.device(device):
                    # bare 'cuda' resolves to the current device set by the context manager
                    device_string = 'cuda:' + str(torch.cuda.current_device())
                    self.assertEqual(device_string,
                                     str(torch.tensor(5, dtype=torch.int64, device='cuda').device))
            # .cuda() must reject cpu targets
            with self.assertRaises(RuntimeError):
                torch.tensor(5).cuda('cpu')
            with self.assertRaises(RuntimeError):
                torch.tensor(5).cuda('cpu:0')
            if len(devices) > 1:
                self.assertEqual('cuda:1', str(torch.tensor(5).cuda(1).device))
                self.assertEqual('cuda:1', str(torch.tensor(5).cuda('cuda:1').device))
                self.assertEqual('cuda:1',
                                 str(torch.tensor(5, dtype=torch.int64, device=1).device))
                self.assertEqual('cuda:1',
                                 str(torch.tensor(5, dtype=torch.int64, device='cuda:1').device))
                self.assertEqual('cuda:1',
                                 str(torch.tensor(torch.ones((2, 3), dtype=torch.float32),
                                     device='cuda:1').device))
                self.assertEqual('cuda:1',
                                 str(torch.tensor(np.random.randn(2, 3), device='cuda:1').device))
    # TODO: this test should be updated
    @onlyNativeDeviceTypes
    def test_as_strided_neg(self, device):
        """as_strided rejects negative strides with a message listing the offending strides."""
        error = r'as_strided: Negative strides are not supported at the ' \
                r'moment, got strides: \[-?[0-9]+(, -?[0-9]+)*\]'
        with self.assertRaisesRegex(RuntimeError, error):
            torch.as_strided(torch.ones(3, 3, device=device), (1, 1), (2, -1))
        with self.assertRaisesRegex(RuntimeError, error):
            torch.as_strided(torch.ones(14, device=device), (2,), (-11,))
    # TODO: this test should be updated
    def test_zeros(self, device):
        """torch.zeros: out= form matches, and bool/half/bfloat16/complex dtypes all produce true zeros."""
        res1 = torch.zeros(100, 100, device=device)
        # out= with an empty tensor resizes it in place
        res2 = torch.tensor((), device=device)
        torch.zeros(100, 100, device=device, out=res2)
        self.assertEqual(res1, res2)
        boolTensor = torch.zeros(2, 2, device=device, dtype=torch.bool)
        expected = torch.tensor([[False, False], [False, False]],
                                device=device, dtype=torch.bool)
        self.assertEqual(boolTensor, expected)
        halfTensor = torch.zeros(1, 1, device=device, dtype=torch.half)
        expected = torch.tensor([[0.]], device=device, dtype=torch.float16)
        self.assertEqual(halfTensor, expected)
        bfloat16Tensor = torch.zeros(1, 1, device=device, dtype=torch.bfloat16)
        expected = torch.tensor([[0.]], device=device, dtype=torch.bfloat16)
        self.assertEqual(bfloat16Tensor, expected)
        complexTensor = torch.zeros(2, 2, device=device, dtype=torch.complex64)
        expected = torch.tensor([[0., 0.], [0., 0.]], device=device, dtype=torch.complex64)
        self.assertEqual(complexTensor, expected)
        complexHalfTensor = torch.zeros(2, 2, device=device, dtype=torch.complex32)
        expected = torch.tensor([[0., 0.], [0., 0.]], device=device, dtype=torch.complex32)
        self.assertEqual(complexHalfTensor, expected)
def test_zeros_bounds_checking(self, device):
# Test negative large integer
with self.assertRaisesRegex(RuntimeError, r"zeros: Dimension size must be non-negative."):
torch.zeros(-6744789213055875072, device=device)
    # TODO: this test should be updated
    def test_zeros_out(self, device):
        """zeros(out=): mismatched dtype/layout raise, while matching (or default) values are accepted."""
        shape = (3, 4)
        out = torch.zeros(shape, device=device)
        torch.zeros(shape, device=device, out=out)
        # change the dtype, layout, device
        with self.assertRaises(RuntimeError):
            torch.zeros(shape, device=device, dtype=torch.int64, out=out)
        with self.assertRaises(RuntimeError):
            torch.zeros(shape, device=device, layout=torch.sparse_coo, out=out)
        # leave them the same
        self.assertEqual(torch.zeros(shape, device=device),
                         torch.zeros(shape, device=device, dtype=out.dtype, out=out))
        self.assertEqual(torch.zeros(shape, device=device),
                         torch.zeros(shape, device=device, layout=torch.strided, out=out))
        self.assertEqual(torch.zeros(shape, device=device),
                         torch.zeros(shape, device=device, out=out))
    # TODO: this test should be updated
    def test_ones(self, device):
        """torch.ones: out= form matches, bool tensors are all-True, and chalf agrees with cfloat."""
        res1 = torch.ones(100, 100, device=device)
        # out= with an empty tensor resizes it in place
        res2 = torch.tensor((), device=device)
        torch.ones(100, 100, device=device, out=res2)
        self.assertEqual(res1, res2)
        # test boolean tensor
        res1 = torch.ones(1, 2, device=device, dtype=torch.bool)
        expected = torch.tensor([[True, True]], device=device, dtype=torch.bool)
        self.assertEqual(res1, expected)
        # test chalf
        self.assertEqual(torch.ones(100, 100, device=device, dtype=torch.chalf),
                         torch.ones(100, 100, device=device, dtype=torch.cfloat), exact_dtype=False)
    # TODO: this test should be updated
    @onlyCPU
    def test_constructor_dtypes(self, device):
        """set_default_tensor_type/set_default_dtype: default dtype and Storage alias follow, and
        only floating-point defaults are accepted."""
        self.assertIs(torch.tensor([]).dtype, torch.get_default_dtype())
        self.assertIs(torch.uint8, torch.ByteTensor.dtype)
        self.assertIs(torch.float32, torch.FloatTensor.dtype)
        self.assertIs(torch.float64, torch.DoubleTensor.dtype)
        with set_default_tensor_type('torch.FloatTensor'):
            self.assertIs(torch.float32, torch.get_default_dtype())
            # torch.Storage aliases the default dtype's storage class
            self.assertIs(torch.FloatStorage, torch.Storage)
        # only floating-point types are supported as the default type
        self.assertRaises(TypeError, lambda: torch.set_default_tensor_type('torch.IntTensor'))
        with set_default_dtype(torch.float64):
            self.assertIs(torch.float64, torch.get_default_dtype())
            self.assertIs(torch.DoubleStorage, torch.Storage)
            # nested override by tensor type wins while active
            with set_default_tensor_type(torch.FloatTensor):
                self.assertIs(torch.float32, torch.get_default_dtype())
                self.assertIs(torch.FloatStorage, torch.Storage)
        if torch.cuda.is_available():
            with set_default_tensor_type(torch.cuda.FloatTensor):
                self.assertIs(torch.float32, torch.get_default_dtype())
                self.assertIs(torch.float32, torch.cuda.FloatTensor.dtype)
                self.assertIs(torch.cuda.FloatStorage, torch.Storage)
                with set_default_dtype(torch.float64):
                    self.assertIs(torch.float64, torch.get_default_dtype())
                    self.assertIs(torch.cuda.DoubleStorage, torch.Storage)
        # don't allow passing dtype to set_default_tensor_type
        self.assertRaises(TypeError, lambda: torch.set_default_tensor_type(torch.float32))
        # don't allow passing dtype to set_default_dtype
        for t in all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.qint8):
            # only floating-point types are supported as the default type
            if t in (
                    torch.half,
                    torch.float,
                    torch.double,
                    torch.bfloat16):
                with set_default_dtype(t):
                    pass
            else:
                self.assertRaises(TypeError, lambda: torch.set_default_dtype(t))
    # TODO: this test should be updated
    @onlyCPU
    def test_constructor_device_legacy(self, device):
        """Legacy type constructors must reject a `device` that disagrees
        with the constructor's own device (and reject `device` entirely when
        constructing from an existing Tensor)."""
        self.assertRaises(RuntimeError, lambda: torch.FloatTensor(device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.FloatTensor((2.0, 3.0), device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cuda'))
        # Tensor constructor/new with Tensor argument shouldn't work with device specified
        i = torch.tensor([1], device='cpu')
        self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cpu'))
        self.assertRaises(RuntimeError, lambda: i.new(i, device='cpu'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cuda'))
        self.assertRaises(RuntimeError, lambda: i.new(i, device='cuda'))
        x = torch.randn((3,), device='cpu')
        self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cuda'))
        if torch.cuda.is_available():
            # Mirror of the checks above with the CUDA legacy types.
            self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor((2.0, 3.0), device='cpu'))
            # Tensor constructor/new with Tensor argument shouldn't work with device specified
            i = torch.tensor([1], device='cuda')
            self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cuda'))
            self.assertRaises(RuntimeError, lambda: i.new(i, device='cuda'))
            self.assertRaises(RuntimeError, lambda: torch.Tensor(i, device='cpu'))
            self.assertRaises(RuntimeError, lambda: i.new(i, device='cpu'))
            with set_default_tensor_type(torch.cuda.FloatTensor):
                self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cpu'))
                self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cpu'))
                self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cpu'))
            x = torch.randn((3,), device='cuda')
            self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cpu'))
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_tensor_factory(self, device):
# TODO: This test probably doesn't make too much sense now that
# torch.tensor has been established for a while; it makes more
# sense to test the legacy behavior in terms of the new behavior
expected = torch.Tensor([1, 1])
# test data
res1 = torch.tensor([1, 1])
self.assertEqual(res1, expected, exact_dtype=False)
res1 = torch.tensor([1, 1], dtype=torch.int)
self.assertEqual(res1, expected, exact_dtype=False)
self.assertIs(torch.int, res1.dtype)
# test copy
res2 = torch.tensor(expected)
self.assertEqual(res2, expected)
res2[1] = 2
self.assertEqual(expected, torch.ones_like(expected))
res2 = torch.tensor(expected, dtype=torch.int)
self.assertEqual(res1, expected, exact_dtype=False)
self.assertIs(torch.int, res1.dtype)
# test copy with numpy
for dtype in [np.float64, np.int64, np.int8, np.uint8]:
a = np.array([5.]).astype(dtype)
res1 = torch.tensor(a)
self.assertEqual(5., res1[0].item())
a[0] = 7.
self.assertEqual(5., res1[0].item())
# test boolean tensor
a = torch.tensor([True, True, False, True, True], dtype=torch.bool)
b = torch.tensor([-1, -1.1, 0, 1, 1.1], dtype=torch.bool)
self.assertEqual(a, b)
c = torch.tensor([-0.1, -1.1, 0, 1, 0.1], dtype=torch.bool)
self.assertEqual(a, c)
d = torch.tensor((-.3, 0, .3, 1, 3 / 7), dtype=torch.bool)
e = torch.tensor((True, False, True, True, True), dtype=torch.bool)
self.assertEqual(e, d)
f = torch.tensor((-1, 0, -1.1, 1, 1.1), dtype=torch.bool)
self.assertEqual(e, f)
int64_max = torch.iinfo(torch.int64).max
int64_min = torch.iinfo(torch.int64).min
float64_max = torch.finfo(torch.float64).max
float64_min = torch.finfo(torch.float64).min
g_1 = torch.tensor((float('nan'), 0, int64_min, int64_max, int64_min - 1), dtype=torch.bool)
self.assertEqual(e, g_1)
g_2 = torch.tensor((int64_max + 1, 0, (int64_max + 1) * 2, (int64_max + 1) * 2 + 1, float64_min), dtype=torch.bool)
self.assertEqual(e, g_2)
g_3 = torch.tensor((float64_max, 0, float64_max + 1, float64_min - 1, float64_max + 1e291), dtype=torch.bool)
self.assertEqual(e, g_3)
h = torch.tensor([True, False, False, True, False, True, True], dtype=torch.bool)
i = torch.tensor([1e-323, 1e-324, 0j, 1e-323j, 1e-324j, 1 + 2j, -1j], dtype=torch.bool)
self.assertEqual(h, i)
j = torch.tensor((True, True, True, True), dtype=torch.bool)
k = torch.tensor((1e323, -1e323, float('inf'), -float('inf')), dtype=torch.bool)
self.assertEqual(j, k)
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_tensor_factory_copy_var(self, device):
def check_copy(copy, is_leaf, requires_grad, data_ptr=None):
if data_ptr is None:
data_ptr = copy.data_ptr
self.assertEqual(copy, source, exact_dtype=False)
self.assertTrue(copy.is_leaf == is_leaf)
self.assertTrue(copy.requires_grad == requires_grad)
self.assertTrue(copy.data_ptr == data_ptr)
source = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
# test torch.tensor()
check_copy(torch.tensor(source), True, False)
check_copy(torch.tensor(source, requires_grad=False), True, False)
check_copy(torch.tensor(source, requires_grad=True), True, True)
# test tensor.new_tensor()
copy = torch.randn(1)
check_copy(copy.new_tensor(source), True, False)
check_copy(copy.new_tensor(source, requires_grad=False), True, False)
check_copy(copy.new_tensor(source, requires_grad=True), True, True)
# test torch.as_tensor()
check_copy(torch.as_tensor(source), source.is_leaf, source.requires_grad, source.data_ptr) # not copy
check_copy(torch.as_tensor(source, dtype=torch.float), False, True) # copy and keep the graph
    # TODO: this test should be updated
    @onlyCPU
    def test_tensor_factory_type_inference(self, device):
        """torch.tensor dtype inference for python scalars/sequences, numpy
        arrays, and mixed python/numpy nesting, under both float32 and
        float64 as the global default dtype."""
        def test_inference(default_dtype):
            # The inferred complex dtype tracks the precision of the default
            # floating dtype.
            default_complex_dtype = torch.complex64 if default_dtype == torch.float32 else torch.complex128
            self.assertIs(default_dtype, torch.tensor(()).dtype)
            self.assertIs(default_dtype, torch.tensor(5.).dtype)
            self.assertIs(torch.int64, torch.tensor(5).dtype)
            self.assertIs(torch.bool, torch.tensor(True).dtype)
            self.assertIs(torch.int32, torch.tensor(5, dtype=torch.int32).dtype)
            self.assertIs(default_dtype, torch.tensor(((7, 5), (9, 5.))).dtype)
            self.assertIs(default_dtype, torch.tensor(((5., 5), (3, 5))).dtype)
            self.assertIs(torch.int64, torch.tensor(((5, 3), (3, 5))).dtype)
            self.assertIs(default_complex_dtype, torch.tensor(((5, 3 + 2j), (3, 5 + 4j))).dtype)
            # numpy inputs keep their numpy dtype rather than the default.
            self.assertIs(torch.float64, torch.tensor(np.array(())).dtype)
            self.assertIs(torch.float64, torch.tensor(np.array(5.)).dtype)
            if np.array(5).dtype == np.int64:  # np long, which can be 4 bytes (e.g. on windows)
                self.assertIs(torch.int64, torch.tensor(np.array(5)).dtype)
            else:
                self.assertIs(torch.int32, torch.tensor(np.array(5)).dtype)
            self.assertIs(torch.uint8, torch.tensor(np.array(3, dtype=np.uint8)).dtype)
            self.assertIs(default_dtype, torch.tensor(((7, np.array(5)), (np.array(9), 5.))).dtype)
            self.assertIs(torch.float64, torch.tensor(((7, 5), (9, np.array(5.)))).dtype)
            self.assertIs(torch.int64, torch.tensor(((5, np.array(3)), (np.array(3), 5))).dtype)
        for dtype in [torch.float64, torch.float32]:
            with set_default_dtype(dtype):
                test_inference(dtype)
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_new_tensor(self, device):
expected = torch.autograd.Variable(torch.ByteTensor([1, 1]))
# test data
res1 = expected.new_tensor([1, 1])
self.assertEqual(res1, expected)
res1 = expected.new_tensor([1, 1], dtype=torch.int)
self.assertEqual(res1, expected, exact_dtype=False)
self.assertIs(torch.int, res1.dtype)
# test copy
res2 = expected.new_tensor(expected)
self.assertEqual(res2, expected)
res2[1] = 2
self.assertEqual(expected, torch.ones_like(expected))
res2 = expected.new_tensor(expected, dtype=torch.int)
self.assertEqual(res2, expected, exact_dtype=False)
self.assertIs(torch.int, res2.dtype)
# test copy with numpy
a = np.array([5.])
res1 = torch.tensor(a)
res1 = res1.new_tensor(a)
self.assertEqual(5., res1[0].item())
a[0] = 7.
self.assertEqual(5., res1[0].item())
if torch.cuda.device_count() >= 2:
expected = expected.cuda(1)
res1 = expected.new_tensor([1, 1])
self.assertEqual(res1.get_device(), expected.get_device())
res1 = expected.new_tensor([1, 1], dtype=torch.int)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res1.get_device(), expected.get_device())
res2 = expected.new_tensor(expected)
self.assertEqual(res2.get_device(), expected.get_device())
res2 = expected.new_tensor(expected, dtype=torch.int)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res2.get_device(), expected.get_device())
res2 = expected.new_tensor(expected, dtype=torch.int, device=0)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res2.get_device(), 0)
res1 = expected.new_tensor(1)
self.assertEqual(res1.get_device(), expected.get_device())
res1 = expected.new_tensor(1, dtype=torch.int)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res1.get_device(), expected.get_device())
    # TODO: this test should be updated
    @onlyCPU
    def test_as_tensor(self, device):
        """torch.as_tensor: shares memory with the input when no dtype/device
        conversion is required, copies otherwise; rejects the same invalid
        python data as torch.tensor."""
        # from python data
        x = [[0, 1], [2, 3]]
        self.assertEqual(torch.tensor(x), torch.as_tensor(x))
        self.assertEqual(torch.tensor(x, dtype=torch.float32), torch.as_tensor(x, dtype=torch.float32))
        # python data with heterogeneous types
        z = [0, 'torch']
        with self.assertRaisesRegex(TypeError, "invalid data type"):
            torch.tensor(z)
            torch.as_tensor(z)
        # python data with self-referential lists
        z = [0]
        z += [z]
        with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
            torch.tensor(z)
            torch.as_tensor(z)
        z = [[1, 2], z]
        with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
            torch.tensor(z)
            torch.as_tensor(z)
        # from tensor (doesn't copy unless type is different)
        y = torch.tensor(x)
        self.assertIs(y, torch.as_tensor(y))
        self.assertIsNot(y, torch.as_tensor(y, dtype=torch.float32))
        if torch.cuda.is_available():
            self.assertIsNot(y, torch.as_tensor(y, device='cuda'))
            y_cuda = y.to('cuda')
            self.assertIs(y_cuda, torch.as_tensor(y_cuda))
            self.assertIs(y_cuda, torch.as_tensor(y_cuda, device='cuda'))
        # doesn't copy
        for dtype in [np.float64, np.int64, np.int8, np.uint8]:
            n = np.random.rand(5, 6).astype(dtype)
            n_astensor = torch.as_tensor(n)
            self.assertEqual(torch.tensor(n), n_astensor)
            # Writes through the aliasing tensor are visible in both.
            n_astensor[0][0] = 25.7
            self.assertEqual(torch.tensor(n), n_astensor)
        # changing dtype causes copy
        n = np.random.rand(5, 6).astype(np.float32)
        n_astensor = torch.as_tensor(n, dtype=torch.float64)
        self.assertEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
        n_astensor[0][1] = 250.8
        self.assertNotEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
        # changing device causes copy
        if torch.cuda.is_available():
            n = np.random.randn(5, 6)
            n_astensor = torch.as_tensor(n, device='cuda')
            self.assertEqual(torch.tensor(n, device='cuda'), n_astensor)
            n_astensor[0][2] = 250.9
            self.assertNotEqual(torch.tensor(n, device='cuda'), n_astensor)
    # TODO: this test should be updated
    @suppress_warnings
    @dtypesIfCPU(torch.float, torch.bfloat16, torch.float16)
    @dtypes(torch.float)
    def test_range(self, device, dtype):
        """torch.range (deprecated, end-inclusive): out= variant, writing into
        non-contiguous views, negative steps, and equal bounds."""
        res1 = torch.range(0, 1, device=device, dtype=dtype)
        res2 = torch.tensor((), device=device, dtype=dtype)
        torch.range(0, 1, device=device, dtype=dtype, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Check range for non-contiguous tensors.
        x = torch.zeros(2, 3, device=device, dtype=dtype)
        torch.range(0, 3, device=device, dtype=dtype, out=x.narrow(1, 1, 2))
        res2 = torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=dtype)
        self.assertEqual(x, res2, atol=1e-16, rtol=0)
        # Check negative
        res1 = torch.tensor((1, 0), device=device, dtype=dtype)
        res2 = torch.tensor((), device=device, dtype=dtype)
        torch.range(1, 0, -1, device=device, dtype=dtype, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Equal bounds
        res1 = torch.ones(1, device=device, dtype=dtype)
        res2 = torch.tensor((), device=device, dtype=dtype)
        torch.range(1, 1, -1, device=device, dtype=dtype, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        torch.range(1, 1, 1, device=device, dtype=dtype, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
# TODO: this test should be updated
def test_range_warning(self, device):
with warnings.catch_warnings(record=True) as w:
torch.range(0, 10, device=device)
self.assertEqual(len(w), 1)
    # TODO: this test should be updated
    def test_arange(self, device):
        """torch.arange: basic/vectorized paths, out= (including
        non-contiguous outputs), negative steps, float stepping, bool inputs,
        end-exclusivity, dtype inference with fractional bounds, large step
        values, rounding behavior, and invalid (nan/inf/overflow) bounds."""
        res = torch.tensor(range(10000), device=device)
        res1 = torch.arange(0, 10000, device=device)  # Use a larger number so vectorized code can be triggered
        res2 = torch.tensor([], dtype=torch.int64, device=device)
        torch.arange(0, 10000, out=res2)
        self.assertEqual(res, res1, atol=0, rtol=0)
        self.assertEqual(res, res2, atol=0, rtol=0)
        # Vectorization on non-contiguous tensors
        res = torch.rand(3, 3, 300000, device=device).to(torch.int64)
        res = res.permute(2, 0, 1)
        torch.arange(0, 300000 * 3 * 3, out=res)
        self.assertEqual(res.flatten(), torch.arange(0, 300000 * 3 * 3, device=device))
        # Check arange with only one argument
        res1 = torch.arange(10, device=device)
        res2 = torch.arange(0, 10, device=device)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Check arange for non-contiguous tensors.
        x = torch.zeros(2, 3, device=device)
        torch.arange(0, 4, out=x.narrow(1, 1, 2))
        res2 = torch.tensor(((0., 0., 1.), (0., 2., 3.)), device=device)
        self.assertEqual(x, res2, atol=1e-16, rtol=0)
        # Check negative
        res1 = torch.tensor((1., 0.), device=device)
        res2 = torch.tensor([], device=device)
        torch.arange(1, -1, -1, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Equal bounds
        res1 = torch.ones(1, device=device)
        res2 = torch.tensor([], device=device)
        torch.arange(1, 0, -1, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        torch.arange(1, 2, 1, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # FloatTensor
        out = torch.tensor([], dtype=torch.float, device=device)
        res1 = torch.arange(0.6, 0.89, 0.1, out=out)
        self.assertEqual(res1, [0.6, 0.7, 0.8])
        out = torch.tensor([], dtype=torch.float, device=device)
        res1 = torch.arange(1, 10, 0.3, out=out)
        self.assertEqual(res1.size(0), 30)
        self.assertEqual(res1[0], 1)
        self.assertEqual(res1[29], 9.7)
        # DoubleTensor
        out = torch.tensor([], dtype=torch.double, device=device)
        res1 = torch.arange(0.6, 0.89, 0.1, out=out)
        self.assertEqual(res1, [0.6, 0.7, 0.8])
        out = torch.tensor([], dtype=torch.double, device=device)
        res1 = torch.arange(1, 10, 0.3, out=out)
        self.assertEqual(res1.size(0), 30)
        self.assertEqual(res1[0], 1)
        self.assertEqual(res1[29], 9.7)
        # Bool Input matching numpy semantics
        r = torch.arange(True, device=device)
        self.assertEqual(r[0], 0)
        r2 = torch.arange(False, device=device)
        self.assertEqual(len(r2), 0)
        self.assertEqual(r.dtype, torch.int64)
        self.assertEqual(r2.dtype, torch.int64)
        # Check that it's exclusive
        r = torch.arange(0, 5, device=device)
        self.assertEqual(r.min(), 0)
        self.assertEqual(r.max(), 4)
        self.assertEqual(r.numel(), 5)
        r = torch.arange(0, 6, 3, device=device)
        self.assertEqual(r.min(), 0)
        self.assertEqual(r.max(), 3)
        self.assertEqual(r.numel(), 2)
        r = torch.arange(0, 5, 2, device=device)
        self.assertEqual(r.min(), 0)
        self.assertEqual(r.max(), 4)
        self.assertEqual(r.numel(), 3)
        r = torch.arange(0, -5, -2, device=device)
        self.assertEqual(r.min(), -4)
        self.assertEqual(r.max(), 0)
        self.assertEqual(r.numel(), 3)
        r1 = torch.arange(0, 5 + 1e-6, device=device)
        # NB: without the dtype, we'll infer output type to be int64
        r2 = torch.arange(0, 5, dtype=torch.float32, device=device)
        r3 = torch.arange(0, 5 - 1e-6, device=device)
        self.assertEqual(r1[:-1], r2, atol=0, rtol=0)
        self.assertEqual(r2, r3, atol=0, rtol=0)
        r1 = torch.arange(10, -1 + 1e-6, -1, device=device)
        # NB: without the dtype, we'll infer output type to be int64
        r2 = torch.arange(10, -1, -1, dtype=torch.float32, device=device)
        r3 = torch.arange(10, -1 - 1e-6, -1, device=device)
        self.assertEqual(r1, r2, atol=0, rtol=0)
        self.assertEqual(r2, r3[:-1], atol=0, rtol=0)
        # Large step value that would overflow naive length computation.
        w = 1449629115440469
        r = torch.arange(0, 100 * w, w, device=device)
        self.assertEqual(r.numel(), 100)
        # Test Rounding Errors
        line = torch.zeros(size=(1, 49), device=device)
        self.assertWarnsRegex(UserWarning, 'The out tensor will be resized',
                              lambda: torch.arange(-1, 1, 2. / 49, dtype=torch.float32, out=line))
        self.assertEqual(line.shape, [50])
        # out= cannot write through an expanded (non-writable-safe) view.
        x = torch.empty(1).expand(10)
        self.assertRaises(RuntimeError, lambda: torch.arange(10, out=x))
        msg = "unsupported range"
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(-5, float('nan'), device=device))
        # check with step size
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('-inf'), -1, device=device))
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf'), device=device))
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('-inf'), 10, device=device))
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), 10, device=device))
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf'), device=device))
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), device=device))
        self.assertRaisesRegex(
            RuntimeError, "overflow",
            lambda: torch.arange(1.175494351e-38, 3.402823466e+38, device=device))
        # check that it holds a consistent output shape on precision-cornered step sizes
        d = torch.arange(-4.0, 4.0, 0.01, dtype=torch.float32, device=device)
        self.assertEqual(d.shape[0], 800)
    # TODO: this test should be updated
    @onlyCPU
    def test_arange_inference(self, device):
        """torch.arange dtype inference: float32 if any of start/end/step is a
        floating python scalar or floating 0-dim tensor, otherwise int64."""
        # end only
        self.assertIs(torch.float32, torch.arange(1.).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1.)).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64)).dtype)
        self.assertIs(torch.int64, torch.arange(1).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1)).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1, dtype=torch.int16)).dtype)
        # start, end, [step]
        self.assertIs(torch.float32, torch.arange(1., 3).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64), 3).dtype)
        self.assertIs(torch.float32, torch.arange(1, 3.).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1, dtype=torch.int16), torch.tensor(3.)).dtype)
        self.assertIs(torch.float32, torch.arange(1, 3, 1.).dtype)
        self.assertIs(torch.float32,
                      torch.arange(torch.tensor(1),
                                   torch.tensor(3, dtype=torch.int16),
                                   torch.tensor(1., dtype=torch.float64)).dtype)
        self.assertIs(torch.int64, torch.arange(1, 3).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1), 3).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1), torch.tensor(3, dtype=torch.int16)).dtype)
        self.assertIs(torch.int64, torch.arange(1, 3, 1).dtype)
        self.assertIs(torch.int64,
                      torch.arange(torch.tensor(1),
                                   torch.tensor(3),
                                   torch.tensor(1, dtype=torch.int16)).dtype)
    # cannot call storage() on meta tensor
    @skipMeta
    def test_empty_strided(self, device):
        """torch.empty_strided must accept any shape/stride combination that
        as_strided accepts, including overlapping and zero strides."""
        for shape in [(2, 3, 4), (0, 2, 0)]:
            # some of these cases are pretty strange, just verifying that if as_strided
            # allows them then empty_strided can as well.
            for strides in [(12, 4, 1), (2, 4, 6), (0, 0, 0)]:
                empty_strided = torch.empty_strided(shape, strides, device=device)
                # as_strided checks the storage size is big enough to support such a strided tensor;
                # instead of repeating this calculation, we just use empty_strided which does the same
                # calculation when setting the storage size.
                as_strided = torch.empty(empty_strided.storage().size(),
                                         device=device).as_strided(shape, strides)
                self.assertEqual(empty_strided.shape, as_strided.shape)
                self.assertEqual(empty_strided.stride(), as_strided.stride())
    def test_new_empty_strided(self, device):
        """Tensor.new_empty_strided must mirror torch.empty_strided (shape,
        stride, dtype, device) and reject mismatched sizes/strides lengths."""
        def _test(sizes, strides, dtype):
            # new_empty_strided inherits dtype/device from the source tensor.
            x = torch.zeros(5, 5, dtype=dtype, device=device)
            result = x.new_empty_strided(sizes, strides)
            expected = torch.empty_strided(sizes, strides, dtype=x.dtype, device=x.device)
            self.assertEqual(result.shape, expected.shape)
            self.assertEqual(result.stride(), expected.stride())
            self.assertEqual(result.dtype, expected.dtype)
            self.assertEqual(result.device, expected.device)
        _test([2, 3], [3, 1], torch.float)
        _test([5, 3], [0, 1], torch.int)
        _test([], [], torch.float)
        # Some really weird cases
        for shape in [(2, 3, 4), (0, 2, 0)]:
            for strides in [(12, 4, 1), (2, 4, 6), (0, 0, 0)]:
                _test(shape, strides, torch.float)
        # Make sure sizes and strides have the same length
        # https://github.com/pytorch/pytorch/issues/82416
        with self.assertRaisesRegex(
                RuntimeError,
                r"dimensionality of sizes \(1\) must match dimensionality of strides \(0\)"):
            dtype = torch.float64
            x = torch.tensor(-4.8270, dtype=dtype, device=device)
            size = (2,)
            stride = ()
            x.new_empty_strided(size, stride, dtype=dtype, device=device)
def test_strided_mismatched_stride_shape(self, device):
for shape, strides in [((1, ), ()), ((1, 2), (1, ))]:
with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
torch.tensor(0.42, device=device).as_strided(shape, strides)
with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
torch.tensor(0.42, device=device).as_strided_(shape, strides)
def test_empty_tensor_props(self, device):
sizes = [(0,), (0, 3), (5, 0), (5, 0, 3, 0, 2), (0, 3, 0, 2), (0, 5, 0, 2, 0)]
for size in sizes:
x = torch.empty(tuple(size), device=device)
self.assertEqual(size, x.shape)
self.assertTrue(x.is_contiguous())
size_ones_instead_of_zeros = (x if x != 0 else 1 for x in size)
y = torch.empty(tuple(size_ones_instead_of_zeros), device=device)
self.assertEqual(x.stride(), y.stride())
    @onlyNativeDeviceTypes
    def test_empty_overflow(self, device):
        """Shapes/strides whose storage-size or stride computation overflows
        int64 must raise instead of silently wrapping."""
        with self.assertRaisesRegex(RuntimeError, 'Storage size calculation overflowed'):
            torch.empty([2, 4, 2**29, 2**29], dtype=torch.float64)
        with self.assertRaisesRegex(RuntimeError, 'Storage size calculation overflowed'):
            torch.empty([8, 8, 2**29, 2**29], dtype=torch.float64)
        with self.assertRaisesRegex(RuntimeError, 'Storage size calculation overflowed'):
            torch.empty_strided([8, 8], [2**61, 1], dtype=torch.float64)
        with self.assertRaisesRegex(RuntimeError, 'Stride calculation overflowed'):
            torch.empty([0, 4, 2305843009213693952], dtype=torch.float32)
    def test_eye(self, device):
        """torch.eye: negative dims are rejected; square and rectangular
        identities match a zeros-plus-filled-diagonal reference; out= agrees
        with the functional form."""
        for dtype in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
            # bfloat16 is listed but deliberately skipped here.
            if dtype == torch.bfloat16:
                continue
            # Test the RuntimeError is raised when either m or n is a negative number
            for n, m in ((-1, 1), (1, -1), (-1, -1)):
                with self.assertRaisesRegex(RuntimeError, 'must be greater or equal to'):
                    torch.eye(n, m, device=device, dtype=dtype)
            # Test when the `m` parameter is not provided
            for n in (3, 5, 7):
                res1 = torch.eye(n, device=device, dtype=dtype)
                naive_eye = torch.zeros(n, n, dtype=dtype, device=device)
                naive_eye.diagonal(dim1=-2, dim2=-1).fill_(1)
                self.assertEqual(naive_eye, res1)
                # Check eye_out outputs
                res2 = torch.empty(0, device=device, dtype=dtype)
                torch.eye(n, out=res2)
                self.assertEqual(res1, res2)
            for n, m in product([3, 5, 7], repeat=2):
                # Construct identity using diagonal and fill
                res1 = torch.eye(n, m, device=device, dtype=dtype)
                naive_eye = torch.zeros(n, m, dtype=dtype, device=device)
                naive_eye.diagonal(dim1=-2, dim2=-1).fill_(1)
                self.assertEqual(naive_eye, res1)
                # Check eye_out outputs
                res2 = torch.empty(0, device=device, dtype=dtype)
                torch.eye(n, m, out=res2)
                self.assertEqual(res1, res2)
    @precisionOverride({torch.float: 1e-8, torch.double: 1e-10})
    @dtypes(*floating_and_complex_types())
    def test_linspace_vs_numpy(self, device, dtype):
        """torch.linspace must match np.linspace for floating/complex dtypes,
        including exact agreement at both endpoints."""
        start = -0.0316082797944545745849609375 + (0.8888888888j if dtype.is_complex else 0)
        end = .0315315723419189453125 + (0.444444444444j if dtype.is_complex else 0)
        for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
            t = torch.linspace(start, end, steps, device=device, dtype=dtype)
            a = np.linspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
            t = t.cpu()
            self.assertEqual(t, torch.from_numpy(a))
            # The endpoints must be bit-exact, not just within tolerance.
            self.assertTrue(t[0].item() == a[0])
            self.assertTrue(t[steps - 1].item() == a[steps - 1])
    @dtypes(*integral_types())
    def test_linspace_vs_numpy_integral(self, device, dtype):
        """torch.linspace must match np.linspace for integral dtypes
        (truncation behavior), with bit-exact endpoints."""
        start = 1
        end = 127
        for steps in [25, 50]:
            t = torch.linspace(start, end, steps, device=device, dtype=dtype)
            a = np.linspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
            t = t.cpu()
            self.assertEqual(t, torch.from_numpy(a))
            self.assertTrue(t[0].item() == a[0])
            self.assertTrue(t[steps - 1].item() == a[steps - 1])
def _test_linspace_logspace_complex_helper(self, torch_fn, np_fn, device, dtype):
start = torch.randn(1, dtype=dtype).item()
end = (start + torch.randn(1, dtype=dtype) + random.randint(5, 15)).item()
def test_fn(torch_fn, numpy_fn, steps):
t = torch_fn(start, end, steps, device=device)
a = numpy_fn(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
t = t.cpu()
self.assertEqual(t, torch.from_numpy(a))
for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
test_fn(torch.linspace, np.linspace, steps)
    @dtypes(torch.complex64)
    def test_linspace_vs_numpy_complex(self, device, dtype):
        """torch.linspace vs np.linspace with complex start/end."""
        self._test_linspace_logspace_complex_helper(torch.linspace, np.linspace,
                                                    device, dtype)
    @dtypes(torch.complex64)
    def test_logspace_vs_numpy_complex(self, device, dtype):
        """torch.logspace vs np.logspace with complex start/end."""
        self._test_linspace_logspace_complex_helper(torch.logspace, np.logspace,
                                                    device, dtype)
    @precisionOverride({torch.float: 1e-6, torch.double: 1e-10})
    @dtypes(*floating_types())
    def test_logspace_vs_numpy(self, device, dtype):
        """torch.logspace must match np.logspace for floating dtypes,
        with endpoint agreement (within the precision override)."""
        start = -0.0316082797944545745849609375
        end = .0315315723419189453125
        for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
            t = torch.logspace(start, end, steps, device=device, dtype=dtype)
            a = np.logspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
            t = t.cpu()
            self.assertEqual(t, torch.from_numpy(a))
            self.assertEqual(t[0], a[0])
            self.assertEqual(t[steps - 1], a[steps - 1])
    @onlyCUDA
    @largeTensorTest('16GB')
    def test_range_factories_64bit_indexing(self, device):
        """arange/linspace/logspace must produce correct final elements for
        lengths exceeding 2**31 (64-bit indexing on CUDA)."""
        bigint = 2 ** 31 + 1
        t = torch.arange(bigint, dtype=torch.long, device=device)
        self.assertEqual(t[-1].item(), bigint - 1)
        # Free each huge tensor before allocating the next to cap peak memory.
        del t
        t = torch.linspace(0, 1, bigint, dtype=torch.float, device=device)
        self.assertEqual(t[-1].item(), 1)
        del t
        t = torch.logspace(0, 1, bigint, 2, dtype=torch.float, device=device)
        self.assertEqual(t[-1].item(), 2)
        del t
    @expectedFailureMeta  # RuntimeError: The tensor has a non-zero number of elements
    @onlyNativeDeviceTypes
    def test_tensor_ctor_device_inference(self, device):
        """torch.tensor / torch.as_tensor / sparse_coo_tensor must infer their
        device from the input tensors rather than the ambient device."""
        torch_device = torch.device(device)
        values = torch.tensor((1, 2, 3), device=device)
        # Tests tensor and as_tensor
        # Note: warnings are suppressed (suppresses warnings)
        for op in (torch.tensor, torch.as_tensor):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                self.assertEqual(op(values).device, torch_device)
                self.assertEqual(op(values, dtype=torch.float64).device, torch_device)
                if self.device_type == 'cuda':
                    # CPU inputs stay on CPU even with a CUDA device current.
                    with torch.cuda.device(device):
                        self.assertEqual(op(values.cpu()).device, torch.device('cpu'))
        # Tests sparse ctor
        indices = torch.tensor([[0, 1, 1],
                                [2, 0, 1],
                                [2, 1, 0]], device=device)
        sparse_size = (3, 3, 3)
        sparse_default = torch.sparse_coo_tensor(indices, values, sparse_size)
        self.assertEqual(sparse_default.device, torch_device)
        sparse_with_dtype = torch.sparse_coo_tensor(indices, values, sparse_size, dtype=torch.float64)
        self.assertEqual(sparse_with_dtype.device, torch_device)
        if self.device_type == 'cuda':
            with torch.cuda.device(device):
                sparse_with_dtype = torch.sparse_coo_tensor(indices.cpu(), values.cpu(),
                                                            sparse_size, dtype=torch.float64)
                self.assertEqual(sparse_with_dtype.device, torch.device('cpu'))
    @onlyCUDA
    @onlyNativeDeviceTypes
    def test_new_tensor_device(self, device):
        """Tensor.new_tensor: with no device argument the result lives on the
        *source* tensor's device; an explicit device argument always wins."""
        torch_device = torch.device(device)
        cpu_device = torch.device('cpu')
        tensor = torch.tensor((1, 2, 3), device=device)
        # need more than one device_type to test this
        assert self.device_type == 'cuda'
        # All four (source-device, data-device) combinations.
        for left, right in product([tensor, tensor.cpu()], [tensor, tensor.cpu()]):
            for device_arg in [torch_device, cpu_device, None]:
                if device_arg is None:
                    self.assertEqual(left.new_tensor(right).device, left.device)
                else:
                    self.assertEqual(left.new_tensor(right, device=device_arg).device, device_arg)
    def _test_signal_window_functions(self, name, dtype, device, **kwargs):
        """Shared driver for the legacy torch.<name>_window functions.

        Compares against scipy.signal.get_window for a range of sizes and
        both periodic/symmetric variants; also checks dtype/layout/
        requires_grad argument handling.
        """
        import scipy.signal as signal

        torch_method = getattr(torch, name + '_window')
        if not dtype.is_floating_point:
            # Non-floating dtypes are rejected outright.
            with self.assertRaisesRegex(RuntimeError, r'floating point'):
                torch_method(3, dtype=dtype)
            return
        for size in [0, 1, 2, 5, 10, 50, 100, 1024, 2048]:
            for periodic in [True, False]:
                res = torch_method(
                    size,
                    periodic=periodic,
                    layout=torch.strided,
                    requires_grad=False,
                    **kwargs,
                    device=device,
                    dtype=dtype,
                )
                # NB: scipy always returns a float64 result
                ref = torch.from_numpy(
                    signal.get_window(
                        (name, *(kwargs.values())), size, fftbins=periodic
                    )
                )
                self.assertEqual(res, ref.to(dtype))
        with self.assertRaisesRegex(RuntimeError, r'not implemented for sparse types'):
            torch_method(3, layout=torch.sparse_coo)
        self.assertTrue(torch_method(3, requires_grad=True).requires_grad)
        self.assertFalse(torch_method(3).requires_grad)
    @onlyNativeDeviceTypes
    @precisionOverride({torch.bfloat16: 5e-2, torch.half: 1e-3})
    @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
    @dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
    @dtypes(torch.float, torch.double, torch.long)
    @parametrize("window", ['hann', 'hamming', 'bartlett', 'blackman'])
    def test_signal_window_functions(self, device, dtype, window):
        """Legacy parameter-free window functions vs scipy."""
        self._test_signal_window_functions(window, dtype, device)
    @onlyNativeDeviceTypes
    @precisionOverride({torch.bfloat16: 5e-2, torch.half: 1e-3})
    @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
    @dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
    @dtypes(torch.float, torch.double, torch.long, torch.bfloat16, torch.float16)
    def test_kaiser_window(self, device, dtype):
        """torch.kaiser_window vs scipy with 50 random beta values."""
        for _ in range(50):
            self._test_signal_window_functions('kaiser', dtype, device, beta=random.random() * 30)
    def _test_signal_windows_functions(self, name, dtype, device, **kwargs):
        """Shared driver for the torch.signal.windows.<name> functions.

        Like _test_signal_window_functions, but for the modern
        torch.signal.windows namespace, which uses `sym=` instead of
        `periodic=` (note the inverted meaning).
        """
        import scipy.signal as signal

        torch_method = getattr(torch.signal.windows, name)
        if not dtype.is_floating_point:
            with self.assertRaisesRegex(RuntimeError, r'floating point'):
                torch_method(3, dtype=dtype)
            return
        for size in [0, 1, 2, 5, 10, 50, 100, 1024, 2048]:
            for periodic in [True, False]:
                # sym is the logical negation of the legacy `periodic` flag.
                res = torch_method(size, sym=not periodic, **kwargs, device=device, dtype=dtype)
                # NB: scipy always returns a float64 result
                ref = torch.from_numpy(signal.get_window((name, *(kwargs.values())), size, fftbins=periodic))
                self.assertEqual(res, ref, exact_dtype=False)
        self.assertTrue(torch_method(3, requires_grad=True).requires_grad)
        self.assertFalse(torch_method(3).requires_grad)
    # torch.signal.windows functions (except any with extra parameters)
    @onlyNativeDeviceTypes
    @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
    @dtypes(torch.float, torch.double)
    @parametrize("window", ['bartlett', 'blackman', 'cosine', 'hamming', 'hann', 'nuttall'])
    def test_signal_windows_functions(self, device, dtype, window):
        """Parameter-free torch.signal.windows functions vs scipy."""
        self._test_signal_windows_functions(window, dtype, device)
    # torch.signal.windows.kaiser
    @onlyNativeDeviceTypes
    @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
    @dtypes(torch.float, torch.double)
    def test_kaiser(self, device, dtype):
        """torch.signal.windows.kaiser vs scipy with 50 random beta values."""
        for _ in range(50):
            self._test_signal_windows_functions('kaiser', dtype, device, beta=random.random() * 30)
def test_tensor_factories_empty(self, device):
# ensure we can create empty tensors from each factory function
shapes = [(5, 0, 1), (0,), (0, 0, 1, 0, 2, 0, 0)]
for shape in shapes:
for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16, torch.chalf):
self.assertEqual(shape, torch.zeros(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.zeros_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual(shape, torch.full(shape, 3, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.full_like(torch.zeros(shape, device=device, dtype=dt), 3).shape)
self.assertEqual(shape, torch.ones(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.ones_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual(shape, torch.empty(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.empty_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual(shape, torch.empty_strided(shape, (0,) * len(shape), device=device, dtype=dt).shape)
if dt == torch.bool:
self.assertEqual(shape, torch.randint(2, shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 2).shape)
elif dt.is_complex:
self.assertRaises(RuntimeError, lambda: torch.randint(6, shape, device=device, dtype=dt).shape)
else:
self.assertEqual(shape, torch.randint(6, shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 6).shape)
if dt not in {torch.double, torch.float, torch.half, torch.bfloat16,
torch.complex32, torch.complex64, torch.complex128}:
self.assertRaises(RuntimeError, lambda: torch.rand(shape, device=device, dtype=dt).shape)
if dt == torch.double or dt == torch.float or dt.is_complex:
self.assertEqual(shape, torch.randn(shape, device=device, dtype=dt).shape)
self.assertEqual(shape, torch.randn_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual((0,), torch.arange(0, device=device).shape)
self.assertEqual((0, 0), torch.eye(0, device=device).shape)
self.assertEqual((0, 0), torch.eye(0, 0, device=device).shape)
self.assertEqual((5, 0), torch.eye(5, 0, device=device).shape)
self.assertEqual((0, 5), torch.eye(0, 5, device=device).shape)
self.assertEqual((0,), torch.linspace(1, 1, 0, device=device).shape)
self.assertEqual((0,), torch.logspace(1, 1, 0, device=device).shape)
self.assertEqual((0,), torch.randperm(0, device=device).shape)
self.assertEqual((0,), torch.bartlett_window(0, device=device).shape)
self.assertEqual((0,), torch.bartlett_window(0, periodic=False, device=device).shape)
self.assertEqual((0,), torch.hamming_window(0, device=device).shape)
self.assertEqual((0,), torch.hann_window(0, device=device).shape)
self.assertEqual((0,), torch.kaiser_window(0, device=device).shape)
self.assertEqual((1, 1, 0), torch.tensor([[[]]], device=device).shape)
self.assertEqual((1, 1, 0), torch.as_tensor([[[]]], device=device).shape)
@onlyCUDA
def test_tensor_factory_gpu_type_inference(self, device):
with set_default_tensor_type(torch.cuda.DoubleTensor):
with set_default_dtype(torch.float32):
self.assertIs(torch.float32, torch.tensor(0.).dtype)
self.assertEqual(torch.device(device), torch.tensor(0.).device)
with set_default_dtype(torch.float64):
self.assertIs(torch.float64, torch.tensor(0.).dtype)
self.assertEqual(torch.device(device), torch.tensor(0.).device)
@onlyCUDA
def test_tensor_factory_gpu_type(self, device):
with set_default_tensor_type(torch.cuda.FloatTensor):
x = torch.zeros((5, 5))
self.assertIs(torch.float32, x.dtype)
self.assertTrue(x.is_cuda)
with set_default_tensor_type(torch.cuda.DoubleTensor):
x = torch.zeros((5, 5))
self.assertIs(torch.float64, x.dtype)
self.assertTrue(x.is_cuda)
@skipCPUIf(True, 'compares device with cpu')
@dtypes(torch.int, torch.long, torch.float, torch.double)
def test_arange_device_vs_cpu(self, device, dtype):
cpu_tensor = torch.arange(0, 10, dtype=dtype, device='cpu')
device_tensor = torch.arange(0, 10, dtype=dtype, device=device)
self.assertEqual(cpu_tensor, device_tensor)
@dtypes(torch.bfloat16, torch.float16)
def test_arange_lowp(self, device, dtype):
ref_tensor = torch.tensor([0, 1, 2, 3], dtype=dtype, device=device)
f16_tensor = torch.arange(0, 4, dtype=dtype, device=device)
self.assertEqual(ref_tensor, f16_tensor)
# step=2
ref_tensor = torch.tensor([0, 2, 4], dtype=dtype, device=device)
f16_tensor = torch.arange(0, 6, step=2, dtype=dtype, device=device)
self.assertEqual(ref_tensor, f16_tensor)
@dtypes(*all_types_and_complex_and(torch.bfloat16))
@dtypesIfCUDA(*all_types_and_complex_and(torch.bfloat16))
def test_linspace(self, device, dtype):
_from = random.random()
to = _from + random.random()
res1 = torch.linspace(_from, to, 137, device=device, dtype=dtype)
res2 = torch.tensor((), device=device, dtype=dtype)
torch.linspace(_from, to, 137, dtype=dtype, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# small tensor
self.assertEqual(torch.linspace(10, 20, 11, device=device, dtype=dtype),
torch.tensor(list(range(10, 21)), device=device, dtype=dtype))
# large tensor
if dtype not in (torch.int8, torch.uint8):
self.assertEqual(torch.linspace(10, 2000, 1991, device=device, dtype=dtype),
torch.tensor(list(range(10, 2001)), device=device, dtype=dtype))
# Vectorization on non-contiguous tensors
if dtype not in (torch.int8, torch.uint8): # int8 and uint8 are too small for this test
res = torch.rand(3, 3, 1000, device=device).to(dtype)
res = res.permute(2, 0, 1)
torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, out=res)
self.assertEqual(res.flatten(), torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, device=device, dtype=dtype))
self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, -1, device=device, dtype=dtype))
# steps = 1
self.assertEqual(torch.linspace(0, 1, 1, device=device, dtype=dtype),
torch.zeros(1, device=device, dtype=dtype), atol=0, rtol=0)
# steps = 0
self.assertEqual(torch.linspace(0, 1, 0, device=device, dtype=dtype).numel(), 0, atol=0, rtol=0)
# steps not provided
self.assertRaises(TypeError, lambda: torch.linspace(0, 1, device=device, dtype=dtype))
if dtype == torch.float:
# passed dtype can't be safely casted to inferred dtype
with self.assertRaisesRegex(RuntimeError, r"torch.linspace\(\): inferred dtype"):
torch.linspace(0, 1j, 5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"torch.linspace\(\): inferred dtype"):
torch.linspace(0j, 1, 5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"torch.linspace\(\): inferred dtype"):
torch.linspace(0j, 1j, 5, device=device, dtype=dtype)
# Check linspace for generating the correct output for each dtype.
start = 0 if dtype == torch.uint8 else -100
expected_lin = torch.tensor([start + .5 * i for i in range(401)], device=device, dtype=torch.double)
actual_lin = torch.linspace(start, start + 200, 401, device=device, dtype=dtype)
# If on GPU, allow for minor error depending on dtype.
tol = 0.
if device != 'cpu':
if dtype == torch.half:
tol = 1e-1
elif dtype == torch.float:
tol = 1e-5
elif dtype == torch.double:
tol = 1e-10
self.assertEqual(expected_lin.to(dtype), actual_lin, atol=tol, rtol=0)
# Check linspace for generating with start > end.
self.assertEqual(torch.linspace(2, 0, 3, device=device, dtype=dtype),
torch.tensor((2, 1, 0), device=device, dtype=dtype),
atol=0, rtol=0)
# Check for race condition (correctness when applied on a large tensor).
if dtype not in (torch.int8, torch.uint8, torch.int16, torch.half, torch.bfloat16):
y = torch.linspace(0, 999999 + (999999j if dtype.is_complex else 0),
1000000, device=device, dtype=dtype)
if dtype.is_complex:
cond = torch.logical_and(y[:-1].real < y[1:].real, y[:-1].imag < y[1:].imag)
else:
cond = y[:-1] < y[1:]
correct = all(cond)
self.assertTrue(correct)
# Check linspace for non-contiguous tensors.
x = torch.zeros(2, 3, device=device, dtype=dtype)
y = torch.linspace(0, 3, 4, out=x.narrow(1, 1, 2), dtype=dtype)
self.assertEqual(x, torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=dtype), atol=0, rtol=0)
def _test_linspace_logspace_deduction_helper(self, fn, device):
for start, end in [(1, 2), (1., 2), (1., -2.), (1j, 2j), (0., 2j), (1j, 2)]:
dtype = torch.float32
if isinstance(start, complex) or isinstance(end, complex):
dtype = torch.cfloat
self.assertEqual(fn(start, end, steps=100, device=device).dtype, dtype)
def test_linspace_deduction(self, device):
# Test deduction from input parameters.
self._test_linspace_logspace_deduction_helper(torch.linspace, device)
def test_logspace_deduction(self, device):
# Test deduction from input parameters.
self._test_linspace_logspace_deduction_helper(torch.logspace, device)
# The implementation of linspace+logspace goes through a different path
# when the steps arg is equal to 0 or 1. For other values of `steps`
# they call specialized linspace (or logspace) kernels.
LINSPACE_LOGSPACE_SPECIAL_STEPS = [0, 1]
# NOTE [Linspace+Logspace precision override]
# Our Linspace and logspace torch.half CUDA kernels are not very precise.
# Since linspace/logspace are deterministic, we can compute an expected
# amount of error (by testing without a precision override), adding a tiny
# amount (EPS) to that, and using that value as the override.
LINSPACE_LOGSPACE_EXTRA_EPS = 1e-5
# Compares linspace device vs. cpu
def _test_linspace(self, device, dtype, steps):
a = torch.linspace(0, 10, steps=steps, dtype=dtype, device=device)
b = torch.linspace(0, 10, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.0039 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_linspace_device_vs_cpu(self, device, dtype):
self._test_linspace(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
def test_linspace_special_steps(self, device, dtype):
for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
self._test_linspace(device, dtype, steps=steps)
# Compares logspace device vs cpu
def _test_logspace(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
# Compares logspace device vs cpu
def _test_logspace_base2(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, base=2, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps, base=2)
self.assertEqual(a, b, exact_dtype=False)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.025 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_device_vs_cpu(self, device, dtype):
self._test_logspace(device, dtype, steps=10)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.0201 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_base2(self, device, dtype):
self._test_logspace_base2(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
def test_logspace_special_steps(self, device, dtype):
for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
self._test_logspace(device, dtype, steps=steps)
self._test_logspace_base2(device, dtype, steps=steps)
@dtypes(*all_types_and(torch.bfloat16))
@dtypesIfCUDA(*integral_types_and(torch.half, torch.bfloat16, torch.float32, torch.float64) if TEST_WITH_ROCM else
all_types_and(torch.half, torch.bfloat16))
def test_logspace(self, device, dtype):
_from = random.random()
to = _from + random.random()
res1 = torch.logspace(_from, to, 137, device=device, dtype=dtype)
res2 = torch.tensor((), device=device, dtype=dtype)
torch.logspace(_from, to, 137, device=device, dtype=dtype, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
self.assertRaises(RuntimeError, lambda: torch.logspace(0, 1, -1, device=device, dtype=dtype))
# steps not provided
self.assertRaises(TypeError, lambda: torch.logspace(0, 1, device=device, dtype=dtype))
self.assertEqual(torch.logspace(0, 1, 1, device=device, dtype=dtype),
torch.ones(1, device=device, dtype=dtype), atol=0, rtol=0)
if dtype == torch.float:
# passed dtype can't be safely casted to inferred dtype
with self.assertRaisesRegex(RuntimeError, r"torch.logspace\(\): inferred dtype"):
torch.logspace(0, 1j, 5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"torch.logspace\(\): inferred dtype"):
torch.logspace(0j, 1, 5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, r"torch.logspace\(\): inferred dtype"):
torch.logspace(0j, 1j, 5, device=device, dtype=dtype)
# Check precision - start, stop and base are chosen to avoid overflow
# steps is chosen so that step size is not subject to rounding error
# a tolerance is needed for gpu tests due to differences in computation
atol = None
rtol = None
if self.device_type == 'cpu':
atol = 0
rtol = 0
self.assertEqual(torch.tensor([2. ** (i / 8.) for i in range(49)], device=device, dtype=dtype),
torch.logspace(0, 6, steps=49, base=2, device=device, dtype=dtype),
atol=atol, rtol=rtol)
# Check non-default base=2
self.assertEqual(torch.logspace(1, 1, 1, 2, device=device, dtype=dtype),
torch.ones(1, device=device, dtype=dtype) * 2)
self.assertEqual(torch.logspace(0, 2, 3, 2, device=device, dtype=dtype),
torch.tensor((1, 2, 4), device=device, dtype=dtype))
# Check logspace_ for generating with start > end.
self.assertEqual(torch.logspace(1, 0, 2, device=device, dtype=dtype),
torch.tensor((10, 1), device=device, dtype=dtype), atol=0, rtol=0)
# Check logspace_ for non-contiguous tensors.
x = torch.zeros(2, 3, device=device, dtype=dtype)
y = torch.logspace(0, 3, 4, base=2, device=device, dtype=dtype, out=x.narrow(1, 1, 2))
self.assertEqual(x, torch.tensor(((0, 1, 2), (0, 4, 8)), device=device, dtype=dtype), atol=0, rtol=0)
@onlyNativeDeviceTypes
@dtypes(torch.half, torch.float, torch.double)
def test_full_inference(self, device, dtype):
size = (2, 2)
with set_default_dtype(dtype):
# Tests bool fill value inference
t = torch.full(size, True)
self.assertEqual(t.dtype, torch.bool)
# Tests integer fill value inference
t = torch.full(size, 1)
self.assertEqual(t.dtype, torch.long)
# Tests float fill value inference
t = torch.full(size, 1.)
self.assertEqual(t.dtype, dtype)
# Tests complex inference
t = torch.full(size, (1 + 1j))
ctype = torch.complex128 if dtype is torch.double else torch.complex64
self.assertEqual(t.dtype, ctype)
def test_full_out(self, device):
size = (5,)
o = torch.empty(size, device=device, dtype=torch.long)
# verifies dtype/out conflict throws a RuntimeError
with self.assertRaises(RuntimeError):
torch.full(o.shape, 1., dtype=torch.float, out=o)
# verifies out dtype overrides inference
self.assertEqual(torch.full(o.shape, 1., out=o).dtype, o.dtype)
self.assertEqual(torch.full(size, 1, out=o).dtype, o.dtype)
# check that warning for numpy being not writable is suppressed
# when a copy of it is being created.
# see issue #47160
def test_tensor_from_non_writable_numpy(self, device):
with warnings.catch_warnings(record=True) as w:
a = np.arange(5.)
a.flags.writeable = False
t = torch.tensor(a)
self.assertEqual(len(w), 0)
@onlyCPU
@parametrize('shared', [True, False])
@unittest.skipIf(IS_WINDOWS, "NamedTemporaryFile on windows")
def test_from_file(self, device, shared):
dtype = torch.float64
t = torch.randn(2, 5, dtype=dtype, device=device)
with tempfile.NamedTemporaryFile() as f:
expected_filename = f.name if shared else None
t.numpy().tofile(f)
t_mapped = torch.from_file(f.name, shared=shared, size=t.numel(), dtype=dtype)
self.assertTrue(t_mapped.untyped_storage().filename == expected_filename)
self.assertEqual(torch.flatten(t), t_mapped)
s = torch.UntypedStorage.from_file(f.name, shared, nbytes=t.numel() * dtype.itemsize)
self.assertTrue(s.filename == expected_filename)
@onlyCPU
def test_storage_filename(self, device):
t = torch.randn(2, 5, device=device)
self.assertIsNone(t.untyped_storage().filename)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_refs_tensor(self, device, dtype):
self.assertEqual(torch._refs.tensor([], device=device, dtype=dtype), torch.tensor([], device=device, dtype=dtype))
# Class for testing random tensor creation ops, like torch.randint
| TestTensorCreation |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/embedding_ops.py | {
"start": 294,
"end": 3038
} | class ____(torch.nn.Module):
_version = 1
def __init__(self, num_embeddings, embedding_dim, dtype=torch.quint8):
super().__init__()
self.dtype = dtype
if self.dtype in [torch.quint8, torch.quint4x2]:
scales = torch.ones(num_embeddings, dtype=torch.float)
zero_points = torch.zeros(num_embeddings, dtype=torch.float)
wq = torch._empty_per_channel_affine_quantized(
[num_embeddings, embedding_dim],
scales=scales,
zero_points=zero_points,
axis=0,
dtype=self.dtype,
)
self.set_weight(wq)
else:
raise NotImplementedError(
f"Unsupported dtype on quantized embedding! Supports quint8 and quint4x2. Got dtype: {dtype}"
)
@torch.jit.export
def set_weight(self, weight: torch.Tensor) -> None:
if self.dtype in [torch.quint8, torch.quint4x2]:
self._packed_weight = torch.ops.quantized.embedding_bag_prepack(weight)
else:
raise NotImplementedError(
"Unsupported dtype for quantized embedding prepack! Supports quint8 and quint4x2."
)
@torch.jit.export
def _weight(self):
if self.dtype in [torch.quint8, torch.quint4x2]:
return torch.ops.quantized.embedding_bag_unpack(self._packed_weight)
else:
raise NotImplementedError(
"Unsupported dtype for quantized embedding unpack! Supports quint8 and quint4x2."
)
def forward(self, x):
return x
# Version 1
# self
# |--- _packed_weight : Tensor representing weight of EmbeddingPackedParamsBase
# |--- dtype : torch.dtype
def _save_to_state_dict(self, destination, prefix, keep_vars):
super()._save_to_state_dict(destination, prefix, keep_vars)
destination[prefix + "dtype"] = self.dtype
destination[prefix + "_packed_weight"] = self._weight()
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
self.dtype = state_dict[prefix + "dtype"]
state_dict.pop(prefix + "dtype")
weight = state_dict[prefix + "_packed_weight"]
state_dict.pop(prefix + "_packed_weight")
self.set_weight(weight)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
False,
missing_keys,
unexpected_keys,
error_msgs,
)
def __repr__(self):
return self._weight().__repr__()
| EmbeddingPackedParams |
python | hynek__structlog | tests/processors/test_processors.py | {
"start": 6966,
"end": 7656
} | class ____:
@pytest.mark.parametrize("true_value", [True, 1, 1.1])
def test_obtains_exc_info_on_True(self, true_value):
"""
If the passed argument evaluates to True obtain exc_info ourselves.
"""
try:
0 / 0
except Exception:
assert sys.exc_info() == _figure_out_exc_info(true_value)
else:
pytest.fail("Exception not raised.")
def test_py3_exception_no_traceback(self):
"""
Exceptions without tracebacks are simply returned with None for
traceback.
"""
e = ValueError()
assert (e.__class__, e, None) == _figure_out_exc_info(e)
| TestFigureOutExcInfo |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_data_condition_index.py | {
"start": 3163,
"end": 5285
} | class ____(OrganizationDataConditionAPITestCase):
def test_group_filter(self) -> None:
response = self.get_success_response(
self.organization.slug,
group=DataConditionHandler.Group.WORKFLOW_TRIGGER,
status_code=200,
)
assert len(response.data) == 1
assert response.data[0] == {
"type": Condition.REAPPEARED_EVENT.value,
"handlerGroup": DataConditionHandler.Group.WORKFLOW_TRIGGER.value,
"comparisonJsonSchema": {"type": "boolean"},
}
response = self.get_success_response(
self.organization.slug, group=DataConditionHandler.Group.ACTION_FILTER, status_code=200
)
assert len(response.data) == 2
assert response.data[0] == {
"type": Condition.AGE_COMPARISON.value,
"handlerGroup": DataConditionHandler.Group.ACTION_FILTER.value,
"handlerSubgroup": DataConditionHandler.Subgroup.ISSUE_ATTRIBUTES.value,
"comparisonJsonSchema": {
"type": "object",
"properties": {
"value": {"type": "integer", "minimum": 0},
},
"required": ["value"],
"additionalProperties": False,
},
}
assert response.data[1] == {
"type": Condition.ISSUE_CATEGORY.value,
"handlerGroup": DataConditionHandler.Group.ACTION_FILTER.value,
"handlerSubgroup": DataConditionHandler.Subgroup.ISSUE_ATTRIBUTES.value,
"comparisonJsonSchema": {
"type": "object",
"properties": {
"value": {"type": "integer", "minimum": 0},
},
"required": ["value"],
"additionalProperties": False,
},
}
def test_invalid_group(self) -> None:
self.get_error_response(self.organization.slug, group="invalid", status_code=400)
def test_no_group(self) -> None:
self.get_error_response(self.organization.slug, status_code=400)
| OrganizationDataConditionIndexBaseTest |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-vertex/llama_index/embeddings/vertex/base.py | {
"start": 3834,
"end": 8876
} | class ____(BaseEmbedding):
embed_mode: VertexEmbeddingMode = Field(
default=VertexEmbeddingMode.RETRIEVAL_MODE,
description="The embedding mode to use.",
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the Vertex."
)
client_email: Optional[str] = Field(
default=None, description="The client email for the VertexAI credentials."
)
token_uri: Optional[str] = Field(
default=None, description="The token URI for the VertexAI credentials."
)
private_key_id: Optional[str] = Field(
default=None, description="The private key ID for the VertexAI credentials."
)
private_key: Optional[str] = Field(
default=None, description="The private key for the VertexAI credentials."
)
_model: TextEmbeddingModel = PrivateAttr()
def __init__(
self,
model_name: str = "textembedding-gecko@003",
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
embed_mode: VertexEmbeddingMode = VertexEmbeddingMode.RETRIEVAL_MODE,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
num_workers: Optional[int] = None,
client_email: Optional[str] = None,
token_uri: Optional[str] = None,
private_key_id: Optional[str] = None,
private_key: Optional[str] = None,
) -> None:
if credentials is None:
if client_email and token_uri and private_key_id and private_key:
info = {
"client_email": client_email,
"token_uri": token_uri,
"private_key_id": private_key_id,
"private_key": private_key.replace("\\n", "\n"),
}
credentials = service_account.Credentials.from_service_account_info(
info
)
else:
raise ValueError(
"Either provide credentials or all of client_email, token_uri, private_key_id, and private_key."
)
init_vertexai(project=project, location=location, credentials=credentials)
callback_manager = callback_manager or CallbackManager([])
additional_kwargs = additional_kwargs or {}
super().__init__(
embed_mode=embed_mode,
project=project,
location=location,
credentials=credentials,
additional_kwargs=additional_kwargs,
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
num_workers=num_workers,
client_email=client_email,
token_uri=token_uri,
private_key_id=private_key_id,
private_key=private_key,
)
self._model = TextEmbeddingModel.from_pretrained(model_name)
@classmethod
def class_name(cls) -> str:
return "VertexTextEmbedding"
def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]:
texts = _get_embedding_request(
texts=texts,
embed_mode=self.embed_mode,
is_query=False,
model_name=self.model_name,
)
embeddings = self._model.get_embeddings(texts, **self.additional_kwargs)
return [embedding.values for embedding in embeddings]
def _get_text_embedding(self, text: str) -> Embedding:
return self._get_text_embeddings([text])[0]
async def _aget_text_embedding(self, text: str) -> Embedding:
return (await self._aget_text_embeddings([text]))[0]
async def _aget_text_embeddings(self, texts: List[str]) -> List[Embedding]:
texts = _get_embedding_request(
texts=texts,
embed_mode=self.embed_mode,
is_query=False,
model_name=self.model_name,
)
embeddings = await self._model.get_embeddings_async(
texts, **self.additional_kwargs
)
return [embedding.values for embedding in embeddings]
def _get_query_embedding(self, query: str) -> Embedding:
texts = _get_embedding_request(
texts=[query],
embed_mode=self.embed_mode,
is_query=True,
model_name=self.model_name,
)
embeddings = self._model.get_embeddings(texts, **self.additional_kwargs)
return embeddings[0].values
async def _aget_query_embedding(self, query: str) -> Embedding:
texts = _get_embedding_request(
texts=[query],
embed_mode=self.embed_mode,
is_query=True,
model_name=self.model_name,
)
embeddings = await self._model.get_embeddings_async(
texts, **self.additional_kwargs
)
return embeddings[0].values
| VertexTextEmbedding |
python | ApeWorX__ape | src/ape_ethereum/multicall/exceptions.py | {
"start": 415,
"end": 557
} | class ____(MulticallException):
def __init__(self):
super().__init__("Multicall not supported on this chain.")
| UnsupportedChainError |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 27902,
"end": 28673
} | class ____(DelegatingLexer):
"""
Subclass of the `CheetahLexer` that highlights unlexed data
with the `JavascriptLexer`.
"""
name = 'JavaScript+Cheetah'
aliases = ['js+cheetah', 'javascript+cheetah',
'js+spitfire', 'javascript+spitfire']
mimetypes = ['application/x-javascript+cheetah',
'text/x-javascript+cheetah',
'text/javascript+cheetah',
'application/x-javascript+spitfire',
'text/x-javascript+spitfire',
'text/javascript+spitfire']
def __init__(self, **options):
super(CheetahJavascriptLexer, self).__init__(JavascriptLexer,
CheetahLexer, **options)
| CheetahJavascriptLexer |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pep8_naming/N802.py | {
"start": 515,
"end": 635
} | class ____(ast.NodeVisitor):
def visit_Constant(self, node):
pass
def bad_Name(self):
pass
| Visitor |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/natbot/crawler.py | {
"start": 429,
"end": 770
} | class ____(TypedDict):
"""A typed dictionary containing information about elements in the viewport."""
node_index: str
backend_node_id: int
node_name: str | None
node_value: str | None
node_meta: list[str]
is_clickable: bool
origin_x: int
origin_y: int
center_x: int
center_y: int
| ElementInViewPort |
python | jazzband__django-pipeline | tests/tests/test_compiler.py | {
"start": 868,
"end": 1274
} | class ____(SubProcessCompiler):
output_extension = "junk"
def match_file(self, path):
return path.endswith(".coffee")
def compile_file(self, infile, outfile, outdated=False, force=False):
command = (
("this-exists-nowhere-as-a-command-and-should-fail",),
infile,
outfile,
)
return self.execute_command(command)
| InvalidCompiler |
python | pyodide__pyodide | src/py/_pyodide/_base.py | {
"start": 2603,
"end": 5485
} | class ____(Exception):
"""We will throw this to return a result from our code.
This allows us to distinguish between "code used top level await" and "code
returned a generator or coroutine".
"""
def __init__(self, v: Any) -> None:
super().__init__(v)
self.value = v
# We need EvalCodeResultException available inside the running code. I suppose
# we could import it, wrap all of the code in a try/finally block, and delete it
# again in the finally block but I think this is the best way.
#
# Put it into a list to avoid breaking CPython test test_inheritance
# (test.test_baseexception.ExceptionClassTests) which examines all Exceptions in
# builtins.
builtins.___EvalCodeResultException = [EvalCodeResultException] # type: ignore[attr-defined]
# We will substitute in the value of x we are trying to return.
_raise_template_ast = ast.parse("raise ___EvalCodeResultException[0](x)").body[0]
def _last_expr_to_raise(mod: ast.Module) -> None:
"""If the final ast node is a statement, raise an EvalCodeResultException
with the value of the statement.
"""
if not mod.body:
return
last_node = mod.body[-1]
if not isinstance(mod.body[-1], ast.Expr | ast.Await):
return
raise_expr = deepcopy(_raise_template_ast)
# Replace x with our value in _raise_template_ast.
raise_expr.exc.args[0] = last_node.value # type: ignore[attr-defined]
mod.body[-1] = raise_expr
def _parse_and_compile_gen(
source: str,
*,
return_mode: str = "last_expr",
quiet_trailing_semicolon: bool = True,
mode: str = "exec",
filename: str = "<exec>",
flags: int = 0x0,
dont_inherit: bool = False,
optimize: int = -1,
) -> Generator[ast.Module, ast.Module, CodeType]:
"""Parse ``source``, then yield the AST, then compile the AST and return the
code object.
By yielding the ast, we give callers the opportunity to do further ast
manipulations. Because generators are annoying to call, this is wrapped in
the Executor class.
"""
# handle mis-indented input from multi-line strings
source = dedent(source)
mod = compile(source, filename, mode, flags | ast.PyCF_ONLY_AST)
# Pause here, allow caller to transform ast if they like.
mod = yield mod
if quiet_trailing_semicolon and should_quiet(source):
return_mode = "none"
if return_mode == "last_expr_or_assign":
# add extra expression with just the L-value so that we can handle it
# with the last_expr code.
_last_assign_to_expr(mod)
if return_mode.startswith("last_expr"): # last_expr or last_expr_or_assign
_last_expr_to_raise(mod)
ast.fix_missing_locations(mod)
return compile(mod, filename, mode, flags, dont_inherit, optimize)
ReturnMode = Literal["last_expr", "last_expr_or_assign", "none"]
| EvalCodeResultException |
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 40981,
"end": 41790
} | class ____(nn.Module):
def __init__(self, config: Union[ClapAudioConfig, ClapTextConfig]):
super().__init__()
self.config = config
hidden_size = config.hidden_size
projection_dim = config.projection_dim
self.linear1 = nn.Linear(hidden_size, projection_dim)
self.activation = ACT2FN[config.projection_hidden_act]
self.linear2 = nn.Linear(projection_dim, projection_dim)
def forward(self, hidden_states):
hidden_states = self.linear1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.linear2(hidden_states)
return hidden_states
# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->ClapText, persistent=False->persistent=True
| ClapProjectionLayer |
python | GoogleCloudPlatform__python-docs-samples | compute/autoscaler/demo/frontend.py | {
"start": 3514,
"end": 3713
} | class ____(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
pass
if __name__ == "__main__":
httpd = DemoHttpServer(("", 80), DemoRequestHandler)
httpd.serve_forever()
| DemoHttpServer |
python | doocs__leetcode | solution/2300-2399/2331.Evaluate Boolean Binary Tree/Solution.py | {
"start": 192,
"end": 457
} | class ____:
def evaluateTree(self, root: Optional[TreeNode]) -> bool:
if root.left is None:
return bool(root.val)
op = or_ if root.val == 2 else and_
return op(self.evaluateTree(root.left), self.evaluateTree(root.right))
| Solution |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/matrix_inverse_op_test.py | {
"start": 6295,
"end": 8247
} | class ____(test.Benchmark):
shapes = [
(4, 4),
(10, 10),
(16, 16),
(101, 101),
(256, 256),
(1000, 1000),
(1024, 1024),
(2048, 2048),
(513, 4, 4),
(513, 16, 16),
(513, 256, 256),
]
def _GenerateMatrix(self, shape):
batch_shape = shape[:-2]
shape = shape[-2:]
assert shape[0] == shape[1]
n = shape[0]
matrix = np.ones(shape).astype(np.float32) / (
2.0 * n) + np.diag(np.ones(n).astype(np.float32))
return variables.Variable(np.tile(matrix, batch_shape + (1, 1)))
def benchmarkMatrixInverseOp(self):
for adjoint in False, True:
for shape in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(inv),
min_iters=25,
name="matrix_inverse_cpu_{shape}_adjoint_{adjoint}".format(
shape=shape, adjoint=adjoint))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(inv),
min_iters=25,
name="matrix_inverse_gpu_{shape}_adjoint_{adjoint}".format(
shape=shape, adjoint=adjoint))
if __name__ == "__main__":
test.main()
| MatrixInverseBenchmark |
python | pyca__cryptography | src/cryptography/x509/name.py | {
"start": 7104,
"end": 8935
} | class ____:
def __init__(self, attributes: Iterable[NameAttribute[str | bytes]]):
attributes = list(attributes)
if not attributes:
raise ValueError("a relative distinguished name cannot be empty")
if not all(isinstance(x, NameAttribute) for x in attributes):
raise TypeError("attributes must be an iterable of NameAttribute")
# Keep list and frozenset to preserve attribute order where it matters
self._attributes = attributes
self._attribute_set = frozenset(attributes)
if len(self._attribute_set) != len(attributes):
raise ValueError("duplicate attributes are not allowed")
def get_attributes_for_oid(
self,
oid: ObjectIdentifier,
) -> list[NameAttribute[str | bytes]]:
return [i for i in self if i.oid == oid]
def rfc4514_string(
self, attr_name_overrides: _OidNameMap | None = None
) -> str:
"""
Format as RFC4514 Distinguished Name string.
Within each RDN, attributes are joined by '+', although that is rarely
used in certificates.
"""
return "+".join(
attr.rfc4514_string(attr_name_overrides)
for attr in self._attributes
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, RelativeDistinguishedName):
return NotImplemented
return self._attribute_set == other._attribute_set
def __hash__(self) -> int:
return hash(self._attribute_set)
def __iter__(self) -> Iterator[NameAttribute[str | bytes]]:
return iter(self._attributes)
def __len__(self) -> int:
return len(self._attributes)
def __repr__(self) -> str:
return f"<RelativeDistinguishedName({self.rfc4514_string()})>"
| RelativeDistinguishedName |
python | pappasam__jedi-language-server | jedi_language_server/text_edit_utils.py | {
"start": 1251,
"end": 4336
} | class ____:
"""Convert jedi Refactoring objects into renaming machines."""
def __init__(self, workspace: Workspace, refactoring: Refactoring) -> None:
self.workspace = workspace
self.refactoring = refactoring
def lsp_renames(self) -> Iterator[RenameFile]:
"""Get all File rename operations."""
for old_name, new_name in self.refactoring.get_renames():
yield RenameFile(
kind="rename",
old_uri=old_name.as_uri(),
new_uri=new_name.as_uri(),
options=RenameFileOptions(
ignore_if_exists=True, overwrite=True
),
)
def lsp_text_document_edits(self) -> Iterator[TextDocumentEdit]:
"""Get all text document edits."""
changed_files = self.refactoring.get_changed_files()
for path, changed_file in changed_files.items():
uri = path.as_uri()
document = self.workspace.get_text_document(uri)
notebook_mapper = notebook_utils.notebook_coordinate_mapper(
self.workspace, notebook_uri=uri
)
source = (
notebook_mapper.notebook_source
if notebook_mapper
else document.source
)
version = 0 if document.version is None else document.version
text_edits = lsp_text_edits(source, changed_file)
if text_edits:
text_document_edit = TextDocumentEdit(
text_document=OptionalVersionedTextDocumentIdentifier(
uri=uri,
version=version,
),
edits=text_edits,
)
if notebook_mapper is not None:
yield from notebook_mapper.cell_text_document_edits(
text_document_edit
)
else:
yield text_document_edit
_OPCODES_CHANGE = {"replace", "delete", "insert"}
def lsp_text_edits(
old_code: str, changed_file: ChangedFile
) -> List[Union[TextEdit, AnnotatedTextEdit]]:
"""Take a jedi `ChangedFile` and convert to list of text edits.
Handles inserts, replaces, and deletions within a text file.
Additionally, makes sure returned code is syntactically valid
Python.
"""
new_code = changed_file.get_new_code()
if not is_valid_python(new_code):
return []
position_lookup = PositionLookup(old_code)
text_edits: List[Union[TextEdit, AnnotatedTextEdit]] = []
for opcode in get_opcodes(old_code, new_code):
if opcode.op in _OPCODES_CHANGE:
start = position_lookup.get(opcode.old_start)
end = position_lookup.get(opcode.old_end)
new_text = new_code[opcode.new_start : opcode.new_end]
text_edits.append(
TextEdit(
range=Range(start=start, end=end),
new_text=new_text,
)
)
return text_edits
| RefactoringConverter |
python | coleifer__peewee | tests/expressions.py | {
"start": 440,
"end": 878
} | class ____(BaseNamesTest):
@skip_if(IS_SQLITE)
def test_regexp_iregexp(self):
people = [Person.create(name=name) for name in ('n1', 'n2', 'n3')]
self.assertNames(Person.name.regexp('n[1,3]'), ['n1', 'n3'])
self.assertNames(Person.name.regexp('N[1,3]'), [])
self.assertNames(Person.name.iregexp('n[1,3]'), ['n1', 'n3'])
self.assertNames(Person.name.iregexp('N[1,3]'), ['n1', 'n3'])
| TestRegexp |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_invite_request_details.py | {
"start": 1802,
"end": 2944
} | class ____(InviteRequestBase):
def test_get_invalid(self) -> None:
self.login_as(user=self.user)
resp = self.get_response(self.org.slug, "123")
assert resp.status_code == 404
def test_me_not_supported(self) -> None:
self.login_as(user=self.user)
# the serializer allows this value but it makes no sense for this view
resp = self.get_response(self.org.slug, "me")
assert resp.status_code == 404
def test_get_invite_request(self) -> None:
self.login_as(user=self.user)
resp = self.get_response(self.org.slug, self.invite_request.id)
assert resp.status_code == 200
assert resp.data["email"] == self.invite_request.email
assert resp.data["inviteStatus"] == "requested_to_be_invited"
assert resp.data["teams"] == []
resp = self.get_response(self.org.slug, self.request_to_join.id)
assert resp.status_code == 200
assert resp.data["email"] == self.request_to_join.email
assert resp.data["inviteStatus"] == "requested_to_join"
assert resp.data["teams"] == []
| OrganizationInviteRequestGetTest |
python | django__django | django/forms/fields.py | {
"start": 24629,
"end": 26752
} | class ____(FileField):
default_validators = [validators.validate_image_file_extension]
default_error_messages = {
"invalid_image": _(
"Upload a valid image. The file you uploaded was either not an "
"image or a corrupted image."
),
}
def to_python(self, data):
"""
Check that the file-upload field data contains a valid image (GIF, JPG,
PNG, etc. -- whatever Pillow supports).
"""
f = super().to_python(data)
if f is None:
return None
from PIL import Image
# We need to get a file object for Pillow. We might have a path or we
# might have to read the data into memory.
if hasattr(data, "temporary_file_path"):
file = data.temporary_file_path()
else:
if hasattr(data, "read"):
file = BytesIO(data.read())
else:
file = BytesIO(data["content"])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
image = Image.open(file)
# verify() must be called immediately after the constructor.
image.verify()
# Annotating so subclasses can reuse it for their own validation
f.image = image
# Pillow doesn't detect the MIME type of all formats. In those
# cases, content_type will be None.
f.content_type = Image.MIME.get(image.format)
except Exception as exc:
# Pillow doesn't recognize it as an image.
raise ValidationError(
self.error_messages["invalid_image"],
code="invalid_image",
) from exc
if hasattr(f, "seek") and callable(f.seek):
f.seek(0)
return f
def widget_attrs(self, widget):
attrs = super().widget_attrs(widget)
if isinstance(widget, FileInput) and "accept" not in widget.attrs:
attrs.setdefault("accept", "image/*")
return attrs
| ImageField |
python | dagster-io__dagster | python_modules/dagster/dagster/_grpc/types.py | {
"start": 21098,
"end": 22814
} | class ____(
NamedTuple(
"_ExternalScheduleExecutionArgs",
[
("repository_origin", RemoteRepositoryOrigin),
("instance_ref", Optional[InstanceRef]),
("schedule_name", str),
("scheduled_execution_timestamp", Optional[float]),
("scheduled_execution_timezone", Optional[str]),
("log_key", Optional[Sequence[str]]),
("timeout", Optional[int]),
],
)
):
def __new__(
cls,
repository_origin: RemoteRepositoryOrigin,
instance_ref: Optional[InstanceRef],
schedule_name: str,
scheduled_execution_timestamp: Optional[float] = None,
scheduled_execution_timezone: Optional[str] = None,
log_key: Optional[Sequence[str]] = None,
timeout: Optional[int] = None,
):
return super().__new__(
cls,
repository_origin=check.inst_param(
repository_origin, "repository_origin", RemoteRepositoryOrigin
),
instance_ref=check.opt_inst_param(instance_ref, "instance_ref", InstanceRef),
schedule_name=check.str_param(schedule_name, "schedule_name"),
scheduled_execution_timestamp=check.opt_float_param(
scheduled_execution_timestamp, "scheduled_execution_timestamp"
),
scheduled_execution_timezone=check.opt_str_param(
scheduled_execution_timezone,
"scheduled_execution_timezone",
),
log_key=check.opt_list_param(log_key, "log_key", of_type=str),
timeout=check.opt_int_param(timeout, "timeout"),
)
@whitelist_for_serdes
| ExternalScheduleExecutionArgs |
python | bokeh__bokeh | tests/unit/bokeh/server/test_auth_provider.py | {
"start": 6556,
"end": 7600
} | class ____(RequestHandler): pass
""", func, suffix='.py')
def test_logout_url(self) -> None:
def func(filename: str):
am = bsa.AuthModule(filename)
assert am.login_url == "/foo"
assert am.get_login_url is None
assert am.login_handler is None
assert am.logout_url == "/bar"
assert am.logout_handler is None
with_file_contents("""
def get_user(handler): return 10
login_url = "/foo"
logout_url = "/bar"
""", func, suffix='.py')
def test_logout_handler(self) -> None:
def func(filename: str):
am = bsa.AuthModule(filename)
assert am.login_url == "/foo"
assert am.get_login_url is None
assert am.login_handler is None
assert am.logout_url == "/bar"
assert issubclass(am.logout_handler, RequestHandler)
with_file_contents("""
def get_user(handler): return 10
login_url = "/foo"
logout_url = "/bar"
from tornado.web import RequestHandler
| LoginHandler |
python | sanic-org__sanic | sanic/touchup/schemes/base.py | {
"start": 158,
"end": 992
} | class ____(ABC):
ident: str
_registry: set[type] = set()
def __init__(self, app) -> None:
self.app = app
@abstractmethod
def visitors(self) -> list[NodeTransformer]: ...
def __init_subclass__(cls):
BaseScheme._registry.add(cls)
def __call__(self):
return self.visitors()
@classmethod
def build(cls, method, module_globals, app):
raw_source = getsource(method)
src = dedent(raw_source)
node = parse(src)
for scheme in cls._registry:
for visitor in scheme(app)():
node = visitor.visit(node)
compiled_src = compile(node, method.__name__, "exec")
exec_locals: dict[str, Any] = {}
exec(compiled_src, module_globals, exec_locals) # nosec
return exec_locals[method.__name__]
| BaseScheme |
python | wandb__wandb | wandb/vendor/pygments/lexers/markup.py | {
"start": 10588,
"end": 12277
} | class ____(RegexLexer):
"""
Lexer for the TeX and LaTeX typesetting languages.
"""
name = 'TeX'
aliases = ['tex', 'latex']
filenames = ['*.tex', '*.aux', '*.toc']
mimetypes = ['text/x-tex', 'text/x-latex']
tokens = {
'general': [
(r'%.*?\n', Comment),
(r'[{}]', Name.Builtin),
(r'[&_^]', Name.Builtin),
],
'root': [
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
(r'\\$', Keyword),
include('general'),
(r'[^\\$%&_^{}]+', Text),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
default('#pop'),
],
}
def analyse_text(text):
for start in ("\\documentclass", "\\input", "\\documentstyle",
"\\relax"):
if text[:len(start)] == start:
return True
| TexLexer |
python | astropy__astropy | astropy/extern/ply/yacc.py | {
"start": 79245,
"end": 82667
} | class ____(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self, module):
if isinstance(module, types.ModuleType):
parsetab = module
else:
exec('import %s' % module)
parsetab = sys.modules[module]
if parsetab._tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self, filename):
try:
import cPickle as pickle
except ImportError:
import pickle
if not os.path.exists(filename):
raise ImportError
in_f = open(filename, 'rb')
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
| LRTable |
python | sympy__sympy | sympy/assumptions/relation/equality.py | {
"start": 2928,
"end": 3988
} | class ____(BinaryRelation):
"""
Binary predicate for $>$.
The purpose of this class is to provide the instance which represent
the ">" predicate in order to allow the logical inference.
This class must remain internal to assumptions module and user must
use :obj:`~.Gt()` instead to construct the equality expression.
Evaluating this predicate to ``True`` or ``False`` is done by
:func:`~.core.relational.is_gt`
Examples
========
>>> from sympy import ask, Q
>>> Q.gt(0, 0)
Q.gt(0, 0)
>>> ask(_)
False
See Also
========
sympy.core.relational.Gt
"""
is_reflexive = False
is_symmetric = False
name = 'gt'
handler = None
@property
def reversed(self):
return Q.lt
@property
def negated(self):
return Q.le
def eval(self, args, assumptions=True):
if assumptions == True:
# default assumptions for is_gt is None
assumptions = None
return is_gt(*args, assumptions)
| StrictGreaterThanPredicate |
python | realpython__materials | duck-typing-python/vehicles_duck.py | {
"start": 0,
"end": 311
} | class ____:
def __init__(self, make, model, color):
self.make = make
self.model = model
self.color = color
def start(self):
print("The car is starting")
def stop(self):
print("The car is stopping")
def drive(self):
print("The car is driving")
| Car |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 35636,
"end": 35836
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("DEVELOPMENT", "RUNTIME")
| RepositoryVulnerabilityAlertDependencyScope |
python | getsentry__sentry | src/sentry/taskworker/silolimiter.py | {
"start": 174,
"end": 1797
} | class ____(SiloLimit):
"""
Silo limiter for tasks
We don't want tasks to be spawned in the incorrect silo.
We can't reliably cause tasks to fail as not all tasks use
the ORM (which also has silo bound safety).
"""
def handle_when_unavailable(
self,
original_method: Callable[P, R],
current_mode: SiloMode,
available_modes: Iterable[SiloMode],
) -> Callable[P, R]:
def handle(*args: P.args, **kwargs: P.kwargs) -> Any:
name = original_method.__name__
message = f"Cannot call or spawn {name} in {current_mode},"
raise self.AvailabilityError(message)
return handle
def __call__(self, decorated_task: Task[P, R]) -> Task[P, R]:
# Replace the sentry.taskworker.Task interface used to schedule tasks.
replacements = {"delay", "apply_async"}
for attr_name in replacements:
task_attr = getattr(decorated_task, attr_name)
if callable(task_attr):
limited_attr = self.create_override(task_attr)
setattr(decorated_task, attr_name, limited_attr)
update_attrs = [
"fullname",
"namespace",
"retry",
"at_most_once",
"wait_for_delivery",
"compression_type",
]
# Cast limited_func as the super class type is just Callable, but we
# know here we have Task instances.
limited_func = cast(
Task[P, R], self.create_override(decorated_task, update_attrs=update_attrs)
)
return limited_func
| TaskSiloLimit |
python | google__pytype | pytype/abstract/_function_base.py | {
"start": 5971,
"end": 10140
} | class ____(Function):
"""An abstract value representing a native function.
Attributes:
name: Function name. Might just be something like "<lambda>".
func: An object with a __call__ method.
ctx: context.Context instance.
"""
def __init__(self, name: str, func: Callable, ctx: "context.Context") -> None:
super().__init__(name, ctx)
self.func = func
self.bound_class = lambda callself, underlying: self
def argcount(self, _: "cfg.CFGNode") -> int:
return self.func.func_code.argcount # pytype: disable=attribute-error
def call(
self,
node: "cfg.CFGNode",
func: "cfg.Binding",
args: function.Args,
alias_map: "datatypes.UnionFind | None" = None,
) -> "tuple[cfg.CFGNode, cfg.Variable]":
sig = None
if isinstance(
self.func.__self__, # pytype: disable=attribute-error
_classes.CallableClass,
):
sig = function.Signature.from_callable(
self.func.__self__ # pytype: disable=attribute-error
)
args = args.simplify(node, self.ctx, match_signature=sig)
del sig
posargs = [u.AssignToNewVariable(node) for u in args.posargs]
namedargs = {
k: u.AssignToNewVariable(node) for k, u in args.namedargs.items()
}
try:
inspect.signature(self.func).bind(
node, *posargs, **namedargs
) # pytype: disable=wrong-arg-types
except ValueError as e:
# Happens for, e.g.,
# def f((x, y)): pass
# f((42,))
raise NotImplementedError("Wrong number of values to unpack") from e
except TypeError as e:
# The possible errors here are:
# (1) wrong arg count
# (2) duplicate keyword
# (3) unexpected keyword
# The way we constructed namedargs rules out (2).
if "keyword" in str(e):
# Happens for, e.g.,
# def f(*args): pass
# f(x=42)
raise NotImplementedError("Unexpected keyword") from e
# The function was passed the wrong number of arguments. The signature is
# ([self, ]node, ...). The length of "..." tells us how many variables
# are expected.
expected_argcount = len(inspect.getfullargspec(self.func).args) - 1
func = self.func
if inspect.ismethod(func) and func.__self__ is not None:
expected_argcount -= 1
actual_argcount = len(posargs) + len(namedargs)
if actual_argcount > expected_argcount or (
not args.starargs and not args.starstarargs
):
# If we have too many arguments, or starargs and starstarargs are both
# empty, then we can be certain of a WrongArgCount error.
argnames = tuple("_" + str(i) for i in range(expected_argcount))
sig = function.Signature.from_param_names(self.name, argnames)
raise error_types.WrongArgCount(sig, args, self.ctx)
assert actual_argcount < expected_argcount
# Assume that starargs or starstarargs fills in the missing arguments.
# Instead of guessing where these arguments should go, overwrite all of
# the arguments with a list of unsolvables of the correct length, which
# is guaranteed to give us a correct (but imprecise) analysis.
posargs = [
self.ctx.new_unsolvable(node) for _ in range(expected_argcount)
]
namedargs = {}
if "self" in namedargs:
argnames = tuple(
"_" + str(i) for i in range(len(posargs) + len(namedargs))
)
sig = function.Signature.from_param_names(self.name, argnames)
raise error_types.DuplicateKeyword(sig, args, self.ctx, "self")
return self.func( # pytype: disable=not-callable
node, *posargs, **namedargs
)
def get_positional_names(self):
# TODO: b/350643999 - this is the only place that func_code is used,
# find out what the type of this is and delete the dead code if not used.
code = self.func.func_code # pytype: disable=attribute-error
return list(code.varnames[: code.argcount])
def property_get(
self, callself: "cfg.Variable", is_class: bool = False
) -> "NativeFunction":
return self
_SomeFunction = TypeVar("_SomeFunction", bound=Function)
| NativeFunction |
python | ray-project__ray | python/ray/data/aggregate.py | {
"start": 29371,
"end": 33143
} | class ____(AggregateFnV2[List[Any], List[Any]]):
"""Defines Quantile aggregation.
Example:
.. testcode::
import ray
from ray.data.aggregate import Quantile
ds = ray.data.range(100)
# Schema: {'id': int64}
ds = ds.add_column("group_key", lambda x: x % 3)
# Schema: {'id': int64, 'group_key': int64}
# Calculating the 50th percentile (median) per group:
result = ds.groupby("group_key").aggregate(Quantile(q=0.5, on="id")).take_all()
# result: [{'group_key': 0, 'quantile(id)': ...},
# {'group_key': 1, 'quantile(id)': ...},
# {'group_key': 2, 'quantile(id)': ...}]
Args:
on: The name of the column to calculate the quantile on. Must be provided.
q: The quantile to compute, which must be between 0 and 1 inclusive.
For example, q=0.5 computes the median.
ignore_nulls: Whether to ignore null values. Default is True.
alias_name: Optional name for the resulting column.
"""
def __init__(
self,
on: Optional[str] = None,
q: float = 0.5,
ignore_nulls: bool = True,
alias_name: Optional[str] = None,
):
self._q = q
super().__init__(
alias_name if alias_name else f"quantile({str(on)})",
on=on,
ignore_nulls=ignore_nulls,
zero_factory=list,
)
def combine(self, current_accumulator: List[Any], new: List[Any]) -> List[Any]:
if isinstance(current_accumulator, List) and isinstance(new, List):
current_accumulator.extend(new)
return current_accumulator
if isinstance(current_accumulator, List) and (not isinstance(new, List)):
if new is not None and new != "":
current_accumulator.append(new)
return current_accumulator
if isinstance(new, List) and (not isinstance(current_accumulator, List)):
if current_accumulator is not None and current_accumulator != "":
new.append(current_accumulator)
return new
ls = []
if current_accumulator is not None and current_accumulator != "":
ls.append(current_accumulator)
if new is not None and new != "":
ls.append(new)
return ls
def aggregate_block(self, block: Block) -> List[Any]:
block_acc = BlockAccessor.for_block(block)
ls = []
for row in block_acc.iter_rows(public_row_format=False):
ls.append(row.get(self._target_col_name))
return ls
def finalize(self, accumulator: List[Any]) -> Optional[Any]:
if self._ignore_nulls:
accumulator = [v for v in accumulator if not is_null(v)]
else:
nulls = [v for v in accumulator if is_null(v)]
if len(nulls) > 0:
# If nulls are present and not ignored, the quantile is undefined.
# Return the first null encountered to preserve column type.
return nulls[0]
if not accumulator:
# If the list is empty (e.g., all values were null and ignored, or no values),
# quantile is undefined.
return None
key = lambda x: x # noqa: E731
input_values = sorted(accumulator)
k = (len(input_values) - 1) * self._q
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(input_values[int(k)])
# Interpolate between the elements at floor and ceil indices.
d0 = key(input_values[int(f)]) * (c - k)
d1 = key(input_values[int(c)]) * (k - f)
return round(d0 + d1, 5)
@PublicAPI
| Quantile |
python | django__django | tests/model_fields/test_jsonfield.py | {
"start": 2281,
"end": 4547
} | class ____(SimpleTestCase):
def test_deconstruct(self):
field = models.JSONField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.JSONField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_deconstruct_custom_encoder_decoder(self):
field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(kwargs["encoder"], DjangoJSONEncoder)
self.assertEqual(kwargs["decoder"], CustomJSONDecoder)
def test_get_transforms(self):
@models.JSONField.register_lookup
class MyTransform(Transform):
lookup_name = "my_transform"
field = models.JSONField()
transform = field.get_transform("my_transform")
self.assertIs(transform, MyTransform)
models.JSONField._unregister_lookup(MyTransform)
transform = field.get_transform("my_transform")
self.assertIsInstance(transform, KeyTransformFactory)
def test_key_transform_text_lookup_mixin_non_key_transform(self):
transform = Transform("test")
msg = (
"Transform should be an instance of KeyTransform in order to use "
"this lookup."
)
with self.assertRaisesMessage(TypeError, msg):
KeyTransformTextLookupMixin(transform)
def test_get_prep_value(self):
class JSONFieldGetPrepValue(models.JSONField):
def get_prep_value(self, value):
if value is True:
return {"value": True}
return value
def noop_adapt_json_value(value, encoder):
return value
field = JSONFieldGetPrepValue()
with mock.patch.object(
connection.ops, "adapt_json_value", noop_adapt_json_value
):
self.assertEqual(
field.get_db_prep_value(True, connection, prepared=False),
{"value": True},
)
self.assertIs(
field.get_db_prep_value(True, connection, prepared=True), True
)
self.assertEqual(field.get_db_prep_value(1, connection, prepared=False), 1)
| TestMethods |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/memcache/guestbook/main.py | {
"start": 932,
"end": 1354
} | class ____(ndb.Model):
"""Models an individual Guestbook entry with author, content, and date."""
author = ndb.StringProperty()
content = ndb.StringProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
def guestbook_key(guestbook_name=None):
"""Constructs a Datastore key for a Guestbook entity with guestbook_name"""
return ndb.Key("Guestbook", guestbook_name or "default_guestbook")
| Greeting |
python | Textualize__textual | docs/examples/how-to/containers08.py | {
"start": 275,
"end": 732
} | class ____(App):
"""Simple app to play with containers."""
CSS = """
.with-border {
border: heavy green;
}
"""
def compose(self) -> ComposeResult:
yield Box("Box 1") # (1)!
with Center(classes="with-border"): # (2)!
yield Box("Box 2")
with Right(classes="with-border"): # (3)!
yield Box("Box 3")
if __name__ == "__main__":
app = ContainerApp()
app.run()
| ContainerApp |
python | allegroai__clearml | clearml/backend_api/services/v2_20/projects.py | {
"start": 137258,
"end": 138405
} | class ____(Response):
"""
Response of projects.make_private endpoint.
:param updated: Number of projects updated
:type updated: int
"""
_service = "projects"
_action = "make_private"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of projects updated",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(MakePrivateResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| MakePrivateResponse |
python | Pylons__pyramid | tests/test_integration.py | {
"start": 27331,
"end": 28743
} | class ____(unittest.TestCase):
def _makeConfig(self):
from pyramid.config import Configurator
config = Configurator()
return config
def _makeTestApp(self, config):
app = config.make_wsgi_app()
return TestApp(app)
def test_unicode_in_url_404(self):
request_path = '/avalia%C3%A7%C3%A3o_participante'
request_path_unicode = b'/avalia\xc3\xa7\xc3\xa3o_participante'.decode(
'utf-8'
)
config = self._makeConfig()
testapp = self._makeTestApp(config)
res = testapp.get(request_path, status=404)
# Pyramid default 404 handler outputs:
# '404 Not Found\n\nThe resource could not be found.\n\n\n'
# '/avalia\xe7\xe3o_participante\n\n'
self.assertTrue(request_path_unicode in res.text)
def test_unicode_in_url_200(self):
request_path = '/avalia%C3%A7%C3%A3o_participante'
request_path_unicode = b'/avalia\xc3\xa7\xc3\xa3o_participante'.decode(
'utf-8'
)
def myview(request):
return 'XXX'
config = self._makeConfig()
config.add_route('myroute', request_path_unicode)
config.add_view(myview, route_name='myroute', renderer='json')
testapp = self._makeTestApp(config)
res = testapp.get(request_path, status=200)
self.assertEqual(res.text, '"XXX"')
| UnicodeInURLTest |
python | openai__openai-python | src/openai/types/evals/run_cancel_response.py | {
"start": 11886,
"end": 12365
} | class ____(BaseModel):
cached_tokens: int
"""The number of tokens retrieved from cache."""
completion_tokens: int
"""The number of completion tokens generated."""
invocation_count: int
"""The number of invocations."""
run_model_name: str = FieldInfo(alias="model_name")
"""The name of the model."""
prompt_tokens: int
"""The number of prompt tokens used."""
total_tokens: int
"""The total number of tokens used."""
| PerModelUsage |
python | numba__numba | numba/cuda/stubs.py | {
"start": 1003,
"end": 1217
} | class ____(Stub):
'''A triple, (x, y, z)'''
_description_ = '<Dim3>'
@property
def x(self):
pass
@property
def y(self):
pass
@property
def z(self):
pass
| Dim3 |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_methods.py | {
"start": 1650,
"end": 1934
} | class ____:
def setup(self):
self.df = DataFrame(np.random.randn(10000, 25))
self.df["foo"] = "bar"
self.df["bar"] = "baz"
self.df = self.df._consolidate()
def time_frame_get_numeric_data(self):
self.df._get_numeric_data()
| GetNumericData |
python | getsentry__sentry | tests/sentry/integrations/jira/test_sentry_installation.py | {
"start": 931,
"end": 1876
} | class ____(JiraSentryInstallationViewTestCase):
@patch(
"sentry.integrations.jira.views.sentry_installation.get_integration_from_request",
side_effect=ExpiredSignatureError(),
)
def test_expired_signature_error(self, mock_get_integration_from_request: MagicMock) -> None:
response = self.client.get(self.path)
assert response.status_code == 200
assert REFRESH_REQUIRED in response.content
@patch(
"sentry.integrations.jira.views.sentry_installation.get_integration_from_request",
side_effect=AtlassianConnectValidationError(),
)
def test_expired_invalid_installation_error(
self, mock_get_integration_from_request: MagicMock
) -> None:
response = self.client.get(self.path)
assert response.status_code == 200
assert UNABLE_TO_VERIFY_INSTALLATION.encode() in response.content
@control_silo_test
| JiraSentryInstallationViewErrorsTest |
python | PyCQA__pylint | tests/functional/c/crash_missing_module_type.py | {
"start": 267,
"end": 434
} | class ____:
""" Class """
@decor
def prop(self):
""" method """
return self
if __name__ == '__main__':
trop = Foo()
trop.prop = 42
| Foo |
python | celery__celery | celery/bootsteps.py | {
"start": 8727,
"end": 10552
} | class ____(metaclass=StepType):
"""A Bootstep.
The :meth:`__init__` method is called when the step
is bound to a parent object, and can as such be used
to initialize attributes in the parent object at
parent instantiation-time.
"""
#: Optional step name, will use ``qualname`` if not specified.
name = None
#: Optional short name used for graph outputs and in logs.
label = None
#: Set this to true if the step is enabled based on some condition.
conditional = False
#: List of other steps that that must be started before this step.
#: Note that all dependencies must be in the same blueprint.
requires = ()
#: This flag is reserved for the workers Consumer,
#: since it is required to always be started last.
#: There can only be one object marked last
#: in every blueprint.
last = False
#: This provides the default for :meth:`include_if`.
enabled = True
def __init__(self, parent, **kwargs):
pass
def include_if(self, parent):
"""Return true if bootstep should be included.
You can define this as an optional predicate that decides whether
this step should be created.
"""
return self.enabled
def instantiate(self, name, *args, **kwargs):
return instantiate(name, *args, **kwargs)
def _should_include(self, parent):
if self.include_if(parent):
return True, self.create(parent)
return False, None
def include(self, parent):
return self._should_include(parent)[0]
def create(self, parent):
"""Create the step."""
def __repr__(self):
return f'<step: {self.alias}>'
@property
def alias(self):
return self.label or _label(self)
def info(self, obj):
pass
| Step |
python | django-extensions__django-extensions | django_extensions/admin/__init__.py | {
"start": 7179,
"end": 7292
} | class ____(
ForeignKeyAutocompleteAdminMixin, admin.StackedInline
):
pass
| ForeignKeyAutocompleteStackedInline |
python | eth-brownie__brownie | brownie/network/gas/strategies.py | {
"start": 3719,
"end": 4647
} | class ____(SimpleGasStrategy):
"""
Gas strategy for determining a price using the GasNow API.
GasNow returns 4 possible prices:
rapid: the median gas prices for all transactions currently included
in the mining block
fast: the gas price transaction "N", the minimum priced tx currently
included in the mining block
standard: the gas price of the Max(2N, 500th) transaction in the mempool
slow: the gas price of the max(5N, 1000th) transaction within the mempool
Visit https://www.gasnow.org/ for more information on how GasNow
calculates gas prices.
"""
def __init__(self, speed: str = "fast"):
if speed not in {"rapid", "fast", "standard", "slow"}:
raise ValueError("`speed` must be one of: rapid, fast, standard, slow")
self.speed = speed
def get_gas_price(self) -> int:
return _fetch_gasnow(self.speed)
| GasNowStrategy |
python | ApeWorX__ape | tests/functional/utils/test_github.py | {
"start": 846,
"end": 6109
} | class ____:
def test_get_repo_unknown_repo(self, mocker, mock_session):
client = _GithubClient(session=mock_session)
# Make it raise 404.
response = mocker.MagicMock()
response.status_code = 404
error = HTTPError(response=response)
mock_session.request.side_effect = error
with pytest.raises(ProjectError, match=f"Unknown repository '{ORG_NAME}/{REPO_NAME}'"):
client.get_repo(ORG_NAME, REPO_NAME)
def test_clone_repo(self, mocker):
# NOTE: this test actually clones the repo.
client = _GithubClient()
git_patch = mocker.patch("ape.utils._github.subprocess.call")
git_patch.return_value = 0
with create_tempdir() as temp_dir:
try:
client.clone_repo("dapphub", "ds-test", Path(temp_dir), branch="master")
except ConnectTimeout:
pytest.xfail("Internet required to run this test.")
cmd = git_patch.call_args[0][0]
assert cmd[0].endswith("git")
assert cmd[1] == "-c"
assert cmd[2] == "advice.detachedHead=false"
assert cmd[3] == "clone"
assert cmd[4] == "https://github.com/dapphub/ds-test.git"
# cmd[5] is the temporary output path
assert cmd[6] == "--branch"
assert cmd[7] == "master"
def test_get_release(self, github_client, mock_session):
version = "0.1.0"
github_client.get_release(ORG_NAME, REPO_NAME, "0.1.0")
base_uri = f"https://api.github.com/repos/{ORG_NAME}/{REPO_NAME}/releases/tags"
expected_uri = f"{base_uri}/{version}"
assert mock_session.request.call_args[0] == ("GET", expected_uri)
@pytest.mark.parametrize("version", ("0.1.0", "v0.1.0"))
def test_get_release_retry(self, mock_release, github_client, mock_session, version):
"""
Ensure after failing to get a release, we re-attempt with
out a v-prefix.
"""
opposite = version.lstrip("v") if version.startswith("v") else f"v{version}"
def side_effect(method, uri, *arg, **kwargs):
_version = uri.split("/")[-1]
if _version == version:
# Force it to try the opposite.
raise ValueError()
return mock_release
mock_session.request.side_effect = side_effect
actual = github_client.get_release(ORG_NAME, REPO_NAME, version)
assert actual["name"] == REPO_NAME
calls = mock_session.request.call_args_list[-2:]
expected_uri = "https://api.github.com/repos/test/path/releases/tags"
assert calls[0][0] == ("GET", f"{expected_uri}/{version}")
assert calls[1][0] == ("GET", f"{expected_uri}/{opposite}")
def test_get_org_repos(self, github_client, mock_session):
_ = list(github_client.get_org_repos())
call = mock_session.method_calls[-1]
params = call.kwargs["params"]
# Show we are fetching more than the default 30 per page.
assert params == {"per_page": 100, "page": 1}
def test_available_plugins(self, mocker, github_client, mock_session):
response1 = mocker.MagicMock()
response1.json.return_value = [{"name": "ape-myplugin"}]
response2 = mocker.MagicMock()
response2.json.return_value = []
def get_org_repos(method, url, **kwargs):
if kwargs["params"]["page"] == 1:
return response1
else:
# End.
return response2
mock_session.request.side_effect = get_org_repos
actual = github_client.available_plugins
assert actual == {"ape_myplugin"}
def test_available_plugins_handles_401(self, mocker, github_client, mock_session, ape_caplog):
"""
When you get a 401 from using a token, Ape's GitHub client should not
only warn the user but retry the request w/o authorization, as it likely
will still work.
"""
mock_session.headers = {"Authorization": "token mytoken"}
response1 = mocker.MagicMock()
response1.json.return_value = [{"name": "ape-myplugin"}]
response2 = mocker.MagicMock()
response2.json.return_value = []
bad_auth_response = mocker.MagicMock()
bad_auth_response.status_code = 401
bad_auth_response.raise_for_status.side_effect = HTTPError(response=bad_auth_response)
def get_org_repos(method, url, **kwargs):
if mock_session.headers.get("Authorization") == "token mytoken":
return bad_auth_response
elif kwargs["params"]["page"] == 1:
return response1
else:
# End.
return response2
mock_session.request.side_effect = get_org_repos
actual = github_client.available_plugins
# Still works, even with bad auth.
assert actual == {"ape_myplugin"}
# Show we got our log message.
expected = (
"Requests are not authorized! GITHUB_ACCESS_TOKEN is likely "
"expired; received 401 when attempted to use it. If you need "
"GitHub authorization, try resetting your token."
)
assert ape_caplog.head == expected
| TestGithubClient |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/tests/test_00_console_widget.py | {
"start": 8936,
"end": 27612
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" Create the application for the test case.
"""
cls._app = QtWidgets.QApplication.instance()
if cls._app is None:
cls._app = QtWidgets.QApplication([])
cls._app.setQuitOnLastWindowClosed(False)
@classmethod
def tearDownClass(cls):
""" Exit the application.
"""
QtWidgets.QApplication.quit()
def assert_text_equal(self, cursor, text):
cursor.select(QtGui.QTextCursor.Document)
selection = cursor.selectedText()
self.assertEqual(selection, text)
def test_special_characters(self):
""" Are special characters displayed correctly?
"""
w = ConsoleWidget()
cursor = w._get_prompt_cursor()
test_inputs = ['xyz\b\b=\n',
'foo\b\nbar\n',
'foo\b\nbar\r\n',
'abc\rxyz\b\b=']
expected_outputs = ['x=z\u2029',
'foo\u2029bar\u2029',
'foo\u2029bar\u2029',
'x=z']
for i, text in enumerate(test_inputs):
w._insert_plain_text(cursor, text)
self.assert_text_equal(cursor, expected_outputs[i])
# clear all the text
cursor.insertText('')
def test_erase_in_line(self):
""" Do control sequences for clearing the line work?
"""
w = ConsoleWidget()
cursor = w._get_prompt_cursor()
test_inputs = ['Hello\x1b[1KBye',
'Hello\x1b[0KBye',
'Hello\r\x1b[0KBye',
'Hello\r\x1b[1KBye',
'Hello\r\x1b[2KBye',
'Hello\x1b[2K\rBye']
expected_outputs = [' Bye',
'HelloBye',
'Bye',
'Byelo',
'Bye',
'Bye']
for i, text in enumerate(test_inputs):
w._insert_plain_text(cursor, text)
self.assert_text_equal(cursor, expected_outputs[i])
# clear all the text
cursor.insertText('')
def test_print_carriage_return(self):
""" Test that overwriting the current line works as intended,
before and after the cursor prompt.
"""
w = ConsoleWidget()
# Show a prompt
w._prompt = "prompt>"
w._prompt_sep = "\n"
w._show_prompt()
self.assert_text_equal(w._get_cursor(), '\u2029prompt>')
test_inputs = ['Hello\n', 'World\r',
'*' * 10, '\r',
'0', '1', '2', '3', '4',
'5', '6', '7', '8', '9',
'\r\n']
for text in test_inputs:
w._append_plain_text(text, before_prompt=True)
w._flush_pending_stream() # emulate text being flushed
self.assert_text_equal(w._get_cursor(),
"Hello\u20290123456789\u2029\u2029prompt>")
# Print after prompt
w._executing = True
test_inputs = ['\nF', 'o', 'o',
'\r', 'Bar', '\n']
for text in test_inputs:
w._append_plain_text(text, before_prompt=False)
w._flush_pending_stream() # emulate text being flushed
self.assert_text_equal(w._get_cursor(),
"Hello\u20290123456789\u2029\u2029prompt>\u2029Bar\u2029")
def test_link_handling(self):
noButton = QtCore.Qt.NoButton
noButtons = QtCore.Qt.NoButton
noModifiers = QtCore.Qt.NoModifier
MouseMove = QtCore.QEvent.MouseMove
QMouseEvent = QtGui.QMouseEvent
w = ConsoleWidget()
cursor = w._get_prompt_cursor()
w._insert_html(cursor, '<a href="http://python.org">written in</a>')
obj = w._control
tip = QtWidgets.QToolTip
self.assertEqual(tip.text(), '')
# should be somewhere else
elsewhereEvent = QMouseEvent(MouseMove, QtCore.QPointF(50, 50),
noButton, noButtons, noModifiers)
w.eventFilter(obj, elsewhereEvent)
self.assertEqual(tip.isVisible(), False)
self.assertEqual(tip.text(), '')
# should be over text
overTextEvent = QMouseEvent(MouseMove, QtCore.QPointF(1, 5),
noButton, noButtons, noModifiers)
w.eventFilter(obj, overTextEvent)
self.assertEqual(tip.isVisible(), True)
self.assertEqual(tip.text(), "http://python.org")
# should still be over text
stillOverTextEvent = QMouseEvent(MouseMove, QtCore.QPointF(1, 5),
noButton, noButtons, noModifiers)
w.eventFilter(obj, stillOverTextEvent)
self.assertEqual(tip.isVisible(), True)
self.assertEqual(tip.text(), "http://python.org")
def test_width_height(self):
# width()/height() QWidget properties should not be overridden.
w = ConsoleWidget()
self.assertEqual(w.width(), QtWidgets.QWidget.width(w))
self.assertEqual(w.height(), QtWidgets.QWidget.height(w))
def test_prompt_cursors(self):
"""Test the cursors that keep track of where the prompt begins and
ends"""
w = ConsoleWidget()
w._prompt = 'prompt>'
doc = w._control.document()
# Fill up the QTextEdit area with the maximum number of blocks
doc.setMaximumBlockCount(10)
for _ in range(9):
w._append_plain_text('line\n')
# Draw the prompt, this should cause the first lines to be deleted
w._show_prompt()
self.assertEqual(doc.blockCount(), 10)
# _prompt_pos should be at the end of the document
self.assertEqual(w._prompt_pos, w._get_end_pos())
# _append_before_prompt_pos should be at the beginning of the prompt
self.assertEqual(w._append_before_prompt_pos,
w._prompt_pos - len(w._prompt))
# insert some more text without drawing a new prompt
w._append_plain_text('line\n')
self.assertEqual(w._prompt_pos,
w._get_end_pos() - len('line\n'))
self.assertEqual(w._append_before_prompt_pos,
w._prompt_pos - len(w._prompt))
# redraw the prompt
w._show_prompt()
self.assertEqual(w._prompt_pos, w._get_end_pos())
self.assertEqual(w._append_before_prompt_pos,
w._prompt_pos - len(w._prompt))
# insert some text before the prompt
w._append_plain_text('line', before_prompt=True)
self.assertEqual(w._prompt_pos, w._get_end_pos())
self.assertEqual(w._append_before_prompt_pos,
w._prompt_pos - len(w._prompt))
def test_select_all(self):
w = ConsoleWidget()
w._append_plain_text('Header\n')
w._prompt = 'prompt>'
w._show_prompt()
control = w._control
app = QtWidgets.QApplication.instance()
cursor = w._get_cursor()
w._insert_plain_text_into_buffer(cursor, "if:\n pass")
cursor.clearSelection()
control.setTextCursor(cursor)
# "select all" action selects cell first
w.select_all_smart()
QTest.keyClick(control, QtCore.Qt.Key_C, QtCore.Qt.ControlModifier)
copied = app.clipboard().text()
self.assertEqual(copied, 'if:\n> pass')
# # "select all" action triggered a second time selects whole document
w.select_all_smart()
QTest.keyClick(control, QtCore.Qt.Key_C, QtCore.Qt.ControlModifier)
copied = app.clipboard().text()
self.assertEqual(copied, 'Header\nprompt>if:\n> pass')
@pytest.mark.skipif(sys.platform == 'darwin', reason="Fails on macOS")
def test_keypresses(self):
"""Test the event handling code for keypresses."""
w = ConsoleWidget()
w._append_plain_text('Header\n')
w._prompt = 'prompt>'
w._show_prompt()
app = QtWidgets.QApplication.instance()
control = w._control
# Test setting the input buffer
w._set_input_buffer('test input')
self.assertEqual(w._get_input_buffer(), 'test input')
# Ctrl+K kills input until EOL
w._set_input_buffer('test input')
c = control.textCursor()
c.setPosition(c.position() - 3)
control.setTextCursor(c)
QTest.keyClick(control, QtCore.Qt.Key_K, QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(), 'test in')
# Ctrl+V pastes
w._set_input_buffer('test input ')
app.clipboard().setText('pasted text')
QTest.keyClick(control, QtCore.Qt.Key_V, QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(), 'test input pasted text')
self.assertEqual(control.document().blockCount(), 2)
# Paste should strip indentation
w._set_input_buffer('test input ')
app.clipboard().setText(' pasted text')
QTest.keyClick(control, QtCore.Qt.Key_V, QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(), 'test input pasted text')
self.assertEqual(control.document().blockCount(), 2)
# Multiline paste, should also show continuation marks
w._set_input_buffer('test input ')
app.clipboard().setText('line1\nline2\nline3')
QTest.keyClick(control, QtCore.Qt.Key_V, QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(),
'test input line1\nline2\nline3')
self.assertEqual(control.document().blockCount(), 4)
self.assertEqual(control.document().findBlockByNumber(1).text(),
'prompt>test input line1')
self.assertEqual(control.document().findBlockByNumber(2).text(),
'> line2')
self.assertEqual(control.document().findBlockByNumber(3).text(),
'> line3')
# Multiline paste should strip indentation intelligently
# in the case where pasted text has leading whitespace on first line
# and we're pasting into indented position
w._set_input_buffer(' ')
app.clipboard().setText(' If 1:\n pass')
QTest.keyClick(control, QtCore.Qt.Key_V, QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(),
' If 1:\n pass')
# Ctrl+Backspace should intelligently remove the last word
w._set_input_buffer("foo = ['foo', 'foo', 'foo', \n"
" 'bar', 'bar', 'bar']")
QTest.keyClick(control, QtCore.Qt.Key_Backspace,
QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(),
("foo = ['foo', 'foo', 'foo', \n"
" 'bar', 'bar', '"))
QTest.keyClick(control, QtCore.Qt.Key_Backspace,
QtCore.Qt.ControlModifier)
QTest.keyClick(control, QtCore.Qt.Key_Backspace,
QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(),
("foo = ['foo', 'foo', 'foo', \n"
" '"))
QTest.keyClick(control, QtCore.Qt.Key_Backspace,
QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(),
("foo = ['foo', 'foo', 'foo', \n"
""))
QTest.keyClick(control, QtCore.Qt.Key_Backspace,
QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(),
"foo = ['foo', 'foo', 'foo',")
# Ctrl+Delete should intelligently remove the next word
w._set_input_buffer("foo = ['foo', 'foo', 'foo', \n"
" 'bar', 'bar', 'bar']")
c = control.textCursor()
c.setPosition(35)
control.setTextCursor(c)
QTest.keyClick(control, QtCore.Qt.Key_Delete,
QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(),
("foo = ['foo', 'foo', ', \n"
" 'bar', 'bar', 'bar']"))
QTest.keyClick(control, QtCore.Qt.Key_Delete,
QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(),
("foo = ['foo', 'foo', \n"
" 'bar', 'bar', 'bar']"))
QTest.keyClick(control, QtCore.Qt.Key_Delete,
QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(),
"foo = ['foo', 'foo', 'bar', 'bar', 'bar']")
w._set_input_buffer("foo = ['foo', 'foo', 'foo', \n"
" 'bar', 'bar', 'bar']")
c = control.textCursor()
c.setPosition(48)
control.setTextCursor(c)
QTest.keyClick(control, QtCore.Qt.Key_Delete,
QtCore.Qt.ControlModifier)
self.assertEqual(w._get_input_buffer(),
("foo = ['foo', 'foo', 'foo', \n"
"'bar', 'bar', 'bar']"))
# Left and right keys should respect the continuation prompt
w._set_input_buffer("line 1\n"
"line 2\n"
"line 3")
c = control.textCursor()
c.setPosition(20) # End of line 1
control.setTextCursor(c)
QTest.keyClick(control, QtCore.Qt.Key_Right)
# Cursor should have moved after the continuation prompt
self.assertEqual(control.textCursor().position(), 23)
QTest.keyClick(control, QtCore.Qt.Key_Left)
# Cursor should have moved to the end of the previous line
self.assertEqual(control.textCursor().position(), 20)
# TODO: many more keybindings
def test_indent(self):
"""Test the event handling code for indent/dedent keypresses ."""
w = ConsoleWidget()
w._append_plain_text('Header\n')
w._prompt = 'prompt>'
w._show_prompt()
control = w._control
# TAB with multiline selection should block-indent
w._set_input_buffer("")
c = control.textCursor()
pos=c.position()
w._set_input_buffer("If 1:\n pass")
c.setPosition(pos, QtGui.QTextCursor.KeepAnchor)
control.setTextCursor(c)
QTest.keyClick(control, QtCore.Qt.Key_Tab)
self.assertEqual(w._get_input_buffer()," If 1:\n pass")
# TAB with multiline selection, should block-indent to next multiple
# of 4 spaces, if first line has 0 < indent < 4
w._set_input_buffer("")
c = control.textCursor()
pos=c.position()
w._set_input_buffer(" If 2:\n pass")
c.setPosition(pos, QtGui.QTextCursor.KeepAnchor)
control.setTextCursor(c)
QTest.keyClick(control, QtCore.Qt.Key_Tab)
self.assertEqual(w._get_input_buffer()," If 2:\n pass")
# Shift-TAB with multiline selection should block-dedent
w._set_input_buffer("")
c = control.textCursor()
pos=c.position()
w._set_input_buffer(" If 3:\n pass")
c.setPosition(pos, QtGui.QTextCursor.KeepAnchor)
control.setTextCursor(c)
QTest.keyClick(control, QtCore.Qt.Key_Backtab)
self.assertEqual(w._get_input_buffer(),"If 3:\n pass")
def test_complete(self):
class TestKernelClient(object):
def is_complete(self, source):
calls.append(source)
return msg_id
w = ConsoleWidget()
cursor = w._get_prompt_cursor()
w._execute = lambda *args: calls.append(args)
w.kernel_client = TestKernelClient()
msg_id = object()
calls = []
# test incomplete statement (no _execute called, but indent added)
w.execute("thing", interactive=True)
self.assertEqual(calls, ["thing"])
calls = []
w._handle_is_complete_reply(
dict(parent_header=dict(msg_id=msg_id),
content=dict(status="incomplete", indent="!!!")))
self.assert_text_equal(cursor, "thing\u2029> !!!")
self.assertEqual(calls, [])
# test complete statement (_execute called)
msg_id = object()
w.execute("else", interactive=True)
self.assertEqual(calls, ["else"])
calls = []
w._handle_is_complete_reply(
dict(parent_header=dict(msg_id=msg_id),
content=dict(status="complete", indent="###")))
self.assertEqual(calls, [("else", False)])
calls = []
self.assert_text_equal(cursor, "thing\u2029> !!!else\u2029")
# test missing answer from is_complete
msg_id = object()
w.execute("done", interactive=True)
self.assertEqual(calls, ["done"])
calls = []
self.assert_text_equal(cursor, "thing\u2029> !!!else\u2029")
w._trigger_is_complete_callback()
self.assert_text_equal(cursor, "thing\u2029> !!!else\u2029\u2029> ")
# assert that late answer isn't destroying anything
w._handle_is_complete_reply(
dict(parent_header=dict(msg_id=msg_id),
content=dict(status="complete", indent="###")))
self.assertEqual(calls, [])
def test_complete_python(self):
"""Test that is_complete is working correctly for Python."""
# Kernel client to test the responses of is_complete
class TestIPyKernelClient(object):
def is_complete(self, source):
tm = TransformerManager()
check_complete = tm.check_complete(source)
responses.append(check_complete)
# Initialize widget
responses = []
w = ConsoleWidget()
w._append_plain_text('Header\n')
w._prompt = 'prompt>'
w._show_prompt()
w.kernel_client = TestIPyKernelClient()
# Execute incomplete statement inside a block
code = '\n'.join(["if True:", " a = 1"])
w._set_input_buffer(code)
w.execute(interactive=True)
assert responses == [('incomplete', 4)]
# Execute complete statement inside a block
responses = []
code = '\n'.join(["if True:", " a = 1\n\n"])
w._set_input_buffer(code)
w.execute(interactive=True)
assert responses == [('complete', None)]
| TestConsoleWidget |
python | langchain-ai__langchain | libs/partners/deepseek/tests/unit_tests/test_chat_models.py | {
"start": 2432,
"end": 3527
} | class ____(ChatModelUnitTests):
"""Standard unit tests for `ChatDeepSeek` chat model."""
@property
def chat_model_class(self) -> type[ChatDeepSeek]:
"""Chat model class being tested."""
return ChatDeepSeek
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
"""Parameters to initialize from environment variables."""
return (
{
"DEEPSEEK_API_KEY": "api_key",
"DEEPSEEK_API_BASE": "api_base",
},
{
"model": MODEL_NAME,
},
{
"api_key": "api_key",
"api_base": "api_base",
},
)
@property
def chat_model_params(self) -> dict:
"""Parameters to create chat model instance for testing."""
return {
"model": MODEL_NAME,
"api_key": "api_key",
}
def get_chat_model(self) -> ChatDeepSeek:
"""Get a chat model instance for testing."""
return ChatDeepSeek(**self.chat_model_params)
| TestChatDeepSeekUnit |
python | python__mypy | mypy/error_formatter.py | {
"start": 427,
"end": 1115
} | class ____(ErrorFormatter):
"""Formatter for basic JSON output format."""
def report_error(self, error: "MypyError") -> str:
"""Prints out the errors as simple, static JSON lines."""
return json.dumps(
{
"file": error.file_path,
"line": error.line,
"column": error.column,
"message": error.message,
"hint": None if len(error.hints) == 0 else "\n".join(error.hints),
"code": None if error.errorcode is None else error.errorcode.code,
"severity": error.severity,
}
)
OUTPUT_CHOICES = {"json": JSONFormatter()}
| JSONFormatter |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/annotations4.py | {
"start": 522,
"end": 1099
} | class ____:
# This should generate an error because aa is redeclared.
aa: int
def aa(self):
return 3
# This should generate two errors, one for each param.
def my_func(param1: int, param2):
param1: int = 3
param2: int = 4
# This should be fine because both declarations of 'e'
# use the same type.
e: list[int]
e = [3]
e: list[int]
def register(fn: Callable[[], None]) -> None: ...
# These should be be fine because they use the "_" name.
@register
def _():
print("Callback 1 called")
@register
def _():
print("Callback 2 called")
| Foo |
python | tensorflow__tensorflow | tensorflow/python/ops/data_flow_ops.py | {
"start": 35045,
"end": 38378
} | class ____(QueueBase):
"""A FIFOQueue that supports batching variable-sized tensors by padding.
A `PaddingFIFOQueue` may contain components with dynamic shape, while also
supporting `dequeue_many`. See the constructor for more details.
See `tf.queue.QueueBase` for a description of the methods on
this class.
"""
def __init__(self,
capacity,
dtypes,
shapes,
names=None,
shared_name=None,
name="padding_fifo_queue"):
"""Creates a queue that dequeues elements in a first-in first-out order.
A `PaddingFIFOQueue` has bounded capacity; supports multiple concurrent
producers and consumers; and provides exactly-once delivery.
A `PaddingFIFOQueue` holds a list of up to `capacity` elements. Each
element is a fixed-length tuple of tensors whose dtypes are
described by `dtypes`, and whose shapes are described by the `shapes`
argument.
The `shapes` argument must be specified; each component of a queue
element must have the respective shape. Shapes of fixed
rank but variable size are allowed by setting any shape dimension to None.
In this case, the inputs' shape may vary along the given dimension, and
`dequeue_many` will pad the given dimension with zeros up to the maximum
shape of all elements in the given batch.
Args:
capacity: An integer. The upper bound on the number of elements
that may be stored in this queue.
dtypes: A list of `DType` objects. The length of `dtypes` must equal
the number of tensors in each queue element.
shapes: A list of `TensorShape` objects, with the same length as
`dtypes`. Any dimension in the `TensorShape` containing value
`None` is dynamic and allows values to be enqueued with
variable size in that dimension.
names: (Optional.) A list of string naming the components in the queue
with the same length as `dtypes`, or `None`. If specified the dequeue
methods return a dictionary with the names as keys.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
name: Optional name for the queue operation.
Raises:
ValueError: If shapes is not a list of shapes, or the lengths of dtypes
and shapes do not match, or if names is specified and the lengths of
dtypes and names do not match.
"""
dtypes = _as_type_list(dtypes)
shapes = _as_shape_list(shapes, dtypes, unknown_dim_allowed=True)
names = _as_name_list(names, dtypes)
if len(dtypes) != len(shapes):
raise ValueError("Shapes must be provided for all components, "
f"but received {len(dtypes)} dtypes and "
f"{len(shapes)} shapes.")
queue_ref = gen_data_flow_ops.padding_fifo_queue_v2(
component_types=dtypes,
shapes=shapes,
capacity=capacity,
shared_name=_shared_name(shared_name),
name=name)
super(PaddingFIFOQueue, self).__init__(dtypes, shapes, names, queue_ref)
@tf_export("queue.PriorityQueue",
v1=["queue.PriorityQueue", "io.PriorityQueue", "PriorityQueue"])
@deprecation.deprecated_endpoints(["io.PriorityQueue", "PriorityQueue"])
| PaddingFIFOQueue |
python | kamyu104__LeetCode-Solutions | Python/maximum-good-subarray-sum.py | {
"start": 63,
"end": 551
} | class ____(object):
def maximumSubarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
prefix = collections.defaultdict(lambda: float("inf"))
curr = 0
result = float("-inf")
for x in nums:
prefix[x] = min(prefix[x], curr)
curr += x
result = max(result, curr-prefix[x-k], curr-prefix[x+k])
return result if result != float("-inf") else 0
| Solution |
python | spyder-ide__spyder | spyder/plugins/debugger/widgets/framesbrowser.py | {
"start": 12418,
"end": 13061
} | class ____(QTreeWidgetItem):
def __init__(self, parent, name):
self.name = str(name)
text_color = SpyderPalette.COLOR_TEXT_1
title_format = str(
'<!-- ThreadItem -->'
'<b style="color:{1}">{0}</b>'
)
title = (title_format.format(name, text_color))
QTreeWidgetItem.__init__(self, parent, [title], QTreeWidgetItem.Type)
self.setToolTip(0, self.name)
def __lt__(self, x):
"""Smaller for sorting."""
return self.name < x.name
def __ge__(self, x):
"""Larger or equals for sorting."""
return self.name >= x.name
| ThreadItem |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_connection.py | {
"start": 5911,
"end": 8196
} | class ____(ConnectionPool):
"""Connection pool for Azure Database for PostgreSQL connections."""
def __init__(
self,
conninfo: str = "",
*,
azure_conn_info: ConnectionInfo = ConnectionInfo(),
**kwargs,
):
if isinstance(azure_conn_info.credentials, TokenCredential):
_logger.debug(
"getting token from TokenCredential for the scope: %s",
TOKEN_CREDENTIAL_SCOPE,
)
credential_provider = azure_conn_info.credentials
token = credential_provider.get_token(TOKEN_CREDENTIAL_SCOPE)
_logger.info("getting username and password from token")
username, password = get_username_password(token)
_logger.debug("wrapping reconnect_failed function")
reconnect_failed: Callable[[ConnectionPool], None] | None = kwargs.get(
"reconnect_failed"
)
def reconnect_failed_wrapper(pool: ConnectionPool) -> None:
if reconnect_failed:
reconnect_failed(pool)
_logger.debug(
"getting token from TokenCredential for the scope: %s",
TOKEN_CREDENTIAL_SCOPE,
)
token = credential_provider.get_token(TOKEN_CREDENTIAL_SCOPE)
_logger.info("getting username and password from token")
username, password = get_username_password(token)
pool.kwargs.update(
user=username,
password=password,
)
kwargs["reconnect_failed"] = reconnect_failed_wrapper
else:
username, password = get_username_password(azure_conn_info.credentials)
azure_conn_info_kwargs = azure_conn_info.model_dump(
mode="json", exclude_none=True, exclude=set(["credentials"])
)
_logger.debug(
"updating ConnectionPool kwargs with those from: %s",
azure_conn_info_kwargs,
)
kwargs_ = kwargs.get("kwargs", {})
kwargs_.update(user=username, password=password, **azure_conn_info_kwargs)
kwargs["kwargs"] = kwargs_
super().__init__(conninfo, **kwargs)
| AzurePGConnectionPool |
python | PyCQA__pylint | tests/functional/s/super/super_checks.py | {
"start": 627,
"end": 757
} | class ____(NewAaaa):
"""new style"""
def __init__(self):
super().__init__() # <3.0:[missing-super-argument]
| Py3kAaaa |
python | mlflow__mlflow | examples/spark_udf/structs_and_arrays.py | {
"start": 89,
"end": 2122
} | class ____(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input):
return [str(" | ".join(map(str, row))) for _, row in model_input.iterrows()]
def main():
with SparkSession.builder.getOrCreate() as spark:
df = spark.createDataFrame(
[
(
"a",
[0],
{"bool": True},
[{"double": 0.1}],
)
],
schema=T.StructType(
[
T.StructField(
"str",
T.StringType(),
),
T.StructField(
"arr",
T.ArrayType(T.IntegerType()),
),
T.StructField(
"obj",
T.StructType(
[
T.StructField("bool", T.BooleanType()),
]
),
),
T.StructField(
"obj_arr",
T.ArrayType(
T.StructType(
[
T.StructField("double", T.DoubleType()),
]
)
),
),
]
),
)
df.printSchema()
df.show()
with mlflow.start_run():
model_info = mlflow.pyfunc.log_model(
name="model",
python_model=MyModel(),
signature=mlflow.models.infer_signature(df),
)
udf = mlflow.pyfunc.spark_udf(
spark=spark,
model_uri=model_info.model_uri,
result_type="string",
)
df.withColumn("output", udf("str", "arr", "obj", "obj_arr")).show()
if __name__ == "__main__":
main()
| MyModel |
python | pytorch__pytorch | torch/distributed/tensor/examples/convnext_example.py | {
"start": 1297,
"end": 2515
} | class ____(nn.Module):
def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-6):
super().__init__()
self.dwconv = nn.Conv2d(
dim, dim, kernel_size=7, padding=3, groups=dim
) # depthwise conv
self.norm = LayerNorm(dim, eps=1e-6, data_format=torch.contiguous_format)
self.pwconv1 = nn.Conv2d(
dim, 4 * dim, kernel_size=1, stride=1
) # nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers
self.act = nn.GELU()
self.pwconv2 = nn.Conv2d(
4 * dim, dim, kernel_size=1, stride=1
) # nn.Linear(4 * dim, dim)
self.gamma = (
nn.Parameter(
layer_scale_init_value * torch.ones((dim, 1, 1)), requires_grad=True
)
if layer_scale_init_value > 0
else None
)
self.drop_path = nn.Identity()
def forward(self, x):
input_x = x
x = self.dwconv(x)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * self.drop_path(x)
x = input_x + x
return x
| Block |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 484555,
"end": 485250
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of CancelEnterpriseAdminInvitation"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "invitation", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
invitation = sgqlc.types.Field("EnterpriseAdministratorInvitation", graphql_name="invitation")
"""The invitation that was canceled."""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of canceling an administrator
invitation.
"""
| CancelEnterpriseAdminInvitationPayload |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-monday/unit_tests/integrations/monday_responses/records/record_builder.py | {
"start": 151,
"end": 394
} | class ____(RecordBuilder):
@staticmethod
def extract_record(resource: str, execution_folder: str, data_field: Path):
return data_field.extract(find_template(resource=resource, execution_folder=execution_folder))
| MondayRecordBuilder |
python | html5lib__html5lib-python | html5lib/_inputstream.py | {
"start": 5687,
"end": 13658
} | class ____(object):
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
def __init__(self, source):
"""Initialises the HTMLInputStream.
HTMLInputStream(source, [encoding]) -> Normalized stream from source
for use by html5lib.
source can be either a file-object, local filename or a string.
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
if not _utils.supports_lone_surrogates:
# Such platforms will have already checked for such
# surrogate errors, so no need to do this checking.
self.reportCharacterErrors = None
elif len("\U0010FFFF") == 1:
self.reportCharacterErrors = self.characterErrorsUCS4
else:
self.reportCharacterErrors = self.characterErrorsUCS2
# List of where new lines occur
self.newLines = [0]
self.charEncoding = (lookupEncoding("utf-8"), "certain")
self.dataStream = self.openStream(source)
self.reset()
def reset(self):
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
self.errors = []
# number of (complete) lines in previous chunks
self.prevNumLines = 0
# number of columns in the last line of the previous chunk
self.prevNumCols = 0
# Deal with CR LF and surrogates split over chunk boundaries
self._bufferedCharacter = None
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = StringIO(source)
return stream
def _position(self, offset):
chunk = self.chunk
nLines = chunk.count('\n', 0, offset)
positionLine = self.prevNumLines + nLines
lastLinePos = chunk.rfind('\n', 0, offset)
if lastLinePos == -1:
positionColumn = self.prevNumCols + offset
else:
positionColumn = offset - (lastLinePos + 1)
return (positionLine, positionColumn)
def position(self):
"""Returns (line, col) of the current position in the stream."""
line, col = self._position(self.chunkOffset)
return (line + 1, col)
def char(self):
""" Read one character from the stream or queue if available. Return
EOF when EOF is reached.
"""
# Read a new chunk from the input stream if necessary
if self.chunkOffset >= self.chunkSize:
if not self.readChunk():
return EOF
chunkOffset = self.chunkOffset
char = self.chunk[chunkOffset]
self.chunkOffset = chunkOffset + 1
return char
def readChunk(self, chunkSize=None):
if chunkSize is None:
chunkSize = self._defaultChunkSize
self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
self.chunk = ""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
# Deal with CR LF and surrogates broken across chunks
if self._bufferedCharacter:
data = self._bufferedCharacter + data
self._bufferedCharacter = None
elif not data:
# We have no more data, bye-bye stream
return False
if len(data) > 1:
lastv = ord(data[-1])
if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
self._bufferedCharacter = data[-1]
data = data[:-1]
if self.reportCharacterErrors:
self.reportCharacterErrors(data)
# Replace invalid characters
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
self.chunk = data
self.chunkSize = len(data)
return True
def characterErrorsUCS4(self, data):
for _ in range(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
def characterErrorsUCS2(self, data):
# Someone picked the wrong compile option
# You lose
skip = False
for match in invalid_unicode_re.finditer(data):
if skip:
continue
codepoint = ord(match.group())
pos = match.start()
# Pretty sure there should be endianness issues here
if _utils.isSurrogatePair(data[pos:pos + 2]):
# We have a surrogate pair!
char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
if char_val in non_bmp_invalid_codepoints:
self.errors.append("invalid-codepoint")
skip = True
elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
pos == len(data) - 1):
self.errors.append("invalid-codepoint")
else:
skip = False
self.errors.append("invalid-codepoint")
def charsUntil(self, characters, opposite=False):
""" Returns a string of characters from the stream up to but not
including any character in 'characters' or EOF. 'characters' must be
a container that supports the 'in' method and iteration over its
characters.
"""
# Use a cache of regexps to find the required characters
try:
chars = charsUntilRegEx[(characters, opposite)]
except KeyError:
if __debug__:
for c in characters:
assert ord(c) < 128
regex = "".join(["\\x%02x" % ord(c) for c in characters])
if not opposite:
regex = "^%s" % regex
chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
rv = []
while True:
# Find the longest matching prefix
m = chars.match(self.chunk, self.chunkOffset)
if m is None:
# If nothing matched, and it wasn't because we ran out of chunk,
# then stop
if self.chunkOffset != self.chunkSize:
break
else:
end = m.end()
# If not the whole chunk matched, return everything
# up to the part that didn't match
if end != self.chunkSize:
rv.append(self.chunk[self.chunkOffset:end])
self.chunkOffset = end
break
# If the whole remainder of the chunk matched,
# use it all and read the next chunk
rv.append(self.chunk[self.chunkOffset:])
if not self.readChunk():
# Reached EOF
break
r = "".join(rv)
return r
def unget(self, char):
# Only one character is allowed to be ungotten at once - it must
# be consumed again before any further call to unget
if char is not EOF:
if self.chunkOffset == 0:
# unget is called quite rarely, so it's a good idea to do
# more work here if it saves a bit of work in the frequently
# called char and charsUntil.
# So, just prepend the ungotten character onto the current
# chunk:
self.chunk = char + self.chunk
self.chunkSize += 1
else:
self.chunkOffset -= 1
assert self.chunk[self.chunkOffset] == char
| HTMLUnicodeInputStream |
python | openai__openai-python | src/openai/types/chat/chat_completion_message.py | {
"start": 774,
"end": 991
} | class ____(BaseModel):
type: Literal["url_citation"]
"""The type of the URL citation. Always `url_citation`."""
url_citation: AnnotationURLCitation
"""A URL citation when using web search."""
| Annotation |
python | uqfoundation__dill | dill/tests/test_recursive.py | {
"start": 2177,
"end": 2469
} | class ____(Machine2):
def __init__(self):
super(SubMachine, self).__init__()
def test_partials():
assert copy(SubMachine(), byref=True)
assert copy(SubMachine(), byref=True, recurse=True)
assert copy(SubMachine(), recurse=True)
assert copy(SubMachine())
| SubMachine |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 30079,
"end": 31019
} | class ____(CharField):
"""Support both IPAddressField and GenericIPAddressField"""
default_error_messages = {
'invalid': _('Enter a valid IPv4 or IPv6 address.'),
}
def __init__(self, protocol='both', **kwargs):
self.protocol = protocol.lower()
self.unpack_ipv4 = (self.protocol == 'both')
super().__init__(**kwargs)
validators = ip_address_validators(protocol, self.unpack_ipv4)
self.validators.extend(validators)
def to_internal_value(self, data):
if not isinstance(data, str):
self.fail('invalid', value=data)
if ':' in data:
try:
if self.protocol in ('both', 'ipv6'):
return clean_ipv6_address(data, self.unpack_ipv4)
except DjangoValidationError:
self.fail('invalid', value=data)
return super().to_internal_value(data)
# Number types...
| IPAddressField |
python | sqlalchemy__sqlalchemy | test/orm/test_of_type.py | {
"start": 14435,
"end": 17456
} | class ____(
_PolymorphicTestBase, _PolymorphicAliasedJoins
):
def _polymorphic_join_target(self, cls):
return (
"(SELECT people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.name AS people_name, people.type AS people_type, "
"engineers.person_id AS engineers_person_id, "
"engineers.status AS engineers_status, "
"engineers.engineer_name AS engineers_engineer_name, "
"engineers.primary_language AS engineers_primary_language, "
"managers.person_id AS managers_person_id, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people LEFT OUTER JOIN engineers "
"ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers "
"ON people.person_id = managers.person_id) AS pjoin_1 "
"ON companies.company_id = pjoin_1.people_company_id"
)
def _test_with_polymorphic_join_exec_contains_eager_two_result(self):
return (
"SELECT pjoin_1.people_person_id AS pjoin_1_people_person_id, "
"pjoin_1.people_company_id AS pjoin_1_people_company_id, "
"pjoin_1.people_name AS pjoin_1_people_name, pjoin_1.people_type "
"AS pjoin_1_people_type, pjoin_1.engineers_person_id AS "
"pjoin_1_engineers_person_id, pjoin_1.engineers_status AS "
"pjoin_1_engineers_status, pjoin_1.engineers_engineer_name "
"AS pjoin_1_engineers_engineer_name, "
"pjoin_1.engineers_primary_language AS "
"pjoin_1_engineers_primary_language, pjoin_1.managers_person_id "
"AS pjoin_1_managers_person_id, pjoin_1.managers_status "
"AS pjoin_1_managers_status, pjoin_1.managers_manager_name "
"AS pjoin_1_managers_manager_name, companies.company_id "
"AS companies_company_id, companies.name AS companies_name "
"FROM companies JOIN (SELECT people.person_id AS "
"people_person_id, people.company_id AS people_company_id, "
"people.name AS people_name, people.type AS people_type, "
"engineers.person_id AS engineers_person_id, engineers.status "
"AS engineers_status, engineers.engineer_name AS "
"engineers_engineer_name, engineers.primary_language "
"AS engineers_primary_language, managers.person_id AS "
"managers_person_id, managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = "
"engineers.person_id LEFT OUTER JOIN managers "
"ON people.person_id = managers.person_id) AS pjoin_1 "
"ON companies.company_id = pjoin_1.people_company_id "
"ORDER BY companies.company_id, pjoin_1.people_person_id"
)
| PolymorphicAliasedJoinsTest |
python | openai__openai-python | src/openai/types/container_list_params.py | {
"start": 209,
"end": 893
} | class ____(TypedDict, total=False):
after: str
"""A cursor for use in pagination.
`after` is an object ID that defines your place in the list. For instance, if
you make a list request and receive 100 objects, ending with obj_foo, your
subsequent call can include after=obj_foo in order to fetch the next page of the
list.
"""
limit: int
"""A limit on the number of objects to be returned.
Limit can range between 1 and 100, and the default is 20.
"""
order: Literal["asc", "desc"]
"""Sort order by the `created_at` timestamp of the objects.
`asc` for ascending order and `desc` for descending order.
"""
| ContainerListParams |
python | walkccc__LeetCode | solutions/624. Maximum Distance in Arrays/624.py | {
"start": 0,
"end": 249
} | class ____:
def maxDistance(self, arrays: list[list[int]]) -> int:
ans = 0
mn = 10000
mx = -10000
for A in arrays:
ans = max(ans, A[-1] - mn, mx - A[0])
mn = min(mn, A[0])
mx = max(mx, A[-1])
return ans
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/transfers/oracle_to_gcs.py | {
"start": 1257,
"end": 5869
} | class ____(BaseSQLToGCSOperator):
"""
Copy data from Oracle to Google Cloud Storage in JSON, CSV or Parquet format.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OracleToGCSOperator`
:param oracle_conn_id: Reference to a specific
:ref:`Oracle hook <howto/connection:oracle>`.
:param ensure_utc: Ensure TIMESTAMP columns exported as UTC. If set to
`False`, TIMESTAMP columns will be exported using the Oracle server's
default timezone.
"""
ui_color = "#a0e08c"
type_map = {
oracledb.DB_TYPE_BINARY_DOUBLE: "DECIMAL",
oracledb.DB_TYPE_BINARY_FLOAT: "DECIMAL",
oracledb.DB_TYPE_BINARY_INTEGER: "INTEGER",
oracledb.DB_TYPE_BOOLEAN: "BOOLEAN",
oracledb.DB_TYPE_DATE: "TIMESTAMP",
oracledb.DB_TYPE_NUMBER: "NUMERIC",
oracledb.DB_TYPE_TIMESTAMP: "TIMESTAMP",
oracledb.DB_TYPE_TIMESTAMP_LTZ: "TIMESTAMP",
oracledb.DB_TYPE_TIMESTAMP_TZ: "TIMESTAMP",
}
def __init__(self, *, oracle_conn_id="oracle_default", ensure_utc=False, **kwargs):
super().__init__(**kwargs)
self.ensure_utc = ensure_utc
self.oracle_conn_id = oracle_conn_id
@cached_property
def db_hook(self) -> OracleHook:
return OracleHook(oracle_conn_id=self.oracle_conn_id)
def query(self):
"""Query Oracle and returns a cursor to the results."""
conn = self.db_hook.get_conn()
cursor = conn.cursor()
if self.ensure_utc:
# Ensure TIMESTAMP results are in UTC
tz_query = "SET time_zone = '+00:00'"
self.log.info("Executing: %s", tz_query)
cursor.execute(tz_query)
self.log.info("Executing: %s", self.sql)
cursor.execute(self.sql)
return cursor
def field_to_bigquery(self, field) -> dict[str, str]:
field_type = self.type_map.get(field[1], "STRING")
field_mode = "NULLABLE" if not field[6] or field_type == "TIMESTAMP" else "REQUIRED"
return {
"name": field[0],
"type": field_type,
"mode": field_mode,
}
def convert_type(self, value, schema_type, **kwargs):
"""
Take a value from Oracle db and convert it to a value safe for JSON/Google Cloud Storage/BigQuery.
* Datetimes are converted to UTC seconds.
* Decimals are converted to floats.
* Dates are converted to ISO formatted string if given schema_type is
DATE, or UTC seconds otherwise.
* Binary type fields are converted to integer if given schema_type is
INTEGER, or encoded with base64 otherwise. Imported BYTES data must
be base64-encoded according to BigQuery documentation:
https://cloud.google.com/bigquery/data-types
:param value: Oracle db column value
:param schema_type: BigQuery data type
"""
if value is None:
return value
if isinstance(value, datetime):
value = calendar.timegm(value.timetuple())
elif isinstance(value, timedelta):
value = value.total_seconds()
elif isinstance(value, Decimal):
value = float(value)
elif isinstance(value, date):
if schema_type == "DATE":
value = value.isoformat()
else:
value = calendar.timegm(value.timetuple())
elif isinstance(value, bytes):
if schema_type == "INTEGER":
value = int.from_bytes(value, "big")
else:
value = base64.standard_b64encode(value).decode("ascii")
return value
def get_openlineage_facets_on_start(self) -> OperatorLineage | None:
from airflow.providers.common.compat.openlineage.facet import SQLJobFacet
from airflow.providers.common.compat.openlineage.utils.sql import get_openlineage_facets_with_sql
from airflow.providers.openlineage.extractors import OperatorLineage
sql_parsing_result = get_openlineage_facets_with_sql(
hook=self.db_hook,
sql=self.sql,
conn_id=self.oracle_conn_id,
database=self.db_hook.service_name or self.db_hook.sid,
)
gcs_output_datasets = self._get_openlineage_output_datasets()
if sql_parsing_result:
sql_parsing_result.outputs = gcs_output_datasets
return sql_parsing_result
return OperatorLineage(outputs=gcs_output_datasets, job_facets={"sql": SQLJobFacet(self.sql)})
| OracleToGCSOperator |
python | kamyu104__LeetCode-Solutions | Python/maximum-product-of-three-elements-after-one-replacement.py | {
"start": 38,
"end": 388
} | class ____(object):
def maxProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
L = 2
top = [0]*L
for x in nums:
x = abs(x)
for i in xrange(L):
if x > top[i]:
x, top[i] = top[i], x
return top[0]*top[1]*10**5
| Solution |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 11811,
"end": 12168
} | class ____(GroupType):
type_id = 1006
slug = "performance_n_plus_one_db_queries"
description = "N+1 Query"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.DB_QUERY.value
noise_config = NoiseConfig()
default_priority = PriorityLevel.LOW
released = True
@dataclass(frozen=True)
| PerformanceNPlusOneGroupType |
python | django__django | tests/admin_views/admin.py | {
"start": 27778,
"end": 27916
} | class ____(admin.ModelAdmin):
form = FormWithVisibleAndHiddenField
fieldsets = EmptyModelVisibleAdmin.fieldsets
| EmptyModelMixinAdmin |
python | scrapy__scrapy | scrapy/settings/__init__.py | {
"start": 2446,
"end": 23760
} | class ____(MutableMapping[_SettingsKey, Any]):
"""
Instances of this class behave like dictionaries, but store priorities
along with their ``(key, value)`` pairs, and can be frozen (i.e. marked
immutable).
Key-value entries can be passed on initialization with the ``values``
argument, and they would take the ``priority`` level (unless ``values`` is
already an instance of :class:`~scrapy.settings.BaseSettings`, in which
case the existing priority levels will be kept). If the ``priority``
argument is a string, the priority name will be looked up in
:attr:`~scrapy.settings.SETTINGS_PRIORITIES`. Otherwise, a specific integer
should be provided.
Once the object is created, new settings can be loaded or updated with the
:meth:`~scrapy.settings.BaseSettings.set` method, and can be accessed with
the square bracket notation of dictionaries, or with the
:meth:`~scrapy.settings.BaseSettings.get` method of the instance and its
value conversion variants. When requesting a stored key, the value with the
highest priority will be retrieved.
"""
__default = object()
def __init__(self, values: _SettingsInput = None, priority: int | str = "project"):
self.frozen: bool = False
self.attributes: dict[_SettingsKey, SettingsAttribute] = {}
if values:
self.update(values, priority)
def __getitem__(self, opt_name: _SettingsKey) -> Any:
if opt_name not in self:
return None
return self.attributes[opt_name].value
def __contains__(self, name: Any) -> bool:
return name in self.attributes
def add_to_list(self, name: _SettingsKey, item: Any) -> None:
"""Append *item* to the :class:`list` setting with the specified *name*
if *item* is not already in that list.
This change is applied regardless of the priority of the *name*
setting. The setting priority is not affected by this change either.
"""
value: list[str] = self.getlist(name)
if item not in value:
self.set(name, [*value, item], self.getpriority(name) or 0)
def remove_from_list(self, name: _SettingsKey, item: Any) -> None:
"""Remove *item* from the :class:`list` setting with the specified
*name*.
If *item* is missing, raise :exc:`ValueError`.
This change is applied regardless of the priority of the *name*
setting. The setting priority is not affected by this change either.
"""
value: list[str] = self.getlist(name)
if item not in value:
raise ValueError(f"{item!r} not found in the {name} setting ({value!r}).")
self.set(name, [v for v in value if v != item], self.getpriority(name) or 0)
def get(self, name: _SettingsKey, default: Any = None) -> Any:
"""
Get a setting value without affecting its original type.
:param name: the setting name
:type name: str
:param default: the value to return if no setting is found
:type default: object
"""
if name == "CONCURRENT_REQUESTS_PER_IP" and (
isinstance(self[name], int) and self[name] != 0
):
warnings.warn(
"The CONCURRENT_REQUESTS_PER_IP setting is deprecated, use CONCURRENT_REQUESTS_PER_DOMAIN instead.",
ScrapyDeprecationWarning,
stacklevel=2,
)
return self[name] if self[name] is not None else default
def getbool(self, name: _SettingsKey, default: bool = False) -> bool:
"""
Get a setting value as a boolean.
``1``, ``'1'``, `True`` and ``'True'`` return ``True``,
while ``0``, ``'0'``, ``False``, ``'False'`` and ``None`` return ``False``.
For example, settings populated through environment variables set to
``'0'`` will return ``False`` when using this method.
:param name: the setting name
:type name: str
:param default: the value to return if no setting is found
:type default: object
"""
got = self.get(name, default)
try:
return bool(int(got))
except ValueError:
if got in ("True", "true"):
return True
if got in ("False", "false"):
return False
raise ValueError(
"Supported values for boolean settings "
"are 0/1, True/False, '0'/'1', "
"'True'/'False' and 'true'/'false'"
)
def getint(self, name: _SettingsKey, default: int = 0) -> int:
"""
Get a setting value as an int.
:param name: the setting name
:type name: str
:param default: the value to return if no setting is found
:type default: object
"""
return int(self.get(name, default))
def getfloat(self, name: _SettingsKey, default: float = 0.0) -> float:
"""
Get a setting value as a float.
:param name: the setting name
:type name: str
:param default: the value to return if no setting is found
:type default: object
"""
return float(self.get(name, default))
def getlist(
self, name: _SettingsKey, default: list[Any] | None = None
) -> list[Any]:
"""
Get a setting value as a list. If the setting original type is a list,
a copy of it will be returned. If it's a string it will be split by
",". If it is an empty string, an empty list will be returned.
For example, settings populated through environment variables set to
``'one,two'`` will return a list ['one', 'two'] when using this method.
:param name: the setting name
:type name: str
:param default: the value to return if no setting is found
:type default: object
"""
value = self.get(name, default or [])
if not value:
return []
if isinstance(value, str):
value = value.split(",")
return list(value)
def getdict(
self, name: _SettingsKey, default: dict[Any, Any] | None = None
) -> dict[Any, Any]:
"""
Get a setting value as a dictionary. If the setting original type is a
dictionary, a copy of it will be returned. If it is a string it will be
evaluated as a JSON dictionary. In the case that it is a
:class:`~scrapy.settings.BaseSettings` instance itself, it will be
converted to a dictionary, containing all its current settings values
as they would be returned by :meth:`~scrapy.settings.BaseSettings.get`,
and losing all information about priority and mutability.
:param name: the setting name
:type name: str
:param default: the value to return if no setting is found
:type default: object
"""
value = self.get(name, default or {})
if isinstance(value, str):
value = json.loads(value)
return dict(value)
def getdictorlist(
self,
name: _SettingsKey,
default: dict[Any, Any] | list[Any] | tuple[Any] | None = None,
) -> dict[Any, Any] | list[Any]:
"""Get a setting value as either a :class:`dict` or a :class:`list`.
If the setting is already a dict or a list, a copy of it will be
returned.
If it is a string it will be evaluated as JSON, or as a comma-separated
list of strings as a fallback.
For example, settings populated from the command line will return:
- ``{'key1': 'value1', 'key2': 'value2'}`` if set to
``'{"key1": "value1", "key2": "value2"}'``
- ``['one', 'two']`` if set to ``'["one", "two"]'`` or ``'one,two'``
:param name: the setting name
:type name: string
:param default: the value to return if no setting is found
:type default: any
"""
value = self.get(name, default)
if value is None:
return {}
if isinstance(value, str):
try:
value_loaded = json.loads(value)
if not isinstance(value_loaded, (dict, list)):
raise ValueError(
f"JSON string for setting '{name}' must evaluate to a dict or list, "
f"got {type(value_loaded).__name__}: {value_loaded!r}"
)
return value_loaded
except ValueError:
return value.split(",")
if isinstance(value, tuple):
return list(value)
if not isinstance(value, (dict, list)):
raise ValueError(
f"Setting '{name}' must be a dict, list, tuple, or string, "
f"got {type(value).__name__}: {value!r}"
)
return copy.deepcopy(value)
def getwithbase(self, name: _SettingsKey) -> BaseSettings:
"""Get a composition of a dictionary-like setting and its `_BASE`
counterpart.
:param name: name of the dictionary-like setting
:type name: str
"""
if not isinstance(name, str):
raise ValueError(f"Base setting key must be a string, got {name}")
compbs = BaseSettings()
compbs.update(self[name + "_BASE"])
compbs.update(self[name])
return compbs
def getpriority(self, name: _SettingsKey) -> int | None:
"""
Return the current numerical priority value of a setting, or ``None`` if
the given ``name`` does not exist.
:param name: the setting name
:type name: str
"""
if name not in self:
return None
return self.attributes[name].priority
def maxpriority(self) -> int:
"""
Return the numerical value of the highest priority present throughout
all settings, or the numerical value for ``default`` from
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` if there are no settings
stored.
"""
if len(self) > 0:
return max(cast("int", self.getpriority(name)) for name in self)
return get_settings_priority("default")
def replace_in_component_priority_dict(
self,
name: _SettingsKey,
old_cls: type,
new_cls: type,
priority: int | None = None,
) -> None:
"""Replace *old_cls* with *new_cls* in the *name* :ref:`component
priority dictionary <component-priority-dictionaries>`.
If *old_cls* is missing, or has :data:`None` as value, :exc:`KeyError`
is raised.
If *old_cls* was present as an import string, even more than once,
those keys are dropped and replaced by *new_cls*.
If *priority* is specified, that is the value assigned to *new_cls* in
the component priority dictionary. Otherwise, the value of *old_cls* is
used. If *old_cls* was present multiple times (possible with import
strings) with different values, the value assigned to *new_cls* is one
of them, with no guarantee about which one it is.
This change is applied regardless of the priority of the *name*
setting. The setting priority is not affected by this change either.
"""
component_priority_dict = self.getdict(name)
old_priority = None
for cls_or_path in tuple(component_priority_dict):
if load_object(cls_or_path) != old_cls:
continue
if (old_priority := component_priority_dict.pop(cls_or_path)) is None:
break
if old_priority is None:
raise KeyError(
f"{old_cls} not found in the {name} setting ({component_priority_dict!r})."
)
component_priority_dict[new_cls] = (
old_priority if priority is None else priority
)
self.set(name, component_priority_dict, priority=self.getpriority(name) or 0)
def __setitem__(self, name: _SettingsKey, value: Any) -> None:
self.set(name, value)
def set(
self, name: _SettingsKey, value: Any, priority: int | str = "project"
) -> None:
"""
Store a key/value attribute with a given priority.
Settings should be populated *before* configuring the Crawler object
(through the :meth:`~scrapy.crawler.Crawler.configure` method),
otherwise they won't have any effect.
:param name: the setting name
:type name: str
:param value: the value to associate with the setting
:type value: object
:param priority: the priority of the setting. Should be a key of
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer
:type priority: str or int
"""
self._assert_mutability()
priority = get_settings_priority(priority)
if name not in self:
if isinstance(value, SettingsAttribute):
self.attributes[name] = value
else:
self.attributes[name] = SettingsAttribute(value, priority)
else:
self.attributes[name].set(value, priority)
def set_in_component_priority_dict(
self, name: _SettingsKey, cls: type, priority: int | None
) -> None:
"""Set the *cls* component in the *name* :ref:`component priority
dictionary <component-priority-dictionaries>` setting with *priority*.
If *cls* already exists, its value is updated.
If *cls* was present as an import string, even more than once, those
keys are dropped and replaced by *cls*.
This change is applied regardless of the priority of the *name*
setting. The setting priority is not affected by this change either.
"""
component_priority_dict = self.getdict(name)
for cls_or_path in tuple(component_priority_dict):
if not isinstance(cls_or_path, str):
continue
_cls = load_object(cls_or_path)
if _cls == cls:
del component_priority_dict[cls_or_path]
component_priority_dict[cls] = priority
self.set(name, component_priority_dict, self.getpriority(name) or 0)
def setdefault(
self,
name: _SettingsKey,
default: Any = None,
priority: int | str = "project",
) -> Any:
if name not in self:
self.set(name, default, priority)
return default
return self.attributes[name].value
def setdefault_in_component_priority_dict(
self, name: _SettingsKey, cls: type, priority: int | None
) -> None:
"""Set the *cls* component in the *name* :ref:`component priority
dictionary <component-priority-dictionaries>` setting with *priority*
if not already defined (even as an import string).
If *cls* is not already defined, it is set regardless of the priority
of the *name* setting. The setting priority is not affected by this
change either.
"""
component_priority_dict = self.getdict(name)
for cls_or_path in tuple(component_priority_dict):
if load_object(cls_or_path) == cls:
return
component_priority_dict[cls] = priority
self.set(name, component_priority_dict, self.getpriority(name) or 0)
def setdict(self, values: _SettingsInput, priority: int | str = "project") -> None:
self.update(values, priority)
def setmodule(
self, module: ModuleType | str, priority: int | str = "project"
) -> None:
"""
Store settings from a module with a given priority.
This is a helper function that calls
:meth:`~scrapy.settings.BaseSettings.set` for every globally declared
uppercase variable of ``module`` with the provided ``priority``.
:param module: the module or the path of the module
:type module: types.ModuleType or str
:param priority: the priority of the settings. Should be a key of
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer
:type priority: str or int
"""
self._assert_mutability()
if isinstance(module, str):
module = import_module(module)
for key in dir(module):
if key.isupper():
self.set(key, getattr(module, key), priority)
# BaseSettings.update() doesn't support all inputs that MutableMapping.update() supports
def update(self, values: _SettingsInput, priority: int | str = "project") -> None: # type: ignore[override]
"""
Store key/value pairs with a given priority.
This is a helper function that calls
:meth:`~scrapy.settings.BaseSettings.set` for every item of ``values``
with the provided ``priority``.
If ``values`` is a string, it is assumed to be JSON-encoded and parsed
into a dict with ``json.loads()`` first. If it is a
:class:`~scrapy.settings.BaseSettings` instance, the per-key priorities
will be used and the ``priority`` parameter ignored. This allows
inserting/updating settings with different priorities with a single
command.
:param values: the settings names and values
:type values: dict or string or :class:`~scrapy.settings.BaseSettings`
:param priority: the priority of the settings. Should be a key of
:attr:`~scrapy.settings.SETTINGS_PRIORITIES` or an integer
:type priority: str or int
"""
self._assert_mutability()
if isinstance(values, str):
values = cast("dict[_SettingsKey, Any]", json.loads(values))
if values is not None:
if isinstance(values, BaseSettings):
for name, value in values.items():
self.set(name, value, cast("int", values.getpriority(name)))
else:
for name, value in values.items():
self.set(name, value, priority)
def delete(self, name: _SettingsKey, priority: int | str = "project") -> None:
if name not in self:
raise KeyError(name)
self._assert_mutability()
priority = get_settings_priority(priority)
if priority >= cast("int", self.getpriority(name)):
del self.attributes[name]
def __delitem__(self, name: _SettingsKey) -> None:
self._assert_mutability()
del self.attributes[name]
def _assert_mutability(self) -> None:
if self.frozen:
raise TypeError("Trying to modify an immutable Settings object")
def copy(self) -> Self:
"""
Make a deep copy of current settings.
This method returns a new instance of the :class:`Settings` class,
populated with the same values and their priorities.
Modifications to the new object won't be reflected on the original
settings.
"""
return copy.deepcopy(self)
def freeze(self) -> None:
"""
Disable further changes to the current settings.
After calling this method, the present state of the settings will become
immutable. Trying to change values through the :meth:`~set` method and
its variants won't be possible and will be alerted.
"""
self.frozen = True
def frozencopy(self) -> Self:
"""
Return an immutable copy of the current settings.
Alias for a :meth:`~freeze` call in the object returned by :meth:`copy`.
"""
copy = self.copy()
copy.freeze()
return copy
def __iter__(self) -> Iterator[_SettingsKey]:
return iter(self.attributes)
def __len__(self) -> int:
return len(self.attributes)
def _to_dict(self) -> dict[_SettingsKey, Any]:
return {
self._get_key(k): (v._to_dict() if isinstance(v, BaseSettings) else v)
for k, v in self.items()
}
def _get_key(self, key_value: Any) -> _SettingsKey:
return (
key_value
if isinstance(key_value, (bool, float, int, str, type(None)))
else str(key_value)
)
def copy_to_dict(self) -> dict[_SettingsKey, Any]:
"""
Make a copy of current settings and convert to a dict.
This method returns a new dict populated with the same values
and their priorities as the current settings.
Modifications to the returned dict won't be reflected on the original
settings.
This method can be useful for example for printing settings
in Scrapy shell.
"""
settings = self.copy()
return settings._to_dict()
# https://ipython.readthedocs.io/en/stable/config/integrating.html#pretty-printing
def _repr_pretty_(self, p: Any, cycle: bool) -> None:
if cycle:
p.text(repr(self))
else:
p.text(pformat(self.copy_to_dict()))
def pop(self, name: _SettingsKey, default: Any = __default) -> Any:
try:
value = self.attributes[name].value
except KeyError:
if default is self.__default:
raise
return default
self.__delitem__(name)
return value
| BaseSettings |
python | chroma-core__chroma | sample_apps/generative_benchmarking/functions/types.py | {
"start": 325,
"end": 400
} | class ____:
doc_scores: Dict[str, QueryResultItem]
@dataclass
| QueryResults |
python | crytic__slither | slither/tools/properties/__main__.py | {
"start": 1513,
"end": 4343
} | class ____(argparse.Action): # pylint: disable=too-few-public-methods
def __call__(
self, parser: Any, *args: Any, **kwargs: Any
) -> None: # pylint: disable=signature-differs
logger.info(_all_properties())
parser.exit()
def parse_args() -> argparse.Namespace:
"""
Parse the underlying arguments for the program.
:return: Returns the arguments for the program.
"""
parser = argparse.ArgumentParser(
description="Generates code properties (e.g., invariants) that can be tested with unit tests or Echidna, entirely automatically.",
usage="slither-prop filename",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"filename", help="The filename of the contract or project directory to analyze."
)
parser.add_argument("--contract", help="The targeted contract.")
parser.add_argument(
"--scenario",
help="Test a specific scenario. Use --list-scenarios to see the available scenarios. Default Transferable",
default="Transferable",
)
parser.add_argument(
"--list-scenarios",
help="List available scenarios",
action=ListScenarios,
nargs=0,
default=False,
)
parser.add_argument(
"--list-properties",
help="List available properties",
action=ListProperties,
nargs=0,
default=False,
)
parser.add_argument(
"--address-owner", help=f"Owner address. Default {OWNER_ADDRESS}", default=None
)
parser.add_argument(
"--address-user", help=f"Owner address. Default {USER_ADDRESS}", default=None
)
parser.add_argument(
"--address-attacker",
help=f"Attacker address. Default {ATTACKER_ADDRESS}",
default=None,
)
# Add default arguments from crytic-compile
cryticparser.init(parser)
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
return parser.parse_args()
def main() -> None:
args = parse_args()
# Perform slither analysis on the given filename
slither = Slither(args.filename, **vars(args))
contracts = slither.get_contract_from_name(args.contract)
if len(contracts) != 1:
if len(slither.contracts) == 1:
contract = slither.contracts[0]
else:
if args.contract is None:
to_log = "Specify the target: --contract ContractName"
else:
to_log = f"{args.contract} not found"
logger.error(to_log)
return
else:
contract = contracts[0]
addresses = Addresses(args.address_owner, args.address_user, args.address_attacker)
generate_erc20(contract, args.scenario, addresses)
if __name__ == "__main__":
main()
| ListProperties |
python | simonw__sqlite-utils | sqlite_utils/db.py | {
"start": 6448,
"end": 6741
} | class ____(Exception):
"With multi=True code must return a Python dictionary"
def __init__(self, values):
self.values = values
_COUNTS_TABLE_CREATE_SQL = """
CREATE TABLE IF NOT EXISTS "{}"(
"table" TEXT PRIMARY KEY,
count INTEGER DEFAULT 0
);
""".strip()
| BadMultiValues |
python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {
"start": 72321,
"end": 73620
} | class ____(Response):
"""
Response of queues.move_task_backward endpoint.
:param position: The new position of the task entry in the queue (index, -1 represents bottom of queue)
:type position: int
"""
_service = "queues"
_action = "move_task_backward"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"position": {
"description": "The new position of the task entry in the queue (index, -1 represents bottom of queue)",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, position: Optional[int] = None, **kwargs: Any) -> None:
super(MoveTaskBackwardResponse, self).__init__(**kwargs)
self.position = position
@schema_property("position")
def position(self) -> Optional[int]:
return self._property_position
@position.setter
def position(self, value: Optional[int]) -> None:
if value is None:
self._property_position = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "position", six.integer_types)
self._property_position = value
| MoveTaskBackwardResponse |
python | networkx__networkx | networkx/algorithms/approximation/tests/test_treewidth.py | {
"start": 5447,
"end": 8868
} | class ____:
"""Unit tests for the treewidth_min_fill_in function."""
@classmethod
def setup_class(cls):
"""Setup for different kinds of trees"""
cls.complete = nx.Graph()
cls.complete.add_edge(1, 2)
cls.complete.add_edge(2, 3)
cls.complete.add_edge(1, 3)
cls.small_tree = nx.Graph()
cls.small_tree.add_edge(1, 2)
cls.small_tree.add_edge(2, 3)
cls.small_tree.add_edge(3, 4)
cls.small_tree.add_edge(1, 4)
cls.small_tree.add_edge(2, 4)
cls.small_tree.add_edge(4, 5)
cls.small_tree.add_edge(5, 6)
cls.small_tree.add_edge(5, 7)
cls.small_tree.add_edge(6, 7)
cls.deterministic_graph = nx.Graph()
cls.deterministic_graph.add_edge(1, 2)
cls.deterministic_graph.add_edge(1, 3)
cls.deterministic_graph.add_edge(3, 4)
cls.deterministic_graph.add_edge(2, 4)
cls.deterministic_graph.add_edge(3, 5)
cls.deterministic_graph.add_edge(4, 5)
cls.deterministic_graph.add_edge(3, 6)
cls.deterministic_graph.add_edge(5, 6)
def test_petersen_graph(self):
"""Test Petersen graph tree decomposition result"""
G = nx.petersen_graph()
_, decomp = treewidth_min_fill_in(G)
is_tree_decomp(G, decomp)
def test_small_tree_treewidth(self):
"""Test if the computed treewidth of the known self.small_tree is 2"""
G = self.small_tree
# the order of removal should be [1,2,4]3[5,6,7]
# (with [] denoting any order of the containing nodes)
# resulting in treewidth 2 for the heuristic
treewidth, _ = treewidth_min_fill_in(G)
assert treewidth == 2
def test_heuristic_abort(self):
"""Test if min_fill_in returns None for fully connected graph"""
graph = {}
for u in self.complete:
graph[u] = set()
for v in self.complete[u]:
if u != v: # ignore self-loop
graph[u].add(v)
next_node = min_fill_in_heuristic(graph)
if next_node is None:
pass
else:
assert False
def test_empty_graph(self):
"""Test empty graph"""
G = nx.Graph()
_, _ = treewidth_min_fill_in(G)
def test_two_component_graph(self):
G = nx.Graph()
G.add_node(1)
G.add_node(2)
treewidth, _ = treewidth_min_fill_in(G)
assert treewidth == 0
def test_not_sortable_nodes(self):
G = nx.Graph([(0, "a")])
treewidth_min_fill_in(G)
def test_heuristic_first_steps(self):
"""Test first steps of min_fill_in heuristic"""
graph = {
n: set(self.deterministic_graph[n]) - {n} for n in self.deterministic_graph
}
elim_node = min_fill_in_heuristic(graph)
steps = []
while elim_node is not None:
steps.append(elim_node)
nbrs = graph[elim_node]
for u, v in itertools.permutations(nbrs, 2):
if v not in graph[u]:
graph[u].add(v)
for u in graph:
if elim_node in graph[u]:
graph[u].remove(elim_node)
del graph[elim_node]
elim_node = min_fill_in_heuristic(graph)
# check only the first 2 elements for equality
assert steps[:2] == [6, 5]
| TestTreewidthMinFillIn |
python | walkccc__LeetCode | solutions/3507. Minimum Pair Removal to Sort Array I/3507.py | {
"start": 0,
"end": 389
} | class ____:
def minimumPairRemoval(self, nums: list[int]) -> int:
ans = 0
while any(x > y for x, y in itertools.pairwise(nums)):
pairSums = [x + y for x, y in itertools.pairwise(nums)]
minPairSum = min(pairSums)
minPairIndex = pairSums.index(minPairSum)
nums[minPairIndex] = minPairSum
nums.pop(minPairIndex + 1)
ans += 1
return ans
| Solution |
python | django__django | tests/model_forms/tests.py | {
"start": 132891,
"end": 132973
} | class ____(forms.ModelForm, metaclass=CustomMetaclass):
pass
| CustomMetaclassForm |
python | urllib3__urllib3 | src/urllib3/response.py | {
"start": 15703,
"end": 44048
} | class ____(BaseHTTPResponse):
"""
HTTP Response container.
Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param original_response:
When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
:param retries:
The retries contains the last :class:`~urllib3.util.retry.Retry` that
was used during the request.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
"""
def __init__(
self,
body: _TYPE_BODY = "",
headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,
status: int = 0,
version: int = 0,
version_string: str = "HTTP/?",
reason: str | None = None,
preload_content: bool = True,
decode_content: bool = True,
original_response: _HttplibHTTPResponse | None = None,
pool: HTTPConnectionPool | None = None,
connection: HTTPConnection | None = None,
msg: _HttplibHTTPMessage | None = None,
retries: Retry | None = None,
enforce_content_length: bool = True,
request_method: str | None = None,
request_url: str | None = None,
auto_close: bool = True,
sock_shutdown: typing.Callable[[int], None] | None = None,
) -> None:
super().__init__(
headers=headers,
status=status,
version=version,
version_string=version_string,
reason=reason,
decode_content=decode_content,
request_url=request_url,
retries=retries,
)
self.enforce_content_length = enforce_content_length
self.auto_close = auto_close
self._body = None
self._fp: _HttplibHTTPResponse | None = None
self._original_response = original_response
self._fp_bytes_read = 0
self.msg = msg
if body and isinstance(body, (str, bytes)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, "read"):
self._fp = body # type: ignore[assignment]
self._sock_shutdown = sock_shutdown
# Are we using the chunked-style of transfer encoding?
self.chunk_left: int | None = None
# Determine length of response
self.length_remaining = self._init_length(request_method)
# Used to return the correct amount of bytes for partial read()s
self._decoded_buffer = BytesQueueBuffer()
# If requested, preload the body.
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def release_conn(self) -> None:
if not self._pool or not self._connection:
return None
self._pool._put_conn(self._connection)
self._connection = None
def drain_conn(self) -> None:
"""
Read and discard any remaining HTTP response data in the response connection.
Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
"""
try:
self.read()
except (HTTPError, OSError, BaseSSLError, HTTPException):
pass
@property
def data(self) -> bytes:
# For backwards-compat with earlier urllib3 0.4 and earlier.
if self._body:
return self._body # type: ignore[return-value]
if self._fp:
return self.read(cache_content=True)
return None # type: ignore[return-value]
@property
def connection(self) -> HTTPConnection | None:
return self._connection
def isclosed(self) -> bool:
return is_fp_closed(self._fp)
def tell(self) -> int:
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``urllib3.response.HTTPResponse.read``
if bytes are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def _init_length(self, request_method: str | None) -> int | None:
"""
Set initial length value for Response content if available.
"""
length: int | None
content_length: str | None = self.headers.get("content-length")
if content_length is not None:
if self.chunked:
# This Response will fail with an IncompleteRead if it can't be
# received as chunked. This method falls back to attempt reading
# the response before raising an exception.
log.warning(
"Received response with both Content-Length and "
"Transfer-Encoding set. This is expressly forbidden "
"by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
"attempting to process response as Transfer-Encoding: "
"chunked."
)
return None
try:
# RFC 7230 section 3.3.2 specifies multiple content lengths can
# be sent in a single Content-Length header
# (e.g. Content-Length: 42, 42). This line ensures the values
# are all valid ints and that as long as the `set` length is 1,
# all values are the same. Otherwise, the header is invalid.
lengths = {int(val) for val in content_length.split(",")}
if len(lengths) > 1:
raise InvalidHeader(
"Content-Length contained multiple "
"unmatching values (%s)" % content_length
)
length = lengths.pop()
except ValueError:
length = None
else:
if length < 0:
length = None
else: # if content_length is None
length = None
# Convert status to int for comparison
# In some cases, httplib returns a status of "_UNKNOWN"
try:
status = int(self.status)
except ValueError:
status = 0
# Check for responses that shouldn't include a body
if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
length = 0
return length
@contextmanager
def _error_catcher(self) -> typing.Generator[None]:
"""
Catch low-level python exceptions, instead re-raising urllib3
variants, so that low-level exceptions are not leaked in the
high-level api.
On exit, release the connection back to the pool.
"""
clean_exit = False
try:
try:
yield
except SocketTimeout as e:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if "read operation timed out" not in str(e):
# SSL errors related to framing/MAC get wrapped and reraised here
raise SSLError(e) from e
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except IncompleteRead as e:
if (
e.expected is not None
and e.partial is not None
and e.expected == -e.partial
):
arg = "Response may not contain content."
else:
arg = f"Connection broken: {e!r}"
raise ProtocolError(arg, e) from e
except (HTTPException, OSError) as e:
raise ProtocolError(f"Connection broken: {e!r}", e) from e
# If no exception is thrown, we should avoid cleaning up
# unnecessarily.
clean_exit = True
finally:
# If we didn't terminate cleanly, we need to throw away our
# connection.
if not clean_exit:
# The response may not be closed but we're not going to use it
# anymore so close it now to ensure that the connection is
# released back to the pool.
if self._original_response:
self._original_response.close()
# Closing the response may not actually be sufficient to close
# everything, so if we have a hold of the connection close that
# too.
if self._connection:
self._connection.close()
# If we hold the original response but it's closed now, we should
# return the connection back to the pool.
if self._original_response and self._original_response.isclosed():
self.release_conn()
def _fp_read(
self,
amt: int | None = None,
*,
read1: bool = False,
) -> bytes:
"""
Read a response with the thought that reading the number of bytes
larger than can fit in a 32-bit int at a time via SSL in some
known cases leads to an overflow error that has to be prevented
if `amt` or `self.length_remaining` indicate that a problem may
happen.
The known cases:
* CPython < 3.9.7 because of a bug
https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900.
* urllib3 injected with pyOpenSSL-backed SSL-support.
* CPython < 3.10 only when `amt` does not fit 32-bit int.
"""
assert self._fp
c_int_max = 2**31 - 1
if (
(amt and amt > c_int_max)
or (
amt is None
and self.length_remaining
and self.length_remaining > c_int_max
)
) and (util.IS_PYOPENSSL or sys.version_info < (3, 10)):
if read1:
return self._fp.read1(c_int_max)
buffer = io.BytesIO()
# Besides `max_chunk_amt` being a maximum chunk size, it
# affects memory overhead of reading a response by this
# method in CPython.
# `c_int_max` equal to 2 GiB - 1 byte is the actual maximum
# chunk size that does not lead to an overflow error, but
# 256 MiB is a compromise.
max_chunk_amt = 2**28
while amt is None or amt != 0:
if amt is not None:
chunk_amt = min(amt, max_chunk_amt)
amt -= chunk_amt
else:
chunk_amt = max_chunk_amt
data = self._fp.read(chunk_amt)
if not data:
break
buffer.write(data)
del data # to reduce peak memory usage by `max_chunk_amt`.
return buffer.getvalue()
elif read1:
return self._fp.read1(amt) if amt is not None else self._fp.read1()
else:
# StringIO doesn't like amt=None
return self._fp.read(amt) if amt is not None else self._fp.read()
def _raw_read(
self,
amt: int | None = None,
*,
read1: bool = False,
) -> bytes:
"""
Reads `amt` of bytes from the socket.
"""
if self._fp is None:
return None # type: ignore[return-value]
fp_closed = getattr(self._fp, "closed", False)
with self._error_catcher():
data = self._fp_read(amt, read1=read1) if not fp_closed else b""
if amt is not None and amt != 0 and not data:
# Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
if (
self.enforce_content_length
and self.length_remaining is not None
and self.length_remaining != 0
):
# This is an edge case that httplib failed to cover due
# to concerns of backward compatibility. We're
# addressing it here to make sure IncompleteRead is
# raised during streaming, so all calls with incorrect
# Content-Length are caught.
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
elif read1 and (
(amt != 0 and not data) or self.length_remaining == len(data)
):
# All data has been read, but `self._fp.read1` in
# CPython 3.12 and older doesn't always close
# `http.client.HTTPResponse`, so we close it here.
# See https://github.com/python/cpython/issues/113199
self._fp.close()
if data:
self._fp_bytes_read += len(data)
if self.length_remaining is not None:
self.length_remaining -= len(data)
return data
def read(
self,
amt: int | None = None,
decode_content: bool | None = None,
cache_content: bool = False,
) -> bytes:
"""
Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if amt and amt < 0:
# Negative numbers and `None` should be treated the same.
amt = None
elif amt is not None:
cache_content = False
if len(self._decoded_buffer) >= amt:
return self._decoded_buffer.get(amt)
data = self._raw_read(amt)
flush_decoder = amt is None or (amt != 0 and not data)
if not data and len(self._decoded_buffer) == 0:
return data
if amt is None:
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
else:
# do not waste memory on buffer when not decoding
if not decode_content:
if self._has_decoded_content:
raise RuntimeError(
"Calling read(decode_content=False) is not supported after "
"read(decode_content=True) was called."
)
return data
decoded_data = self._decode(data, decode_content, flush_decoder)
self._decoded_buffer.put(decoded_data)
while len(self._decoded_buffer) < amt and data:
# TODO make sure to initially read enough data to get past the headers
# For example, the GZ file header takes 10 bytes, we don't want to read
# it one byte at a time
data = self._raw_read(amt)
decoded_data = self._decode(data, decode_content, flush_decoder)
self._decoded_buffer.put(decoded_data)
data = self._decoded_buffer.get(amt)
return data
def read1(
self,
amt: int | None = None,
decode_content: bool | None = None,
) -> bytes:
"""
Similar to ``http.client.HTTPResponse.read1`` and documented
in :meth:`io.BufferedReader.read1`, but with an additional parameter:
``decode_content``.
:param amt:
How much of the content to read.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if decode_content is None:
decode_content = self.decode_content
if amt and amt < 0:
# Negative numbers and `None` should be treated the same.
amt = None
# try and respond without going to the network
if self._has_decoded_content:
if not decode_content:
raise RuntimeError(
"Calling read1(decode_content=False) is not supported after "
"read1(decode_content=True) was called."
)
if len(self._decoded_buffer) > 0:
if amt is None:
return self._decoded_buffer.get_all()
return self._decoded_buffer.get(amt)
if amt == 0:
return b""
# FIXME, this method's type doesn't say returning None is possible
data = self._raw_read(amt, read1=True)
if not decode_content or data is None:
return data
self._init_decoder()
while True:
flush_decoder = not data
decoded_data = self._decode(data, decode_content, flush_decoder)
self._decoded_buffer.put(decoded_data)
if decoded_data or flush_decoder:
break
data = self._raw_read(8192, read1=True)
if amt is None:
return self._decoded_buffer.get_all()
return self._decoded_buffer.get(amt)
def stream(
self, amt: int | None = 2**16, decode_content: bool | None = None
) -> typing.Generator[bytes]:
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked and self.supports_chunked_reads():
yield from self.read_chunked(amt, decode_content=decode_content)
else:
while not is_fp_closed(self._fp) or len(self._decoded_buffer) > 0:
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
# Overrides from io.IOBase
def readable(self) -> bool:
return True
def shutdown(self) -> None:
if not self._sock_shutdown:
raise ValueError("Cannot shutdown socket as self._sock_shutdown is not set")
if self._connection is None:
raise RuntimeError(
"Cannot shutdown as connection has already been released to the pool"
)
self._sock_shutdown(socket.SHUT_RD)
def close(self) -> None:
self._sock_shutdown = None
if not self.closed and self._fp:
self._fp.close()
if self._connection:
self._connection.close()
if not self.auto_close:
io.IOBase.close(self)
@property
def closed(self) -> bool:
if not self.auto_close:
return io.IOBase.closed.__get__(self) # type: ignore[no-any-return]
elif self._fp is None:
return True
elif hasattr(self._fp, "isclosed"):
return self._fp.isclosed()
elif hasattr(self._fp, "closed"):
return self._fp.closed
else:
return True
def fileno(self) -> int:
if self._fp is None:
raise OSError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise OSError(
"The file-like object this HTTPResponse is wrapped "
"around has no file descriptor"
)
def flush(self) -> None:
if (
self._fp is not None
and hasattr(self._fp, "flush")
and not getattr(self._fp, "closed", False)
):
return self._fp.flush()
def supports_chunked_reads(self) -> bool:
"""
Checks if the underlying file-like object looks like a
:class:`http.client.HTTPResponse` object. We do this by testing for
the fp attribute. If it is present we assume it returns raw chunks as
processed by read_chunked().
"""
return hasattr(self._fp, "fp")
def _update_chunk_length(self) -> None:
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return None
line = self._fp.fp.readline() # type: ignore[union-attr]
line = line.split(b";", 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
self.close()
if line:
# Invalid chunked protocol response, abort.
raise InvalidChunkLength(self, line) from None
else:
# Truncated at start of next chunk
raise ProtocolError("Response ended prematurely") from None
def _handle_chunk(self, amt: int | None) -> bytes:
returned_chunk = None
if amt is None:
chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr]
returned_chunk = chunk
self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif self.chunk_left is not None and amt < self.chunk_left:
value = self._fp._safe_read(amt) # type: ignore[union-attr]
self.chunk_left = self.chunk_left - amt
returned_chunk = value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt) # type: ignore[union-attr]
self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
self.chunk_left = None
returned_chunk = value
else: # amt > self.chunk_left
returned_chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr]
self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
self.chunk_left = None
return returned_chunk # type: ignore[no-any-return]
def read_chunked(
self, amt: int | None = None, decode_content: bool | None = None
) -> typing.Generator[bytes]:
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked(
"Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing."
)
if not self.supports_chunked_reads():
raise BodyNotHttplibCompatible(
"Body should be http.client.HTTPResponse like. "
"It should have have an fp attribute which returns raw chunks."
)
with self._error_catcher():
# Don't bother reading the body of a HEAD request.
if self._original_response and is_response_to_head(self._original_response):
self._original_response.close()
return None
# If a response is already read and closed
# then return immediately.
if self._fp.fp is None: # type: ignore[union-attr]
return None
if amt and amt < 0:
# Negative numbers and `None` should be treated the same,
# but httplib handles only `None` correctly.
amt = None
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
decoded = self._decode(
chunk, decode_content=decode_content, flush_decoder=False
)
if decoded:
yield decoded
if decode_content:
# On CPython and PyPy, we should never need to flush the
# decoder. However, on Jython we *might* need to, so
# lets defensively do it anyway.
decoded = self._flush_decoder()
if decoded: # Platform-specific: Jython.
yield decoded
# Chunk content ends with \r\n: discard it.
while self._fp is not None:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b"\r\n":
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close()
@property
def url(self) -> str | None:
"""
Returns the URL that was the source of this response.
If the request that generated this response redirected, this method
will return the final redirect location.
"""
return self._request_url
@url.setter
def url(self, url: str | None) -> None:
self._request_url = url
def __iter__(self) -> typing.Iterator[bytes]:
buffer: list[bytes] = []
for chunk in self.stream(decode_content=True):
if b"\n" in chunk:
chunks = chunk.split(b"\n")
yield b"".join(buffer) + chunks[0] + b"\n"
for x in chunks[1:-1]:
yield x + b"\n"
if chunks[-1]:
buffer = [chunks[-1]]
else:
buffer = []
else:
buffer.append(chunk)
if buffer:
yield b"".join(buffer)
| HTTPResponse |
python | PyCQA__flake8 | tests/integration/subdir/aplugin.py | {
"start": 115,
"end": 326
} | class ____:
"""Extension test plugin in its own directory."""
def __init__(self, tree):
"""Construct an instance of test plugin."""
def run(self):
"""Do nothing."""
| ExtensionTestPlugin2 |
python | huggingface__transformers | examples/modular-transformers/modeling_test_detr.py | {
"start": 10384,
"end": 13590
} | class ____(nn.Module):
"""
Convolutional backbone, using either the AutoBackbone API or one from the timm library.
nn.BatchNorm2d layers are replaced by TestDetrFrozenBatchNorm2d as defined above.
"""
def __init__(self, config):
super().__init__()
self.config = config
# For backwards compatibility we have to use the timm library directly instead of the AutoBackbone API
if config.use_timm_backbone:
# We default to values which were previously hard-coded. This enables configurability from the config
# using backbone arguments, while keeping the default behavior the same.
requires_backends(self, ["timm"])
kwargs = getattr(config, "backbone_kwargs", {})
kwargs = {} if kwargs is None else kwargs.copy()
out_indices = kwargs.pop("out_indices", (2, 3, 4) if config.num_feature_levels > 1 else (4,))
num_channels = kwargs.pop("in_chans", config.num_channels)
if config.dilation:
kwargs["output_stride"] = kwargs.get("output_stride", 16)
backbone = create_model(
config.backbone,
pretrained=config.use_pretrained_backbone,
features_only=True,
out_indices=out_indices,
in_chans=num_channels,
**kwargs,
)
else:
backbone = load_backbone(config)
# replace batch norm by frozen batch norm
with torch.no_grad():
replace_batch_norm(backbone)
self.model = backbone
self.intermediate_channel_sizes = (
self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
)
backbone_model_type = None
if config.backbone is not None:
backbone_model_type = config.backbone
elif config.backbone_config is not None:
backbone_model_type = config.backbone_config.model_type
else:
raise ValueError("Either `backbone` or `backbone_config` should be provided in the config")
if "resnet" in backbone_model_type:
for name, parameter in self.model.named_parameters():
if config.use_timm_backbone:
if "layer2" not in name and "layer3" not in name and "layer4" not in name:
parameter.requires_grad_(False)
else:
if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name:
parameter.requires_grad_(False)
def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
# send pixel_values through the model to get list of feature maps
features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps
out = []
for feature_map in features:
# downsample pixel_mask to match shape of corresponding feature_map
mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
out.append((feature_map, mask))
return out
| TestDetrConvEncoder |
python | ray-project__ray | python/ray/dashboard/memory_utils.py | {
"start": 7235,
"end": 18614
} | class ____:
def __init__(
self,
entries: List[MemoryTableEntry],
group_by_type: GroupByType = GroupByType.NODE_ADDRESS,
sort_by_type: SortingType = SortingType.PID,
):
self.table = entries
# Group is a list of memory tables grouped by a group key.
self.group = {}
self.summary = defaultdict(int)
# NOTE YOU MUST SORT TABLE BEFORE GROUPING.
# self._group_by(..)._sort_by(..) != self._sort_by(..)._group_by(..)
if group_by_type and sort_by_type:
self.setup(group_by_type, sort_by_type)
elif group_by_type:
self._group_by(group_by_type)
elif sort_by_type:
self._sort_by(sort_by_type)
def setup(self, group_by_type: GroupByType, sort_by_type: SortingType):
"""Setup memory table.
This will sort entries first and group them after.
Sort order will be still kept.
"""
self._sort_by(sort_by_type)._group_by(group_by_type)
for group_memory_table in self.group.values():
group_memory_table.summarize()
self.summarize()
return self
def insert_entry(self, entry: MemoryTableEntry):
self.table.append(entry)
def summarize(self):
# Reset summary.
total_object_size = 0
total_local_ref_count = 0
total_pinned_in_memory = 0
total_used_by_pending_task = 0
total_captured_in_objects = 0
total_actor_handles = 0
for entry in self.table:
if entry.object_size > 0:
total_object_size += entry.object_size
if entry.reference_type == ReferenceType.LOCAL_REFERENCE.value:
total_local_ref_count += 1
elif entry.reference_type == ReferenceType.PINNED_IN_MEMORY.value:
total_pinned_in_memory += 1
elif entry.reference_type == ReferenceType.USED_BY_PENDING_TASK.value:
total_used_by_pending_task += 1
elif entry.reference_type == ReferenceType.CAPTURED_IN_OBJECT.value:
total_captured_in_objects += 1
elif entry.reference_type == ReferenceType.ACTOR_HANDLE.value:
total_actor_handles += 1
self.summary = {
"total_object_size": total_object_size,
"total_local_ref_count": total_local_ref_count,
"total_pinned_in_memory": total_pinned_in_memory,
"total_used_by_pending_task": total_used_by_pending_task,
"total_captured_in_objects": total_captured_in_objects,
"total_actor_handles": total_actor_handles,
}
return self
def _sort_by(self, sorting_type: SortingType):
if sorting_type == SortingType.PID:
self.table.sort(key=lambda entry: entry.pid)
elif sorting_type == SortingType.OBJECT_SIZE:
self.table.sort(key=lambda entry: entry.object_size)
elif sorting_type == SortingType.REFERENCE_TYPE:
self.table.sort(key=lambda entry: entry.reference_type)
else:
raise ValueError(f"Give sorting type: {sorting_type} is invalid.")
return self
def _group_by(self, group_by_type: GroupByType):
"""Group entries and summarize the result.
NOTE: Each group is another MemoryTable.
"""
# Reset group
self.group = {}
# Build entries per group.
group = defaultdict(list)
for entry in self.table:
group[entry.group_key(group_by_type)].append(entry)
# Build a group table.
for group_key, entries in group.items():
self.group[group_key] = MemoryTable(
entries, group_by_type=None, sort_by_type=None
)
for group_key, group_memory_table in self.group.items():
group_memory_table.summarize()
return self
def as_dict(self):
return {
"summary": self.summary,
"group": {
group_key: {
"entries": group_memory_table.get_entries(),
"summary": group_memory_table.summary,
}
for group_key, group_memory_table in self.group.items()
},
}
def get_entries(self) -> List[dict]:
return [entry.as_dict() for entry in self.table]
def __repr__(self):
return str(self.as_dict())
def __str__(self):
return self.__repr__()
def construct_memory_table(
workers_stats: List,
group_by: GroupByType = GroupByType.NODE_ADDRESS,
sort_by=SortingType.OBJECT_SIZE,
) -> MemoryTable:
memory_table_entries = []
for core_worker_stats in workers_stats:
pid = core_worker_stats["pid"]
is_driver = core_worker_stats.get("workerType") == "DRIVER"
node_address = core_worker_stats["ipAddress"]
object_refs = core_worker_stats.get("objectRefs", [])
for object_ref in object_refs:
memory_table_entry = MemoryTableEntry(
object_ref=object_ref,
node_address=node_address,
is_driver=is_driver,
pid=pid,
)
if memory_table_entry.is_valid():
memory_table_entries.append(memory_table_entry)
memory_table = MemoryTable(
memory_table_entries, group_by_type=group_by, sort_by_type=sort_by
)
return memory_table
def track_reference_size(group):
"""Returns dictionary mapping reference type
to memory usage for a given memory table group."""
d = defaultdict(int)
table_name = {
"LOCAL_REFERENCE": "total_local_ref_count",
"PINNED_IN_MEMORY": "total_pinned_in_memory",
"USED_BY_PENDING_TASK": "total_used_by_pending_task",
"CAPTURED_IN_OBJECT": "total_captured_in_objects",
"ACTOR_HANDLE": "total_actor_handles",
}
for entry in group["entries"]:
size = entry["object_size"]
if size == -1:
# size not recorded
size = 0
d[table_name[entry["reference_type"]]] += size
return d
def memory_summary(
state,
group_by="NODE_ADDRESS",
sort_by="OBJECT_SIZE",
line_wrap=True,
unit="B",
num_entries=None,
) -> str:
# Get terminal size
import shutil
size = shutil.get_terminal_size((80, 20)).columns
line_wrap_threshold = 137
# Unit conversions
units = {"B": 10**0, "KB": 10**3, "MB": 10**6, "GB": 10**9}
# Fetch core memory worker stats, store as a dictionary
core_worker_stats = []
for raylet in state.node_table():
if not raylet["Alive"]:
continue
try:
stats = node_stats_to_dict(
node_stats(raylet["NodeManagerAddress"], raylet["NodeManagerPort"])
)
except RuntimeError:
continue
core_worker_stats.extend(stats["coreWorkersStats"])
assert type(stats) is dict and "coreWorkersStats" in stats
# Build memory table with "group_by" and "sort_by" parameters
group_by, sort_by = get_group_by_type(group_by), get_sorting_type(sort_by)
memory_table = construct_memory_table(
core_worker_stats, group_by, sort_by
).as_dict()
assert "summary" in memory_table and "group" in memory_table
# Build memory summary
mem = ""
group_by, sort_by = group_by.name.lower().replace(
"_", " "
), sort_by.name.lower().replace("_", " ")
summary_labels = [
"Mem Used by Objects",
"Local References",
"Pinned",
"Used by task",
"Captured in Objects",
"Actor Handles",
]
summary_string = "{:<19} {:<16} {:<12} {:<13} {:<19} {:<13}\n"
object_ref_labels = [
"IP Address",
"PID",
"Type",
"Call Site",
"Status",
"Attempt",
"Size",
"Reference Type",
"Object Ref",
]
object_ref_string = "{:<13} | {:<8} | {:<7} | {:<9} \
| {:<9} | {:<8} | {:<8} | {:<14} | {:<10}\n"
if size > line_wrap_threshold and line_wrap:
object_ref_string = "{:<15} {:<5} {:<6} {:<22} {:<14} {:<8} {:<6} \
{:<18} {:<56}\n"
mem += f"Grouping by {group_by}...\
Sorting by {sort_by}...\
Display {num_entries if num_entries is not None else 'all'} \
entries per group...\n\n\n"
for key, group in memory_table["group"].items():
# Group summary
summary = group["summary"]
ref_size = track_reference_size(group)
for k, v in summary.items():
if k == "total_object_size":
summary[k] = str(v / units[unit]) + f" {unit}"
else:
summary[k] = str(v) + f", ({ref_size[k] / units[unit]} {unit})"
mem += f"--- Summary for {group_by}: {key} ---\n"
mem += summary_string.format(*summary_labels)
mem += summary_string.format(*summary.values()) + "\n"
# Memory table per group
mem += f"--- Object references for {group_by}: {key} ---\n"
mem += object_ref_string.format(*object_ref_labels)
n = 1 # Counter for num entries per group
for entry in group["entries"]:
if num_entries is not None and n > num_entries:
break
entry["object_size"] = (
str(entry["object_size"] / units[unit]) + f" {unit}"
if entry["object_size"] > -1
else "?"
)
num_lines = 1
if size > line_wrap_threshold and line_wrap:
call_site_length = 22
if len(entry["call_site"]) == 0:
entry["call_site"] = ["disabled"]
else:
entry["call_site"] = [
entry["call_site"][i : i + call_site_length]
for i in range(0, len(entry["call_site"]), call_site_length)
]
task_status_length = 12
entry["task_status"] = [
entry["task_status"][i : i + task_status_length]
for i in range(0, len(entry["task_status"]), task_status_length)
]
num_lines = max(len(entry["call_site"]), len(entry["task_status"]))
else:
mem += "\n"
object_ref_values = [
entry["node_ip_address"],
entry["pid"],
entry["type"],
entry["call_site"],
entry["task_status"],
entry["attempt_number"],
entry["object_size"],
entry["reference_type"],
entry["object_ref"],
]
for i in range(len(object_ref_values)):
if not isinstance(object_ref_values[i], list):
object_ref_values[i] = [object_ref_values[i]]
object_ref_values[i].extend(
["" for x in range(num_lines - len(object_ref_values[i]))]
)
for i in range(num_lines):
row = [elem[i] for elem in object_ref_values]
mem += object_ref_string.format(*row)
mem += "\n"
n += 1
mem += (
"To record callsite information for each ObjectRef created, set "
"env variable RAY_record_ref_creation_sites=1\n\n"
)
return mem
| MemoryTable |
python | zarr-developers__zarr-python | tests/conftest.py | {
"start": 4705,
"end": 15035
} | class ____:
shape: tuple[int, ...]
dtype: str
order: MemoryOrder
@pytest.fixture
def array_fixture(request: pytest.FixtureRequest) -> npt.NDArray[Any]:
array_request: ArrayRequest = request.param
return (
np.arange(np.prod(array_request.shape))
.reshape(array_request.shape, order=array_request.order)
.astype(array_request.dtype)
)
@pytest.fixture(params=(2, 3), ids=["zarr2", "zarr3"])
def zarr_format(request: pytest.FixtureRequest) -> ZarrFormat:
if request.param == 2:
return 2
elif request.param == 3:
return 3
msg = f"Invalid zarr format requested. Got {request.param}, expected on of (2,3)."
raise ValueError(msg)
def _clear_registries() -> None:
registries = zarr.registry._collect_entrypoints()
for registry in registries:
registry.lazy_load_list.clear()
@pytest.fixture
def set_path() -> Generator[None, None, None]:
tests_dir = str(pathlib.Path(__file__).parent.absolute())
sys.path.append(tests_dir)
_clear_registries()
zarr.registry._collect_entrypoints()
yield
sys.path.remove(tests_dir)
_clear_registries()
zarr.registry._collect_entrypoints()
config.reset()
def pytest_addoption(parser: Any) -> None:
parser.addoption(
"--run-slow-hypothesis",
action="store_true",
default=False,
help="run slow hypothesis tests",
)
def pytest_collection_modifyitems(config: Any, items: Any) -> None:
if config.getoption("--run-slow-hypothesis"):
return
skip_slow_hyp = pytest.mark.skip(reason="need --run-slow-hypothesis option to run")
for item in items:
if "slow_hypothesis" in item.keywords:
item.add_marker(skip_slow_hyp)
settings.register_profile(
"default",
parent=settings.get_profile("default"),
max_examples=300,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.too_slow],
deadline=None,
verbosity=Verbosity.verbose,
)
settings.register_profile(
"ci",
parent=settings.get_profile("ci"),
max_examples=300,
derandomize=True, # more like regression testing
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.too_slow],
)
settings.register_profile(
"nightly",
max_examples=500,
parent=settings.get_profile("ci"),
derandomize=False,
stateful_step_count=100,
)
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "default"))
# TODO: uncomment these overrides when we can get mypy to accept them
"""
@overload
def create_array_metadata(
*,
shape: ShapeLike,
dtype: npt.DTypeLike,
chunks: tuple[int, ...] | Literal["auto"],
shards: None,
filters: FiltersLike,
compressors: CompressorsLike,
serializer: SerializerLike,
fill_value: Any | None,
order: MemoryOrder | None,
zarr_format: Literal[2],
attributes: dict[str, JSON] | None,
chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None,
dimension_names: None,
) -> ArrayV2Metadata: ...
@overload
def create_array_metadata(
*,
shape: ShapeLike,
dtype: npt.DTypeLike,
chunks: tuple[int, ...] | Literal["auto"],
shards: ShardsLike | None,
filters: FiltersLike,
compressors: CompressorsLike,
serializer: SerializerLike,
fill_value: Any | None,
order: None,
zarr_format: Literal[3],
attributes: dict[str, JSON] | None,
chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None,
dimension_names: Iterable[str] | None,
) -> ArrayV3Metadata: ...
"""
def create_array_metadata(
*,
shape: ShapeLike,
dtype: npt.DTypeLike,
chunks: tuple[int, ...] | Literal["auto"] = "auto",
shards: ShardsLike | None = None,
filters: FiltersLike = "auto",
compressors: CompressorsLike = "auto",
serializer: SerializerLike = "auto",
fill_value: Any = 0,
order: MemoryOrder | None = None,
zarr_format: ZarrFormat,
attributes: dict[str, JSON] | None = None,
chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None = None,
dimension_names: DimensionNames = None,
) -> ArrayV2Metadata | ArrayV3Metadata:
"""
Create array metadata
"""
dtype_parsed = get_data_type_from_native_dtype(dtype)
shape_parsed = parse_shapelike(shape)
chunk_key_encoding_parsed = _parse_chunk_key_encoding(
chunk_key_encoding, zarr_format=zarr_format
)
item_size = 1
if isinstance(dtype_parsed, HasItemSize):
item_size = dtype_parsed.item_size
shard_shape_parsed, chunk_shape_parsed = _auto_partition(
array_shape=shape_parsed,
shard_shape=shards,
chunk_shape=chunks,
item_size=item_size,
)
if order is None:
order_parsed = zarr_config.get("array.order")
else:
order_parsed = order
chunks_out: tuple[int, ...]
if zarr_format == 2:
filters_parsed, compressor_parsed = _parse_chunk_encoding_v2(
compressor=compressors, filters=filters, dtype=dtype_parsed
)
chunk_key_encoding_parsed = cast("V2ChunkKeyEncoding", chunk_key_encoding_parsed)
return ArrayV2Metadata(
shape=shape_parsed,
dtype=dtype_parsed,
chunks=chunk_shape_parsed,
order=order_parsed,
dimension_separator=chunk_key_encoding_parsed.separator,
fill_value=fill_value,
compressor=compressor_parsed,
filters=filters_parsed,
attributes=attributes,
)
elif zarr_format == 3:
array_array, array_bytes, bytes_bytes = _parse_chunk_encoding_v3(
compressors=compressors,
filters=filters,
serializer=serializer,
dtype=dtype_parsed,
)
sub_codecs: tuple[Codec, ...] = (*array_array, array_bytes, *bytes_bytes)
codecs_out: tuple[Codec, ...]
if shard_shape_parsed is not None:
index_location = None
if isinstance(shards, dict):
index_location = ShardingCodecIndexLocation(shards.get("index_location", None))
if index_location is None:
index_location = ShardingCodecIndexLocation.end
sharding_codec = ShardingCodec(
chunk_shape=chunk_shape_parsed,
codecs=sub_codecs,
index_location=index_location,
)
sharding_codec.validate(
shape=chunk_shape_parsed,
dtype=dtype_parsed,
chunk_grid=RegularChunkGrid(chunk_shape=shard_shape_parsed),
)
codecs_out = (sharding_codec,)
chunks_out = shard_shape_parsed
else:
chunks_out = chunk_shape_parsed
codecs_out = sub_codecs
return ArrayV3Metadata(
shape=shape_parsed,
data_type=dtype_parsed,
chunk_grid=RegularChunkGrid(chunk_shape=chunks_out),
chunk_key_encoding=chunk_key_encoding_parsed,
fill_value=fill_value,
codecs=codecs_out,
attributes=attributes,
dimension_names=dimension_names,
)
raise ValueError(f"Invalid Zarr format: {zarr_format}")
# TODO: uncomment these overrides when we can get mypy to accept them
"""
@overload
def meta_from_array(
array: np.ndarray[Any, Any],
chunks: tuple[int, ...] | Literal["auto"],
shards: None,
filters: FiltersLike,
compressors: CompressorsLike,
serializer: SerializerLike,
fill_value: Any | None,
order: MemoryOrder | None,
zarr_format: Literal[2],
attributes: dict[str, JSON] | None,
chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None,
dimension_names: Iterable[str] | None,
) -> ArrayV2Metadata: ...
@overload
def meta_from_array(
array: np.ndarray[Any, Any],
chunks: tuple[int, ...] | Literal["auto"],
shards: ShardsLike | None,
filters: FiltersLike,
compressors: CompressorsLike,
serializer: SerializerLike,
fill_value: Any | None,
order: None,
zarr_format: Literal[3],
attributes: dict[str, JSON] | None,
chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None,
dimension_names: Iterable[str] | None,
) -> ArrayV3Metadata: ...
"""
def meta_from_array(
array: np.ndarray[Any, Any],
*,
chunks: tuple[int, ...] | Literal["auto"] = "auto",
shards: ShardsLike | None = None,
filters: FiltersLike = "auto",
compressors: CompressorsLike = "auto",
serializer: SerializerLike = "auto",
fill_value: Any = 0,
order: MemoryOrder | None = None,
zarr_format: ZarrFormat = 3,
attributes: dict[str, JSON] | None = None,
chunk_key_encoding: ChunkKeyEncoding | ChunkKeyEncodingLike | None = None,
dimension_names: DimensionNames = None,
) -> ArrayV3Metadata | ArrayV2Metadata:
"""
Create array metadata from an array
"""
return create_array_metadata(
shape=array.shape,
dtype=array.dtype,
chunks=chunks,
shards=shards,
filters=filters,
compressors=compressors,
serializer=serializer,
fill_value=fill_value,
order=order,
zarr_format=zarr_format,
attributes=attributes,
chunk_key_encoding=chunk_key_encoding,
dimension_names=dimension_names,
)
def skip_object_dtype(dtype: ZDType[Any, Any]) -> None:
if dtype.dtype_cls is type(np.dtype("O")):
msg = (
f"{dtype} uses the numpy object data type, which is not a valid target for data "
"type resolution"
)
pytest.skip(msg)
def nan_equal(a: object, b: object) -> bool:
"""
Convenience function for equality comparison between two values ``a`` and ``b``, that might both
be NaN. Returns True if both ``a`` and ``b`` are NaN, otherwise returns a == b
"""
if math.isnan(a) and math.isnan(b): # type: ignore[arg-type]
return True
return a == b
def deep_nan_equal(a: object, b: object) -> bool:
if isinstance(a, Mapping) and isinstance(b, Mapping):
return all(deep_nan_equal(a[k], b[k]) for k in a)
if isinstance(a, Sequence) and isinstance(b, Sequence):
return all(deep_nan_equal(a[i], b[i]) for i in range(len(a)))
return nan_equal(a, b)
| ArrayRequest |
python | numba__numba | numba/tests/test_ufuncs.py | {
"start": 59070,
"end": 59781
} | class ____(_LoopTypesTester):
_ufuncs = [np.power] # issue #757
_required_types = 'bBhHiIlLqQfdFD'
_skip_types = 'mMO' + _LoopTypesTester._skip_types
def _arg_for_type(self, a_letter_type, index=0):
res = super(self.__class__, self)._arg_for_type(a_letter_type,
index=index)
if a_letter_type in 'bBhHiIlLqQ' and index == 1:
# For integer power, avoid a negative exponent, as it triggers
# undefined behavior that may differ in results from Numba
# to the compiler used to compile NumPy
res[res < 0] = 3
return res
TestLoopTypesPower.autogenerate()
| TestLoopTypesPower |
python | doocs__leetcode | solution/2300-2399/2360.Longest Cycle in a Graph/Solution.py | {
"start": 0,
"end": 579
} | class ____:
def longestCycle(self, edges: List[int]) -> int:
n = len(edges)
vis = [False] * n
ans = -1
for i in range(n):
if vis[i]:
continue
j = i
cycle = []
while j != -1 and not vis[j]:
vis[j] = True
cycle.append(j)
j = edges[j]
if j == -1:
continue
m = len(cycle)
k = next((k for k in range(m) if cycle[k] == j), inf)
ans = max(ans, m - k)
return ans
| Solution |
python | huggingface__transformers | tests/models/chameleon/test_modeling_chameleon.py | {
"start": 1452,
"end": 7358
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=35,
is_training=False,
use_input_mask=True,
use_labels=True,
vocab_size=99,
image_token_id=4,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=2,
num_key_value_heads=2,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=0,
vq_num_embeds=5,
vq_embed_dim=5,
vq_channel_multiplier=[1, 2],
vq_img_token_start_id=10, # has to be less than vocab size when added with vq_num_embeds
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.image_token_id = image_token_id
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.scope = scope
self.vq_num_embeds = vq_num_embeds
self.vq_embed_dim = vq_embed_dim
self.vq_channel_multiplier = vq_channel_multiplier
self.vq_img_token_start_id = vq_img_token_start_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
# create dummy vocab map for image2bpe mapping if it needs remapping
# we assume that vocab size is big enough to account for image tokens somewhere in the beginning
# same way as in real ckpt, when img tokens are in first half of embeds
# we will need "vq_num_embeds" amount of tokens
vocab_map = {i: chr(i) for i in range(self.vocab_size)}
vocab_map[self.image_token_id] = "<image>"
start = self.vq_img_token_start_id
end = self.vq_img_token_start_id + self.vq_num_embeds
for i in range(start, end):
image_token_infix = "".join(chr(ord("A") + int(c)) for c in str(i))
# dummy str for each image token, anything starting with IMGIMG
vocab_map[i] = f"IMGIMG{image_token_infix}Z"
return ChameleonConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
vocabulary_map={v: k for k, v in vocab_map.items()},
vq_config=self.get_vq_config(),
)
def get_vq_config(self):
return {
"embed_dim": self.vq_embed_dim,
"num_embeddings": self.vq_num_embeds,
"latent_channels": self.vq_embed_dim,
"in_channels": 3,
"base_channels": 32, # we have a GroupNorm of 32 groups, so can't do less
"channel_multiplier": self.vq_channel_multiplier,
}
def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = ChameleonModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| ChameleonModelTester |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/refurb/FURB156.py | {
"start": 843,
"end": 931
} | class ____:
def method(self):
"01234567"
def function():
"""01234567"""
| C |
python | pandas-dev__pandas | pandas/io/formats/format.py | {
"start": 55458,
"end": 55905
} | class ____(_Datetime64Formatter):
values: DatetimeArray
def _format_strings(self) -> list[str]:
"""we by definition have a TZ"""
ido = self.values._is_dates_only
values = self.values.astype(object)
formatter = self.formatter or get_format_datetime64(
ido, date_format=self.date_format
)
fmt_values = [formatter(x) for x in values]
return fmt_values
| _Datetime64TZFormatter |
python | django__django | tests/get_or_create/models.py | {
"start": 629,
"end": 713
} | class ____(models.Model):
text = models.CharField(max_length=255, unique=True)
| Tag |
python | pandas-dev__pandas | pandas/tests/io/test_parquet.py | {
"start": 10934,
"end": 11473
} | class ____:
def check_error_on_write(self, df, engine, exc, err_msg, temp_file_path):
# check that we are raising the exception on writing
with pytest.raises(exc, match=err_msg):
to_parquet(df, temp_file_path, engine, compression=None)
def check_external_error_on_write(self, df, engine, exc, temp_file_path):
# check that an external library is raising the exception on writing
with tm.external_error_raised(exc):
to_parquet(df, temp_file_path, engine, compression=None)
| Base |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 5757,
"end": 5878
} | class ____(WeaviateBaseError):
"""Is raised if weaviate is not available on the given url+port."""
| WeaviateStartUpError |
python | huggingface__transformers | src/transformers/models/superglue/modeling_superglue.py | {
"start": 6370,
"end": 8072
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*):
Loss computed during training.
matches (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`):
Index of keypoint matched in the other image.
matching_scores (`torch.FloatTensor` of shape `(batch_size, 2, num_matches)`):
Scores of predicted matches.
keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
Absolute (x, y) coordinates of predicted keypoints in a given image.
mask (`torch.IntTensor` of shape `(batch_size, num_keypoints)`):
Mask indicating which values in matches and matching_scores are keypoint matching information.
hidden_states (`tuple[torch.FloatTensor, ...]`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, 2, num_channels,
num_keypoints)`, returned when `output_hidden_states=True` is passed or when
`config.output_hidden_states=True`)
attentions (`tuple[torch.FloatTensor, ...]`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, 2, num_heads, num_keypoints,
num_keypoints)`, returned when `output_attentions=True` is passed or when `config.output_attentions=True`)
"""
loss: Optional[torch.FloatTensor] = None
matches: Optional[torch.FloatTensor] = None
matching_scores: Optional[torch.FloatTensor] = None
keypoints: Optional[torch.FloatTensor] = None
mask: Optional[torch.IntTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
| SuperGlueKeypointMatchingOutput |
python | huggingface__transformers | src/transformers/models/biogpt/modeling_biogpt.py | {
"start": 14195,
"end": 14484
} | class ____(PreTrainedModel):
config: BioGptConfig
base_model_prefix = "biogpt"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
@auto_docstring
| BioGptPreTrainedModel |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.