language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__pipenv | pipenv/patched/pip/_internal/models/pylock.py | {
"start": 5344,
"end": 6286
} | class ____:
lock_version: str = "1.0"
# (not supported) environments: Optional[List[str]]
# (not supported) requires_python: Optional[str]
# (not supported) extras: List[str] = []
# (not supported) dependency_groups: List[str] = []
created_by: str = "pip"
packages: List[Package] = dataclasses.field(default_factory=list)
# (not supported) tool: Optional[Dict[str, Any]]
def as_toml(self) -> str:
return tomli_w.dumps(dataclasses.asdict(self, dict_factory=_toml_dict_factory))
@classmethod
def from_install_requirements(
cls, install_requirements: Iterable[InstallRequirement], base_dir: Path
) -> Self:
return cls(
packages=sorted(
(
Package.from_install_requirement(ireq, base_dir)
for ireq in install_requirements
),
key=lambda p: p.name,
)
)
| Pylock |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_dc_transforms.py | {
"start": 2331,
"end": 33464
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
@testing.fixture(params=["(MAD, DB)", "(DB, MAD)"])
def dc_decl_base(self, request, metadata):
_md = metadata
if request.param == "(MAD, DB)":
class Base(MappedAsDataclass, DeclarativeBase):
_mad_before = True
metadata = _md
type_annotation_map = {
str: String().with_variant(
String(50), "mysql", "mariadb", "oracle"
)
}
else:
# test #8665 by reversing the order of the classes
class Base(DeclarativeBase, MappedAsDataclass):
_mad_before = False
metadata = _md
type_annotation_map = {
str: String().with_variant(
String(50), "mysql", "mariadb", "oracle"
)
}
yield Base
Base.registry.dispose()
def test_basic_constructor_repr_base_cls(
self, dc_decl_base: Type[MappedAsDataclass]
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
x: Mapped[Optional[int]] = mapped_column(default=None)
bs: Mapped[List["B"]] = relationship( # noqa: F821
default_factory=list
)
class B(dc_decl_base):
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
a_id: Mapped[Optional[int]] = mapped_column(
ForeignKey("a.id"), init=False
)
x: Mapped[Optional[int]] = mapped_column(default=None)
A.__qualname__ = "some_module.A"
B.__qualname__ = "some_module.B"
eq_(
pyinspect.getfullargspec(A.__init__),
pyinspect.FullArgSpec(
args=["self", "data", "x", "bs"],
varargs=None,
varkw=None,
defaults=(LoaderCallableStatus.DONT_SET, mock.ANY),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
eq_(
pyinspect.getfullargspec(B.__init__),
pyinspect.FullArgSpec(
args=["self", "data", "x"],
varargs=None,
varkw=None,
defaults=(LoaderCallableStatus.DONT_SET,),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
a2 = A("10", x=5, bs=[B("data1"), B("data2", x=12)])
eq_(
repr(a2),
"some_module.A(id=None, data='10', x=5, "
"bs=[some_module.B(id=None, data='data1', a_id=None, x=None), "
"some_module.B(id=None, data='data2', a_id=None, x=12)])",
)
a3 = A("data")
eq_(repr(a3), "some_module.A(id=None, data='data', x=None, bs=[])")
# TODO: get this test to work with future anno mode as well
# anno only: @testing.exclusions.closed("doesn't work for future annotations mode yet") # noqa: E501
def test_generic_class(self):
"""further test for #8665"""
T_Value = TypeVar("T_Value")
class SomeBaseClass(DeclarativeBase):
pass
class GenericSetting(
MappedAsDataclass, SomeBaseClass, Generic[T_Value]
):
__tablename__ = "xx"
id: Mapped[int] = mapped_column(
Integer, primary_key=True, init=False
)
key: Mapped[str] = mapped_column(String, init=True)
value: Mapped[T_Value] = mapped_column(
JSON, init=True, default_factory=lambda: {}
)
new_instance: GenericSetting[Dict[str, Any]] = ( # noqa: F841
GenericSetting(key="x", value={"foo": "bar"})
)
def test_no_anno_doesnt_go_into_dc(
self, dc_decl_base: Type[MappedAsDataclass]
):
class User(dc_decl_base):
__tablename__: ClassVar[Optional[str]] = "user"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
username: Mapped[str]
password: Mapped[str]
addresses: Mapped[List["Address"]] = relationship( # noqa: F821
default_factory=list
)
class Address(dc_decl_base):
__tablename__: ClassVar[Optional[str]] = "address"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
# should not be in the dataclass constructor
user_id = mapped_column(ForeignKey(User.id))
email_address: Mapped[str]
a1 = Address("email@address")
eq_(a1.email_address, "email@address")
def test_warn_on_non_dc_mixin(self):
class _BaseMixin:
create_user: Mapped[int] = mapped_column()
update_user: Mapped[Optional[int]] = mapped_column(
default=None, init=False
)
class Base(DeclarativeBase, MappedAsDataclass, _BaseMixin):
pass
class SubMixin:
foo: Mapped[str]
bar: Mapped[str] = mapped_column()
with testing.expect_raises_message(
exc.InvalidRequestError,
r"When transforming .* to a dataclass, attribute\(s\) "
r"'foo', 'bar' originates from superclass .*SubMixin",
):
class User(SubMixin, Base):
__tablename__ = "sys_user"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
username: Mapped[str] = mapped_column(String)
password: Mapped[str] = mapped_column(String)
def test_basic_constructor_repr_cls_decorator(
self, registry: _RegistryType
):
@registry.mapped_as_dataclass()
class A:
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
x: Mapped[Optional[int]] = mapped_column(default=None)
bs: Mapped[List["B"]] = relationship( # noqa: F821
default_factory=list
)
@registry.mapped_as_dataclass()
class B:
__tablename__ = "b"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
a_id = mapped_column(ForeignKey("a.id"), init=False)
data: Mapped[str]
x: Mapped[Optional[int]] = mapped_column(default=None)
A.__qualname__ = "some_module.A"
B.__qualname__ = "some_module.B"
eq_(
pyinspect.getfullargspec(A.__init__),
pyinspect.FullArgSpec(
args=["self", "data", "x", "bs"],
varargs=None,
varkw=None,
defaults=(LoaderCallableStatus.DONT_SET, mock.ANY),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
eq_(
pyinspect.getfullargspec(B.__init__),
pyinspect.FullArgSpec(
args=["self", "data", "x"],
varargs=None,
varkw=None,
defaults=(LoaderCallableStatus.DONT_SET,),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
a2 = A("10", x=5, bs=[B("data1"), B("data2", x=12)])
# note a_id isn't included because it wasn't annotated
eq_(
repr(a2),
"some_module.A(id=None, data='10', x=5, "
"bs=[some_module.B(id=None, data='data1', x=None), "
"some_module.B(id=None, data='data2', x=12)])",
)
a3 = A("data")
eq_(repr(a3), "some_module.A(id=None, data='data', x=None, bs=[])")
# TODO: get this test to work with future anno mode as well
# anno only: @testing.exclusions.closed("doesn't work for future annotations mode yet") # noqa: E501
@testing.variation("dc_type", ["fn_decorator", "decorator", "superclass"])
def test_dataclass_fn(self, dc_type: Variation):
annotations = {}
def dc_callable(kls, **kw) -> Type[Any]:
annotations[kls] = kls.__annotations__
return dataclasses.dataclass(kls, **kw) # type: ignore
if dc_type.fn_decorator:
reg = registry()
@mapped_as_dataclass(reg, dataclass_callable=dc_callable)
class MappedClass:
__tablename__ = "mapped_class"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
eq_(annotations, {MappedClass: {"id": int, "name": str}})
elif dc_type.decorator:
reg = registry()
@reg.mapped_as_dataclass(dataclass_callable=dc_callable)
class MappedClass:
__tablename__ = "mapped_class"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
eq_(annotations, {MappedClass: {"id": int, "name": str}})
elif dc_type.superclass:
class Base(DeclarativeBase):
pass
class Mixin(MappedAsDataclass, dataclass_callable=dc_callable):
id: Mapped[int] = mapped_column(primary_key=True)
class MappedClass(Mixin, Base):
__tablename__ = "mapped_class"
name: Mapped[str]
eq_(
annotations,
{Mixin: {"id": int}, MappedClass: {"id": int, "name": str}},
)
else:
dc_type.fail()
def test_default_fn(self, dc_decl_base: Type[MappedAsDataclass]):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column(default="d1")
data2: Mapped[str] = mapped_column(default_factory=lambda: "d2")
a1 = A()
eq_(a1.data, "d1")
eq_(a1.data2, "d2")
def test_default_factory_vs_collection_class(
self, dc_decl_base: Type[MappedAsDataclass]
):
# this is currently the error raised by dataclasses. We can instead
# do this validation ourselves, but overall I don't know that we
# can hit every validation and rule that's in dataclasses
with expect_raises_message(
ValueError, "cannot specify both default and default_factory"
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column(
default="d1", default_factory=lambda: "d2"
)
def test_combine_args_from_pep593(self, decl_base: Type[DeclarativeBase]):
"""test that we can set up column-level defaults separate from
dataclass defaults with a pep593 setup; however the dataclass
defaults need to override the insert_defaults so that they
take place on INSERT
"""
# anno only: global intpk, str30, s_str30, user_fk
intpk = Annotated[int, mapped_column(primary_key=True)]
str30 = Annotated[
str, mapped_column(String(30), insert_default=func.foo())
]
s_str30 = Annotated[
str,
mapped_column(String(30), server_default="some server default"),
]
user_fk = Annotated[int, mapped_column(ForeignKey("user_account.id"))]
class User(MappedAsDataclass, decl_base):
__tablename__ = "user_account"
# we need this case for dataclasses that can't derive things
# from Annotated yet at the typing level
id: Mapped[intpk] = mapped_column(init=False)
name_plain: Mapped[str30] = mapped_column()
name_no_init: Mapped[str30] = mapped_column(init=False)
name_none: Mapped[Optional[str30]] = mapped_column(default=None)
name_insert_none: Mapped[Optional[str30]] = mapped_column(
insert_default=None, init=False
)
name: Mapped[str30] = mapped_column(default="hi")
name_insert: Mapped[str30] = mapped_column(
insert_default="hi", init=False
)
name2: Mapped[s_str30] = mapped_column(default="there")
name2_insert: Mapped[s_str30] = mapped_column(
insert_default="there", init=False
)
addresses: Mapped[List["Address"]] = relationship( # noqa: F821
back_populates="user", default_factory=list
)
class Address(MappedAsDataclass, decl_base):
__tablename__ = "address"
id: Mapped[intpk] = mapped_column(init=False)
email_address: Mapped[str]
user_id: Mapped[user_fk] = mapped_column(init=False)
user: Mapped[Optional["User"]] = relationship(
back_populates="addresses", default=None
)
is_true(User.__table__.c.id.primary_key)
# the default from the Annotated overrides mapped_cols that have
# nothing for default or insert default
is_true(User.__table__.c.name_plain.default.arg.compare(func.foo()))
is_true(User.__table__.c.name_no_init.default.arg.compare(func.foo()))
# mapped cols that have None for default or insert default, that
# default overrides
is_true(User.__table__.c.name_none.default is None)
is_true(User.__table__.c.name_insert_none.default is None)
# mapped cols that have a value for default or insert default, that
# default overrides
is_true(User.__table__.c.name.default.arg == "hi")
is_true(User.__table__.c.name2.default.arg == "there")
is_true(User.__table__.c.name_insert.default.arg == "hi")
is_true(User.__table__.c.name2_insert.default.arg == "there")
eq_(User.__table__.c.name2.server_default.arg, "some server default")
is_true(Address.__table__.c.user_id.references(User.__table__.c.id))
u1 = User(name_plain="name")
eq_(u1.name_none, None)
eq_(u1.name_insert_none, None)
eq_(u1.name, "hi")
eq_(u1.name2, "there")
eq_(u1.name_insert, None)
eq_(u1.name2_insert, None)
def test_inheritance(self, dc_decl_base: Type[MappedAsDataclass]):
class Person(dc_decl_base):
__tablename__ = "person"
person_id: Mapped[int] = mapped_column(
primary_key=True, init=False
)
name: Mapped[str]
type: Mapped[str] = mapped_column(init=False)
__mapper_args__ = {"polymorphic_on": type}
class Engineer(Person):
__tablename__ = "engineer"
person_id: Mapped[int] = mapped_column(
ForeignKey("person.person_id"), primary_key=True, init=False
)
status: Mapped[str] = mapped_column(String(30))
engineer_name: Mapped[str]
primary_language: Mapped[str]
__mapper_args__ = {"polymorphic_identity": "engineer"}
e1 = Engineer("nm", "st", "en", "pl")
eq_(e1.name, "nm")
eq_(e1.status, "st")
eq_(e1.engineer_name, "en")
eq_(e1.primary_language, "pl")
def test_non_mapped_fields_wo_mapped_or_dc(
self, dc_decl_base: Type[MappedAsDataclass]
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: str
ctrl_one: str = dataclasses.field()
some_field: int = dataclasses.field(default=5)
a1 = A("data", "ctrl_one", 5)
eq_(
dataclasses.asdict(a1),
{
"ctrl_one": "ctrl_one",
"data": "data",
"id": None,
"some_field": 5,
},
)
def test_non_mapped_fields_wo_mapped_or_dc_w_inherits(
self, dc_decl_base: Type[MappedAsDataclass]
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: str
ctrl_one: str = dataclasses.field()
some_field: int = dataclasses.field(default=5)
class B(A):
b_data: Mapped[str] = mapped_column(default="bd")
# ensure we didnt break dataclasses contract of removing Field
# issue #8880
eq_(A.__dict__["some_field"], 5)
assert "ctrl_one" not in A.__dict__
b1 = B(data="data", ctrl_one="ctrl_one", some_field=5, b_data="x")
eq_(
dataclasses.asdict(b1),
{
"ctrl_one": "ctrl_one",
"data": "data",
"id": None,
"some_field": 5,
"b_data": "x",
},
)
def test_init_var(self, dc_decl_base: Type[MappedAsDataclass]):
class User(dc_decl_base):
__tablename__ = "user_account"
id: Mapped[int] = mapped_column(init=False, primary_key=True)
name: Mapped[str]
password: InitVar[str]
repeat_password: InitVar[str]
password_hash: Mapped[str] = mapped_column(
init=False, nullable=False
)
def __post_init__(self, password: str, repeat_password: str):
if password != repeat_password:
raise ValueError("passwords do not match")
self.password_hash = f"some hash... {password}"
u1 = User(name="u1", password="p1", repeat_password="p1")
eq_(u1.password_hash, "some hash... p1")
self.assert_compile(
select(User),
"SELECT user_account.id, user_account.name, "
"user_account.password_hash FROM user_account",
)
def test_integrated_dc(self, dc_decl_base: Type[MappedAsDataclass]):
"""We will be telling users "this is a dataclass that is also
mapped". Therefore, they will want *any* kind of attribute to do what
it would normally do in a dataclass, including normal types without any
field and explicit use of dataclasses.field(). additionally, we'd like
``Mapped`` to mean "persist this attribute". So the absence of
``Mapped`` should also mean something too.
"""
class A(dc_decl_base):
__tablename__ = "a"
ctrl_one: str = dataclasses.field()
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str]
some_field: int = dataclasses.field(default=5)
some_none_field: Optional[str] = dataclasses.field(default=None)
some_other_int_field: int = 10
# some field is part of the constructor
a1 = A("ctrlone", "datafield")
eq_(
dataclasses.asdict(a1),
{
"ctrl_one": "ctrlone",
"data": "datafield",
"id": None,
"some_field": 5,
"some_none_field": None,
"some_other_int_field": 10,
},
)
a2 = A(
"ctrlone",
"datafield",
some_field=7,
some_other_int_field=12,
some_none_field="x",
)
eq_(
dataclasses.asdict(a2),
{
"ctrl_one": "ctrlone",
"data": "datafield",
"id": None,
"some_field": 7,
"some_none_field": "x",
"some_other_int_field": 12,
},
)
# only Mapped[] is mapped
self.assert_compile(select(A), "SELECT a.id, a.data FROM a")
eq_(
pyinspect.getfullargspec(A.__init__),
pyinspect.FullArgSpec(
args=[
"self",
"ctrl_one",
"data",
"some_field",
"some_none_field",
"some_other_int_field",
],
varargs=None,
varkw=None,
defaults=(5, None, 10),
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
def test_dc_on_top_of_non_dc(self, decl_base: Type[DeclarativeBase]):
class Person(decl_base):
__tablename__ = "person"
person_id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str]
type: Mapped[str] = mapped_column()
__mapper_args__ = {"polymorphic_on": type}
class Engineer(MappedAsDataclass, Person):
__tablename__ = "engineer"
person_id: Mapped[int] = mapped_column(
ForeignKey("person.person_id"), primary_key=True, init=False
)
status: Mapped[str] = mapped_column(String(30))
engineer_name: Mapped[str]
primary_language: Mapped[str]
__mapper_args__ = {"polymorphic_identity": "engineer"}
e1 = Engineer("st", "en", "pl")
eq_(e1.status, "st")
eq_(e1.engineer_name, "en")
eq_(e1.primary_language, "pl")
eq_(
pyinspect.getfullargspec(Person.__init__),
# the boring **kw __init__
pyinspect.FullArgSpec(
args=["self"],
varargs=None,
varkw="kwargs",
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
eq_(
pyinspect.getfullargspec(Engineer.__init__),
# the exciting dataclasses __init__
pyinspect.FullArgSpec(
args=["self", "status", "engineer_name", "primary_language"],
varargs=None,
varkw=None,
defaults=None,
kwonlyargs=[],
kwonlydefaults=None,
annotations={},
),
)
def test_compare(self, dc_decl_base: Type[MappedAsDataclass]):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, compare=False)
data: Mapped[str]
a1 = A(id=0, data="foo")
a2 = A(id=1, data="foo")
eq_(a1, a2)
def test_kw_only_attribute(self, dc_decl_base: Type[MappedAsDataclass]):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
data: Mapped[str] = mapped_column(kw_only=True)
fas = pyinspect.getfullargspec(A.__init__)
eq_(fas.args, ["self", "id"])
eq_(fas.kwonlyargs, ["data"])
@testing.combinations(True, False, argnames="unsafe_hash")
def test_hash_attribute(
self, dc_decl_base: Type[MappedAsDataclass], unsafe_hash
):
class A(dc_decl_base, unsafe_hash=unsafe_hash):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, hash=False)
data: Mapped[str] = mapped_column(hash=True)
a = A(id=1, data="x")
if not unsafe_hash or not dc_decl_base._mad_before:
with expect_raises(TypeError):
a_hash1 = hash(a)
else:
a_hash1 = hash(a)
a.id = 41
eq_(hash(a), a_hash1)
a.data = "y"
ne_(hash(a), a_hash1)
def test_kw_only_dataclass_constant(
self, dc_decl_base: Type[MappedAsDataclass]
):
class Mixin(MappedAsDataclass):
a: Mapped[int] = mapped_column(primary_key=True)
b: Mapped[int] = mapped_column(default=1)
class Child(Mixin, dc_decl_base):
__tablename__ = "child"
_: dataclasses.KW_ONLY
c: Mapped[int]
c1 = Child(1, c=5)
eq_(c1, Child(a=1, b=1, c=5))
def test_mapped_column_overrides(self, dc_decl_base):
"""test #8688"""
class TriggeringMixin(MappedAsDataclass):
mixin_value: Mapped[int] = mapped_column(BigInteger)
class NonTriggeringMixin(MappedAsDataclass):
mixin_value: Mapped[int]
class Foo(dc_decl_base, TriggeringMixin):
__tablename__ = "foo"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
foo_value: Mapped[float] = mapped_column(default=78)
class Bar(dc_decl_base, NonTriggeringMixin):
__tablename__ = "bar"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
bar_value: Mapped[float] = mapped_column(default=78)
f1 = Foo(mixin_value=5)
eq_(f1.foo_value, 78)
b1 = Bar(mixin_value=5)
eq_(b1.bar_value, 78)
def test_mixing_MappedAsDataclass_with_decorator_raises(self, registry):
"""test #9211"""
class Mixin(MappedAsDataclass):
id: Mapped[int] = mapped_column(primary_key=True, init=False)
with expect_raises_message(
exc.InvalidRequestError,
"Class .*Foo.* is already a dataclass; ensure that "
"base classes / decorator styles of establishing dataclasses "
"are not being mixed. ",
):
@mapped_as_dataclass(registry)
class Foo(Mixin):
bar_value: Mapped[float] = mapped_column(default=78)
def test_MappedAsDataclass_table_provided(self, registry):
"""test #11973"""
with expect_raises_message(
exc.InvalidRequestError,
"Class .*Foo.* already defines a '__table__'. "
"ORM Annotated Dataclasses do not support a pre-existing "
"'__table__' element",
):
@registry.mapped_as_dataclass
class Foo:
__table__ = Table("foo", registry.metadata)
foo: Mapped[float]
def test_dataclass_exception_wrapped(self, dc_decl_base):
with expect_raises_message(
exc.InvalidRequestError,
r"Python dataclasses error encountered when creating dataclass "
r"for \'Foo\': .*Please refer to Python dataclasses.*",
) as ec:
class Foo(dc_decl_base):
id: Mapped[int] = mapped_column(primary_key=True, init=False)
foo_value: Mapped[float] = mapped_column(default=78)
foo_no_value: Mapped[float] = mapped_column()
__tablename__ = "foo"
is_true(isinstance(ec.error.__cause__, TypeError))
def test_dataclass_default(self, dc_decl_base):
"""test for #9879"""
def c10():
return 10
def c20():
return 20
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
def_init: Mapped[int] = mapped_column(default=42)
call_init: Mapped[int] = mapped_column(default_factory=c10)
def_no_init: Mapped[int] = mapped_column(default=13, init=False)
call_no_init: Mapped[int] = mapped_column(
default_factory=c20, init=False
)
a = A(id=100)
eq_(a.def_init, 42)
eq_(a.call_init, 10)
eq_(a.def_no_init, 13)
eq_(a.call_no_init, 20)
fields = {f.name: f for f in dataclasses.fields(A)}
eq_(fields["def_init"].default, LoaderCallableStatus.DONT_SET)
eq_(fields["call_init"].default_factory, c10)
eq_(fields["def_no_init"].default, dataclasses.MISSING)
ne_(fields["def_no_init"].default_factory, dataclasses.MISSING)
eq_(fields["call_no_init"].default_factory, c20)
def test_dataclass_default_callable(self, dc_decl_base):
"""test for #9936"""
def cd():
return 42
with expect_deprecated(
"Callable object passed to the ``default`` parameter for "
"attribute 'value' in a ORM-mapped Dataclasses context is "
"ambiguous, and this use will raise an error in a future "
"release. If this callable is intended to produce Core level ",
"Callable object passed to the ``default`` parameter for "
"attribute 'no_init' in a ORM-mapped Dataclasses context is "
"ambiguous, and this use will raise an error in a future "
"release. If this callable is intended to produce Core level ",
):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
value: Mapped[int] = mapped_column(default=cd)
no_init: Mapped[int] = mapped_column(default=cd, init=False)
a = A(id=100)
is_false("no_init" in a.__dict__)
eq_(a.value, cd)
eq_(a.no_init, None)
fields = {f.name: f for f in dataclasses.fields(A)}
eq_(fields["value"].default, cd)
eq_(fields["no_init"].default, cd)
def test_dataclass_metadata(self, dc_decl_base):
class A(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
value: Mapped[str] = mapped_column(
dataclass_metadata={"meta_key": "meta_value"}
)
fields = {f.name: f for f in dataclasses.fields(A)}
eq_(fields["id"].metadata, {})
eq_(fields["value"].metadata, {"meta_key": "meta_value"})
@testing.requires.python314
def test_apply_dc_deferred_annotations(self, dc_decl_base):
"""test for #12952"""
class Message(dc_decl_base):
__tablename__ = "message"
id: Mapped[int] = mapped_column(primary_key=True)
content: Mapped[str]
user_id: Mapped[int] = mapped_column(ForeignKey("user.id"))
# annotation is unquoted and refers to nonexistent class (and if
# this is test_dc_transforms.py, __future__ annotations is not
# turned on), so would be rejected by any python interpreter < 3.14
# up front. with python 3.14, the dataclass scan takes place
# and has to fetch the annotations using get_annotations()
# so that refs are turned into FwdRef without being resolved
user: Mapped[UnavailableUser] = relationship( # type: ignore # noqa
back_populates="messages"
)
# The key assertion: Message should be a dataclass
is_true(dataclasses.is_dataclass(Message))
# Verify the dataclass has proper __init__ signature
sig = pyinspect.signature(Message.__init__)
is_true("id" in sig.parameters)
is_true("content" in sig.parameters)
is_true("user_id" in sig.parameters)
| DCTransformsTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/asyncio/result.py | {
"start": 25710,
"end": 31648
} | class ____(AsyncCommon[_R], util.TypingOnly):
"""A :class:`_asyncio.AsyncResult` that's typed as returning plain
Python tuples instead of rows.
Since :class:`_engine.Row` acts like a tuple in every way already,
this class is a typing only class, regular :class:`_asyncio.AsyncResult` is
still used at runtime.
"""
__slots__ = ()
if TYPE_CHECKING:
async def partitions(
self, size: Optional[int] = None
) -> AsyncIterator[Sequence[_R]]:
"""Iterate through sub-lists of elements of the size given.
Equivalent to :meth:`_result.Result.partitions` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
async def fetchone(self) -> Optional[_R]:
"""Fetch one tuple.
Equivalent to :meth:`_result.Result.fetchone` except that
tuple values, rather than :class:`_engine.Row`
objects, are returned.
"""
...
async def fetchall(self) -> Sequence[_R]:
"""A synonym for the :meth:`_engine.ScalarResult.all` method."""
...
async def fetchmany(self, size: Optional[int] = None) -> Sequence[_R]:
"""Fetch many objects.
Equivalent to :meth:`_result.Result.fetchmany` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
async def all(self) -> Sequence[_R]: # noqa: A001
"""Return all scalar values in a list.
Equivalent to :meth:`_result.Result.all` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
def __aiter__(self) -> AsyncIterator[_R]: ...
async def __anext__(self) -> _R: ...
async def first(self) -> Optional[_R]:
"""Fetch the first object or ``None`` if no object is present.
Equivalent to :meth:`_result.Result.first` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
async def one_or_none(self) -> Optional[_R]:
"""Return at most one object or raise an exception.
Equivalent to :meth:`_result.Result.one_or_none` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
async def one(self) -> _R:
"""Return exactly one object or raise an exception.
Equivalent to :meth:`_result.Result.one` except that
tuple values, rather than :class:`_engine.Row` objects,
are returned.
"""
...
@overload
async def scalar_one(self: AsyncTupleResult[Tuple[_T]]) -> _T: ...
@overload
async def scalar_one(self) -> Any: ...
async def scalar_one(self) -> Any:
"""Return exactly one scalar result or raise an exception.
This is equivalent to calling :meth:`_engine.Result.scalars`
and then :meth:`_engine.AsyncScalarResult.one`.
.. seealso::
:meth:`_engine.AsyncScalarResult.one`
:meth:`_engine.Result.scalars`
"""
...
@overload
async def scalar_one_or_none(
self: AsyncTupleResult[Tuple[_T]],
) -> Optional[_T]: ...
@overload
async def scalar_one_or_none(self) -> Optional[Any]: ...
async def scalar_one_or_none(self) -> Optional[Any]:
"""Return exactly one or no scalar result.
This is equivalent to calling :meth:`_engine.Result.scalars`
and then :meth:`_engine.AsyncScalarResult.one_or_none`.
.. seealso::
:meth:`_engine.AsyncScalarResult.one_or_none`
:meth:`_engine.Result.scalars`
"""
...
@overload
async def scalar(
self: AsyncTupleResult[Tuple[_T]],
) -> Optional[_T]: ...
@overload
async def scalar(self) -> Any: ...
async def scalar(self) -> Any:
"""Fetch the first column of the first row, and close the result
set.
Returns ``None`` if there are no rows to fetch.
No validation is performed to test if additional rows remain.
After calling this method, the object is fully closed,
e.g. the :meth:`_engine.CursorResult.close`
method will have been called.
:return: a Python scalar value , or ``None`` if no rows remain.
"""
...
_RT = TypeVar("_RT", bound="Result[Unpack[TupleAny]]")
async def _ensure_sync_result(result: _RT, calling_method: Any) -> _RT:
cursor_result: CursorResult[Any]
try:
is_cursor = result._is_cursor
except AttributeError:
# legacy execute(DefaultGenerator) case
return result
if not is_cursor:
cursor_result = getattr(result, "raw", None) # type: ignore
else:
cursor_result = result # type: ignore
if cursor_result and cursor_result.context._is_server_side:
await greenlet_spawn(cursor_result.close)
raise async_exc.AsyncMethodRequired(
"Can't use the %s.%s() method with a "
"server-side cursor. "
"Use the %s.stream() method for an async "
"streaming result set."
% (
calling_method.__self__.__class__.__name__,
calling_method.__name__,
calling_method.__self__.__class__.__name__,
)
)
if is_cursor and cursor_result.cursor is not None:
await cursor_result.cursor._async_soft_close()
return result
| AsyncTupleResult |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 190597,
"end": 191126
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("id", "body", "body_version", "client_mutation_id")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
body = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="body")
body_version = sgqlc.types.Field(String, graphql_name="bodyVersion")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| UpdateTeamDiscussionCommentInput |
python | huggingface__transformers | src/transformers/models/falcon/modeling_falcon.py | {
"start": 50482,
"end": 55933
} | class ____(FalconPreTrainedModel):
def __init__(self, config: FalconConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = FalconModel(config)
self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring
| FalconForSequenceClassification |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/svd_op_test.py | {
"start": 1840,
"end": 10015
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testWrongDimensions(self):
# The input to svd should be a tensor of at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"rank.* 2.*0"):
linalg_ops.svd(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"rank.* 2.*1"):
linalg_ops.svd(vector)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testThrowDeterminismError(self):
shape = [6, 1]
seed = [42, 24]
matrix = stateless_random_ops.stateless_random_normal(shape, seed)
with test_util.deterministic_ops():
if test_util.is_gpu_available(cuda_only=True):
with self.assertRaisesRegex(
errors_impl.UnimplementedError,
"Determinism is not yet supported for SVD of matrices with 1 column."
):
self.evaluate(linalg_ops.svd(matrix))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testDeterminism(self):
shape = [6, 5]
seed = [42, 24]
matrix = stateless_random_ops.stateless_random_normal(shape, seed)
with test_util.deterministic_ops():
if test_util.is_gpu_available(cuda_only=True):
s1, u1, v1 = self.evaluate(linalg_ops.svd(matrix))
for _ in range(5):
s2, u2, v2 = self.evaluate(linalg_ops.svd(matrix))
self.assertAllEqual(s1, s2)
self.assertAllEqual(u1, u2)
self.assertAllEqual(v1, v2)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def DISABLED_testBadInputs(self):
# TODO(b/185822300): re-enable after the bug is fixed in CUDA-11.x
# The input to svd should be a tensor of at least rank 2.
for bad_val in [np.nan, np.inf]:
matrix = np.array([[1, bad_val], [0, 1]])
s, u, v = linalg_ops.svd(matrix, compute_uv=True)
s, u, v = self.evaluate([s, u, v])
for i in range(2):
self.assertTrue(np.isnan(s[i]))
for j in range(2):
self.assertTrue(np.isnan(u[i, j]))
self.assertTrue(np.isnan(v[i, j]))
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testExecuteMultipleWithoutError(self):
all_ops = []
shape = [6, 5]
seed = [42, 24]
for compute_uv_ in True, False:
for full_matrices_ in True, False:
matrix1 = stateless_random_ops.stateless_random_normal(shape, seed)
matrix2 = stateless_random_ops.stateless_random_normal(shape, seed)
self.assertAllEqual(matrix1, matrix2)
if compute_uv_:
s1, u1, v1 = linalg_ops.svd(
matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
s2, u2, v2 = linalg_ops.svd(
matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
all_ops += [s1, s2, u1, u2, v1, v2]
else:
s1 = linalg_ops.svd(
matrix1, compute_uv=compute_uv_, full_matrices=full_matrices_)
s2 = linalg_ops.svd(
matrix2, compute_uv=compute_uv_, full_matrices=full_matrices_)
all_ops += [s1, s2]
val = self.evaluate(all_ops)
for i in range(0, len(val), 2):
self.assertAllEqual(val[i], val[i + 1])
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testEmptyBatches(self):
matrices = constant_op.constant(1.0, shape=[0, 2, 2])
s, u, v = self.evaluate(linalg_ops.svd(matrices))
self.assertAllEqual(s, np.zeros([0, 2]))
self.assertAllEqual(u, np.zeros([0, 2, 2]))
self.assertAllEqual(v, np.zeros([0, 2, 2]))
def _GetSvdOpTest(dtype_, shape_, use_static_shape_, compute_uv_,
full_matrices_):
def CompareSingularValues(self, x, y, tol):
atol = (x[0] + y[0]) * tol if len(x) else tol
self.assertAllClose(x, y, atol=atol)
def CompareSingularVectors(self, x, y, rank, tol):
# We only compare the first 'rank' singular vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Singular vectors are only unique up to sign (complex phase factor for
# complex matrices), so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertAllClose(x, y, atol=2 * tol)
def CheckApproximation(self, a, u, s, v, full_matrices_, tol):
# Tests that a ~= u*diag(s)*transpose(v).
batch_shape = a.shape[:-2]
m = a.shape[-2]
n = a.shape[-1]
diag_s = math_ops.cast(array_ops.matrix_diag(s), dtype=dtype_)
if full_matrices_:
if m > n:
zeros = array_ops.zeros(batch_shape + (m - n, n), dtype=dtype_)
diag_s = array_ops.concat([diag_s, zeros], a.ndim - 2)
elif n > m:
zeros = array_ops.zeros(batch_shape + (m, n - m), dtype=dtype_)
diag_s = array_ops.concat([diag_s, zeros], a.ndim - 1)
a_recon = math_ops.matmul(u, diag_s)
a_recon = math_ops.matmul(a_recon, v, adjoint_b=True)
self.assertAllClose(a_recon, a, rtol=tol, atol=tol)
def CheckUnitary(self, x, tol):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
self.assertAllClose(identity, xx, atol=tol)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def Test(self):
if not use_static_shape_ and context.executing_eagerly():
return
is_complex = dtype_ in (np.complex64, np.complex128)
is_single = dtype_ in (np.float32, np.complex64)
tol = 3e-4 if is_single else 1e-12
if test.is_gpu_available():
# The gpu version returns results that are much less accurate.
tol *= 200
np.random.seed(42)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if is_complex:
x_np += 1j * np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if use_static_shape_:
x_tf = constant_op.constant(x_np)
else:
x_tf = array_ops.placeholder(dtype_)
if compute_uv_:
s_tf, u_tf, v_tf = linalg_ops.svd(
x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
if use_static_shape_:
s_tf_val, u_tf_val, v_tf_val = self.evaluate([s_tf, u_tf, v_tf])
else:
with self.session() as sess:
s_tf_val, u_tf_val, v_tf_val = sess.run([s_tf, u_tf, v_tf],
feed_dict={x_tf: x_np})
else:
s_tf = linalg_ops.svd(
x_tf, compute_uv=compute_uv_, full_matrices=full_matrices_)
if use_static_shape_:
s_tf_val = self.evaluate(s_tf)
else:
with self.session() as sess:
s_tf_val = sess.run(s_tf, feed_dict={x_tf: x_np})
if compute_uv_:
u_np, s_np, v_np = np.linalg.svd(
x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
else:
s_np = np.linalg.svd(
x_np, compute_uv=compute_uv_, full_matrices=full_matrices_)
# We explicitly avoid the situation where numpy eliminates a first
# dimension that is equal to one.
s_np = np.reshape(s_np, s_tf_val.shape)
CompareSingularValues(self, s_np, s_tf_val, tol)
if compute_uv_:
CompareSingularVectors(self, u_np, u_tf_val, min(shape_[-2:]), tol)
CompareSingularVectors(self, np.conj(np.swapaxes(v_np, -2, -1)), v_tf_val,
min(shape_[-2:]), tol)
CheckApproximation(self, x_np, u_tf_val, s_tf_val, v_tf_val,
full_matrices_, tol)
CheckUnitary(self, u_tf_val, tol)
CheckUnitary(self, v_tf_val, tol)
return Test
| SvdOpTest |
python | django__django | tests/fixtures_regress/models.py | {
"start": 2091,
"end": 2422
} | class ____(models.Model):
name = models.CharField(max_length=255, unique=True)
main = models.ForeignKey("self", models.SET_NULL, null=True)
objects = TestManager()
class Meta:
ordering = ("name",)
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
| Store |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis11.py | {
"start": 315,
"end": 1391
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis11.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [45705472, 54518528]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_x_axis({"min": 0, "max": 20})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | google__jax | jax/_src/dispatch.py | {
"start": 6140,
"end": 14337
} | class ____:
__slots__ = ['fmt', 'fun_name', 'event', 'start_time']
def __init__(self, fmt: str, fun_name: str, event: str | None = None):
self.fmt = fmt
self.fun_name = fun_name
self.event = event
def __enter__(self):
self.start_time = time.time()
if self.event is not None:
record_scalar(
self.event, self.start_time, fun_name=self.fun_name
)
def __exit__(self, exc_type, exc_value, traceback):
if _on_exit:
return
end_time = time.time()
elapsed_time = end_time - self.start_time
log_priority = logging.WARNING if config.log_compiles.value else logging.DEBUG
if logger.isEnabledFor(log_priority):
logger.log(log_priority, self.fmt.format(
fun_name=self.fun_name, elapsed_time=elapsed_time))
if self.event is not None:
record_event_duration_secs(
self.event, elapsed_time, fun_name=self.fun_name
)
record_event_time_span(
self.event, self.start_time, end_time, fun_name=self.fun_name
)
log_elapsed_time = LogElapsedTimeContextManager
def should_tuple_args(num_args: int, platform: str) -> bool:
# CPU and GPU do not need tuples as they use host-side data structures that
# do not have small bounds.
# TPU only needs a tuple for very long lists
if platform == "tpu":
return num_args > 2000
else:
return False
def jaxpr_has_primitive(jaxpr: core.Jaxpr, prim_name: str) -> bool:
"""Whether there is a primitive given by user anywhere inside a Jaxpr."""
for eqn in jaxpr.eqns:
if prim_name in eqn.primitive.name:
return True
for subjaxpr in core.subjaxprs(jaxpr):
if jaxpr_has_primitive(subjaxpr, prim_name):
return True
return False
# Use this registry with caution. It will void the guarantee that lowering to
# stablehlo is oblivious of physical devices.
prim_requires_devices_during_lowering: set[core.Primitive] = set()
@util.weakref_lru_cache
def jaxpr_has_prim_requiring_devices(jaxpr: core.Jaxpr) -> bool:
for eqn in jaxpr.eqns:
if eqn.primitive in prim_requires_devices_during_lowering:
return True
for subjaxpr in core.subjaxprs(jaxpr):
if jaxpr_has_prim_requiring_devices(subjaxpr):
return True
return False
@util.weakref_lru_cache
def get_intermediate_shardings(
jaxpr: core.Jaxpr) -> Sequence[tuple[Sharding, SourceInfo]]:
from jax._src import shard_map # pytype: disable=import-error
out = []
for eqn in jaxpr.eqns:
if eqn.primitive is pjit.sharding_constraint_p:
s = eqn.params['sharding']
if isinstance(s, NamedSharding) and isinstance(s.mesh, AbstractMesh):
continue
source_info = SourceInfo(eqn.source_info, eqn.primitive.name)
out.append((s, source_info))
elif eqn.primitive is pjit.jit_p:
source_info = SourceInfo(eqn.source_info, eqn.primitive.name)
out.extend((i, source_info) for i in eqn.params['in_shardings'])
out.extend((o, source_info) for o in eqn.params['out_shardings'])
elif eqn.primitive is shard_map.shard_map_p:
mesh = eqn.params['mesh']
if isinstance(mesh, AbstractMesh):
continue
source_info = SourceInfo(eqn.source_info, eqn.primitive.name)
out.extend((NamedSharding(mesh, spec), source_info)
for spec in [*eqn.params['in_specs'], *eqn.params['out_specs']])
elif eqn.primitive is device_put_p:
source_info = SourceInfo(eqn.source_info, eqn.primitive.name)
out.extend((s, source_info) for s in eqn.params['devices']
if isinstance(s, Sharding) and s.memory_kind is not None)
for subjaxpr in core.subjaxprs(jaxpr):
out.extend(get_intermediate_shardings(subjaxpr))
return out
def jaxpr_has_bints(jaxpr: core.Jaxpr) -> bool:
return (any(type(v.aval.dtype) is core.bint for v in jaxpr.invars
if isinstance(v.aval, (core.ShapedArray, core.DShapedArray))) or
any(_is_bint_axis_size(d)
for j in itertools.chain([jaxpr], core.subjaxprs(jaxpr))
for e in j.eqns for v in e.outvars
if isinstance(v.aval, core.DShapedArray) for d in v.aval.shape))
def _is_bint_axis_size(d: core.AxisSize) -> bool:
if isinstance(d, core.DArray):
assert not d.shape
return type(d.dtype) is core.bint
elif isinstance(d, core.Var):
return (isinstance(d.aval, core.DShapedArray) and
type(d.aval.dtype) is core.bint)
return False
def check_arg(arg: Any):
if not (isinstance(arg, core.Tracer) or core.valid_jaxtype(arg)):
raise TypeError(f"Argument '{arg}' of type {type(arg)} is not a valid "
"JAX type.")
def needs_check_special() -> bool:
return config.debug_infs.value or config.debug_nans.value
def check_special(name: str, bufs: Sequence[basearray.Array]) -> None:
if needs_check_special():
for buf in bufs:
_check_special(name, buf.dtype, buf)
def _check_special(name: str, dtype: np.dtype, buf: basearray.Array) -> None:
if dtypes.issubdtype(dtype, np.inexact):
if config.debug_nans.value and np.any(np.isnan(np.asarray(buf))):
raise InternalFloatingPointError(name, "nan")
if config.debug_infs.value and np.any(np.isinf(np.asarray(buf))):
raise InternalFloatingPointError(name, "inf")
def _identity_fn(x):
return x
def _different_device_order_reshard(
x: array.ArrayImpl, target_sharding: NamedSharding, copy: ArrayCopySemantics
) -> array.ArrayImpl:
x._check_if_deleted()
inp_sharding = x.sharding
assert isinstance(inp_sharding, NamedSharding)
donate_argnums = 0 if copy == ArrayCopySemantics.DONATE_INPUT else None
if inp_sharding._device_assignment == target_sharding._device_assignment:
return api.jit(_identity_fn, out_shardings=target_sharding,
donate_argnums=donate_argnums)(x)
if inp_sharding.is_fully_replicated:
permute_order = None
else:
permute_order = np.vectorize(target_sharding._device_assignment.index,
otypes=[int])(inp_sharding._device_assignment)
new_mesh = Mesh(
target_sharding.mesh.devices.reshape(inp_sharding.mesh.axis_sizes),
inp_sharding.mesh.axis_names)
new_s = NamedSharding(
new_mesh, inp_sharding.spec, memory_kind=target_sharding.memory_kind,
_logical_device_ids=(None if permute_order is None else
tuple(permute_order.tolist())))
new_x = xc.reorder_shards(x, new_s, ArrayCopySemantics.REUSE_INPUT) # type: ignore
return api.jit(_identity_fn, out_shardings=target_sharding,
donate_argnums=donate_argnums)(new_x)
@util.cache(max_size=2048, trace_context_in_key=False)
def _is_supported_cross_host_transfer(ndim, src_sharding, dst_sharding):
"""Returns True if src->dst is a supported cross-host transfer."""
if (src_sharding._internal_device_list.device_kind !=
dst_sharding._internal_device_list.device_kind):
return False
if (src_sharding._to_xla_hlo_sharding(ndim) !=
dst_sharding._to_xla_hlo_sharding(ndim)):
return False
# This check excludes the case where the source and destination shardings
# have the same process index sets but there are shards that require
# cross-host transfers. This case is supportable but expensive to check for.
different_process_inds = (
src_sharding._internal_device_list.process_indices !=
dst_sharding._internal_device_list.process_indices)
backend = xla_bridge.get_backend()
# If a cross-host device transfer is requested but the backend does not
# support it, then the user must set the flags to enable DCN-based transfers.
if (different_process_inds and
not getattr(backend, 'supports_cross_host_transfers', False) and
not xla_bridge.CROSS_HOST_TRANSFER_SOCKET_ADDRESS.value):
raise ValueError(
f"The backend ({backend.platform}, {backend.platform_version}) does "
"not support cross-host device transfers via ICI/NCCL. Please set "
"jax_cross_host_transfer_socket_address and (optionally) "
"jax_cross_host_transport_addresses flags to enable DCN-based cross "
"host device transfers.")
return different_process_inds
@dataclasses.dataclass(frozen=True)
| LogElapsedTimeContextManager |
python | sympy__sympy | sympy/codegen/ast.py | {
"start": 12398,
"end": 12961
} | class ____(Token):
""" The AST equivalence of Python's NoneType
The corresponding instance of Python's ``None`` is ``none``.
Examples
========
>>> from sympy.codegen.ast import none, Variable
>>> from sympy import pycode
>>> print(pycode(Variable('x').as_Declaration(value=none)))
x = None
"""
def __eq__(self, other):
return other is None or isinstance(other, NoneToken)
def _hashable_content(self):
return ()
def __hash__(self):
return super().__hash__()
none = NoneToken()
| NoneToken |
python | huggingface__transformers | src/transformers/models/qwen3/modeling_qwen3.py | {
"start": 19960,
"end": 23388
} | class ____(Qwen3PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Qwen3Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Qwen3ForCausalLM
>>> model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-8B")
>>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| Qwen3ForCausalLM |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_template.py | {
"start": 5247,
"end": 5598
} | class ____(FigureManagerBase):
"""
Helper class for pyplot mode, wraps everything up into a neat bundle.
For non-interactive backends, the base class is sufficient. For
interactive backends, see the documentation of the `.FigureManagerBase`
class for the list of methods that can/should be overridden.
"""
| FigureManagerTemplate |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/callbackProtocol1.py | {
"start": 830,
"end": 1343
} | class ____(Protocol):
def __call__(self, *vals: bytes, **kwargs: str) -> None:
pass
def func1(*a: bytes, **b: str):
pass
def func2(*a: bytes):
pass
def func3(*a: str, **b: str):
pass
def func4(*a: bytes, **b: bytes):
pass
def func5(**b: str):
pass
var2: TestClass2 = func1
# This should generate an error.
var2 = func2
# This should generate an error.
var2 = func3
# This should generate an error.
var2 = func4
# This should generate an error.
var2 = func5
| TestClass2 |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 38666,
"end": 40506
} | class ____(TestCase):
def test_qs_exists_handles_type_error(self):
class TypeErrorQueryset:
def exists(self):
raise TypeError
assert qs_exists(TypeErrorQueryset()) is False
def test_qs_exists_handles_value_error(self):
class ValueErrorQueryset:
def exists(self):
raise ValueError
assert qs_exists(ValueErrorQueryset()) is False
def test_qs_exists_handles_data_error(self):
class DataErrorQueryset:
def exists(self):
raise DataError
assert qs_exists(DataErrorQueryset()) is False
def test_validator_raises_error_if_not_all_fields_are_provided(self):
validator = BaseUniqueForValidator(queryset=object(), field='foo',
date_field='bar')
attrs = {'foo': 'baz'}
with pytest.raises(ValidationError):
validator.enforce_required_fields(attrs)
def test_validator_raises_error_when_abstract_method_called(self):
validator = BaseUniqueForValidator(queryset=object(), field='foo',
date_field='bar')
with pytest.raises(NotImplementedError):
validator.filter_queryset(
attrs=None, queryset=None, field_name='', date_field_name=''
)
def test_equality_operator(self):
mock_queryset = MagicMock()
validator = BaseUniqueForValidator(queryset=mock_queryset, field='foo',
date_field='bar')
validator2 = BaseUniqueForValidator(queryset=mock_queryset, field='foo',
date_field='bar')
assert validator == validator2
validator2.date_field = "bar2"
assert validator != validator2
| ValidatorsTests |
python | spyder-ide__spyder | spyder/api/widgets/toolbars.py | {
"start": 1150,
"end": 1337
} | class ____:
Top = Qt.TopToolBarArea
Bottom = Qt.BottomToolBarArea
# ---- Event filters
# ----------------------------------------------------------------------------
| ToolbarLocation |
python | doocs__leetcode | lcof2/剑指 Offer II 119. 最长连续序列/Solution.py | {
"start": 0,
"end": 402
} | class ____:
def longestConsecutive(self, nums: List[int]) -> int:
n = len(nums)
if n < 2:
return n
nums.sort()
ans = t = 1
for a, b in pairwise(nums):
if a == b:
continue
if a + 1 == b:
t += 1
ans = max(ans, t)
else:
t = 1
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/qwen3_moe/modeling_qwen3_moe.py | {
"start": 32164,
"end": 32561
} | class ____(GenericForQuestionAnswering, Qwen3MoePreTrainedModel):
base_model_prefix = "transformer" # For BC, where `transformer` was used instead of `model`
__all__ = [
"Qwen3MoeForCausalLM",
"Qwen3MoeForQuestionAnswering",
"Qwen3MoeModel",
"Qwen3MoePreTrainedModel",
"Qwen3MoeForSequenceClassification",
"Qwen3MoeForTokenClassification",
]
| Qwen3MoeForQuestionAnswering |
python | graphql-python__graphene | graphene/relay/tests/test_mutation_async.py | {
"start": 376,
"end": 670
} | class ____(ClientIDMutation):
class Input:
what = String()
phrase = String()
@staticmethod
async def mutate_and_get_payload(self, info, what, client_mutation_id=None):
return SaySomethingAsync(phrase=str(what))
# MyEdge = MyNode.Connection.Edge
| SaySomethingAsync |
python | bokeh__bokeh | examples/advanced/extensions/gears/gear.py | {
"start": 186,
"end": 1905
} | class ____(Glyph):
""" Render gears.
The details and nomenclature concerning gear construction can
be quite involved. For more information, consult the `Wikipedia
article for Gear`_.
.. _Wikipedia article for Gear: http://en.wikipedia.org/wiki/Gear
"""
__view_module__ = "gears"
x = NumberSpec(help="""
The x-coordinates of the center of the gears.
""")
y = NumberSpec(help="""
The y-coordinates of the center of the gears.
""")
angle = AngleSpec(default=0, help="""
The angle the gears are rotated from horizontal. [rad]
""")
module = NumberSpec(help="""
A scaling factor, given by::
m = p / pi
where *p* is the circular pitch, defined as the distance from one
face of a tooth to the corresponding face of an adjacent tooth on
the same gear, measured along the pitch circle. [float]
""")
teeth = NumberSpec(help="""
How many teeth the gears have. [int]
""")
pressure_angle = NumberSpec(default=20, help="""
The complement of the angle between the direction that the teeth
exert force on each other, and the line joining the centers of the
two gears. [deg]
""")
shaft_size = NumberSpec(default=0.3, help="""
The central gear shaft size as a percentage of the overall gear
size. [float]
""")
internal = BoolSpec(default=False, help="""
Whether the gear teeth are internal. [bool]
""")
line_props = Include(LineProps, help="""
The %s values for the gears.
""")
fill_props = Include(FillProps, help="""
The %s values for the gears.
""")
hatch_props = Include(HatchProps, help="""
The %s values for the gears.
""")
| Gear |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 7864,
"end": 7979
} | class ____(BaseModel):
status: Literal[
"disabled",
] = Field(..., description="")
| ClusterStatusOneOf |
python | pydantic__pydantic | pydantic/fields.py | {
"start": 3453,
"end": 38654
} | class ____(_repr.Representation):
"""This class holds information about a field.
`FieldInfo` is used for any field definition regardless of whether the [`Field()`][pydantic.fields.Field]
function is explicitly used.
!!! warning
The `FieldInfo` class is meant to expose information about a field in a Pydantic model or dataclass.
`FieldInfo` instances shouldn't be instantiated directly, nor mutated.
If you need to derive a new model from another one and are willing to alter `FieldInfo` instances,
refer to this [dynamic model example](../examples/dynamic_models.md).
Attributes:
annotation: The type annotation of the field.
default: The default value of the field.
default_factory: A callable to generate the default value. The callable can either take 0 arguments
(in which case it is called as is) or a single argument containing the already validated data.
alias: The alias name of the field.
alias_priority: The priority of the field's alias.
validation_alias: The validation alias of the field.
serialization_alias: The serialization alias of the field.
title: The title of the field.
field_title_generator: A callable that takes a field name and returns title for it.
description: The description of the field.
examples: List of examples of the field.
exclude: Whether to exclude the field from the model serialization.
exclude_if: A callable that determines whether to exclude a field during serialization based on its value.
discriminator: Field name or Discriminator for discriminating the type in a tagged union.
deprecated: A deprecation message, an instance of `warnings.deprecated` or the `typing_extensions.deprecated` backport,
or a boolean. If `True`, a default deprecation message will be emitted when accessing the field.
json_schema_extra: A dict or callable to provide extra JSON schema properties.
frozen: Whether the field is frozen.
validate_default: Whether to validate the default value of the field.
repr: Whether to include the field in representation of the model.
init: Whether the field should be included in the constructor of the dataclass.
init_var: Whether the field should _only_ be included in the constructor of the dataclass, and not stored.
kw_only: Whether the field should be a keyword-only argument in the constructor of the dataclass.
metadata: The metadata list. Contains all the data that isn't expressed as direct `FieldInfo` attributes, including:
* Type-specific constraints, such as `gt` or `min_length` (these are converted to metadata classes such as `annotated_types.Gt`).
* Any other arbitrary object used within [`Annotated`][typing.Annotated] metadata
(e.g. [custom types handlers](../concepts/types.md#as-an-annotation) or any object not recognized by Pydantic).
"""
# TODO PEP 747: use TypeForm:
annotation: type[Any] | None
default: Any
default_factory: Callable[[], Any] | Callable[[dict[str, Any]], Any] | None
alias: str | None
alias_priority: int | None
validation_alias: str | AliasPath | AliasChoices | None
serialization_alias: str | None
title: str | None
field_title_generator: Callable[[str, FieldInfo], str] | None
description: str | None
examples: list[Any] | None
exclude: bool | None
exclude_if: Callable[[Any], bool] | None
discriminator: str | types.Discriminator | None
deprecated: Deprecated | str | bool | None
json_schema_extra: JsonDict | Callable[[JsonDict], None] | None
frozen: bool | None
validate_default: bool | None
repr: bool
init: bool | None
init_var: bool | None
kw_only: bool | None
metadata: list[Any]
__slots__ = (
'annotation',
'default',
'default_factory',
'alias',
'alias_priority',
'validation_alias',
'serialization_alias',
'title',
'field_title_generator',
'description',
'examples',
'exclude',
'exclude_if',
'discriminator',
'deprecated',
'json_schema_extra',
'frozen',
'validate_default',
'repr',
'init',
'init_var',
'kw_only',
'metadata',
'_attributes_set',
'_qualifiers',
'_complete',
'_original_assignment',
'_original_annotation',
'_final',
)
# used to convert kwargs to metadata/constraints,
# None has a special meaning - these items are collected into a `PydanticGeneralMetadata`
metadata_lookup: ClassVar[dict[str, Callable[[Any], Any] | None]] = {
'strict': types.Strict,
'gt': annotated_types.Gt,
'ge': annotated_types.Ge,
'lt': annotated_types.Lt,
'le': annotated_types.Le,
'multiple_of': annotated_types.MultipleOf,
'min_length': annotated_types.MinLen,
'max_length': annotated_types.MaxLen,
'pattern': None,
'allow_inf_nan': None,
'max_digits': None,
'decimal_places': None,
'union_mode': None,
'coerce_numbers_to_str': None,
'fail_fast': types.FailFast,
}
def __init__(self, **kwargs: Unpack[_FieldInfoInputs]) -> None:
"""This class should generally not be initialized directly; instead, use the `pydantic.fields.Field` function
or one of the constructor classmethods.
See the signature of `pydantic.fields.Field` for more details about the expected arguments.
"""
# Tracking the explicitly set attributes is necessary to correctly merge `Field()` functions
# (e.g. with `Annotated[int, Field(alias='a'), Field(alias=None)]`, even though `None` is the default value,
# we need to track that `alias=None` was explicitly set):
self._attributes_set = {k: v for k, v in kwargs.items() if v is not _Unset and k not in self.metadata_lookup}
kwargs = {k: _DefaultValues.get(k) if v is _Unset else v for k, v in kwargs.items()} # type: ignore
self.annotation = kwargs.get('annotation')
# Note: in theory, the second `pop()` arguments are not required below, as defaults are already set from `_DefaultsValues`.
default = kwargs.pop('default', PydanticUndefined)
if default is Ellipsis:
self.default = PydanticUndefined
self._attributes_set.pop('default', None)
else:
self.default = default
self.default_factory = kwargs.pop('default_factory', None)
if self.default is not PydanticUndefined and self.default_factory is not None:
raise TypeError('cannot specify both default and default_factory')
self.alias = kwargs.pop('alias', None)
self.validation_alias = kwargs.pop('validation_alias', None)
self.serialization_alias = kwargs.pop('serialization_alias', None)
alias_is_set = any(alias is not None for alias in (self.alias, self.validation_alias, self.serialization_alias))
self.alias_priority = kwargs.pop('alias_priority', None) or 2 if alias_is_set else None
self.title = kwargs.pop('title', None)
self.field_title_generator = kwargs.pop('field_title_generator', None)
self.description = kwargs.pop('description', None)
self.examples = kwargs.pop('examples', None)
self.exclude = kwargs.pop('exclude', None)
self.exclude_if = kwargs.pop('exclude_if', None)
self.discriminator = kwargs.pop('discriminator', None)
# For compatibility with FastAPI<=0.110.0, we preserve the existing value if it is not overridden
self.deprecated = kwargs.pop('deprecated', getattr(self, 'deprecated', None))
self.repr = kwargs.pop('repr', True)
self.json_schema_extra = kwargs.pop('json_schema_extra', None)
self.validate_default = kwargs.pop('validate_default', None)
self.frozen = kwargs.pop('frozen', None)
# currently only used on dataclasses
self.init = kwargs.pop('init', None)
self.init_var = kwargs.pop('init_var', None)
self.kw_only = kwargs.pop('kw_only', None)
self.metadata = self._collect_metadata(kwargs) # type: ignore
# Private attributes:
self._qualifiers: set[Qualifier] = set()
# Used to rebuild FieldInfo instances:
self._complete = True
self._original_annotation: Any = PydanticUndefined
self._original_assignment: Any = PydanticUndefined
# Used to track whether the `FieldInfo` instance represents the data about a field (and is exposed in `model_fields`/`__pydantic_fields__`),
# or if it is the result of the `Field()` function being used as metadata in an `Annotated` type/as an assignment
# (not an ideal pattern, see https://github.com/pydantic/pydantic/issues/11122):
self._final = False
@staticmethod
def from_field(default: Any = PydanticUndefined, **kwargs: Unpack[_FromFieldInfoInputs]) -> FieldInfo:
"""Create a new `FieldInfo` object with the `Field` function.
Args:
default: The default value for the field. Defaults to Undefined.
**kwargs: Additional arguments dictionary.
Raises:
TypeError: If 'annotation' is passed as a keyword argument.
Returns:
A new FieldInfo object with the given parameters.
Example:
This is how you can create a field with default value like this:
```python
import pydantic
class MyModel(pydantic.BaseModel):
foo: int = pydantic.Field(4)
```
"""
if 'annotation' in kwargs:
raise TypeError('"annotation" is not permitted as a Field keyword argument')
return FieldInfo(default=default, **kwargs)
@staticmethod
def from_annotation(annotation: type[Any], *, _source: AnnotationSource = AnnotationSource.ANY) -> FieldInfo:
"""Creates a `FieldInfo` instance from a bare annotation.
This function is used internally to create a `FieldInfo` from a bare annotation like this:
```python
import pydantic
class MyModel(pydantic.BaseModel):
foo: int # <-- like this
```
We also account for the case where the annotation can be an instance of `Annotated` and where
one of the (not first) arguments in `Annotated` is an instance of `FieldInfo`, e.g.:
```python
from typing import Annotated
import annotated_types
import pydantic
class MyModel(pydantic.BaseModel):
foo: Annotated[int, annotated_types.Gt(42)]
bar: Annotated[int, pydantic.Field(gt=42)]
```
Args:
annotation: An annotation object.
Returns:
An instance of the field metadata.
"""
try:
inspected_ann = inspect_annotation(
annotation,
annotation_source=_source,
unpack_type_aliases='skip',
)
except ForbiddenQualifier as e:
raise PydanticForbiddenQualifier(e.qualifier, annotation)
# TODO check for classvar and error?
# No assigned value, this happens when using a bare `Final` qualifier (also for other
# qualifiers, but they shouldn't appear here). In this case we infer the type as `Any`
# because we don't have any assigned value.
type_expr: Any = Any if inspected_ann.type is UNKNOWN else inspected_ann.type
final = 'final' in inspected_ann.qualifiers
metadata = inspected_ann.metadata
attr_overrides = {'annotation': type_expr}
if final:
attr_overrides['frozen'] = True
field_info = FieldInfo._construct(metadata, **attr_overrides)
field_info._qualifiers = inspected_ann.qualifiers
field_info._final = True
return field_info
@staticmethod
def from_annotated_attribute(
annotation: type[Any], default: Any, *, _source: AnnotationSource = AnnotationSource.ANY
) -> FieldInfo:
"""Create `FieldInfo` from an annotation with a default value.
This is used in cases like the following:
```python
from typing import Annotated
import annotated_types
import pydantic
class MyModel(pydantic.BaseModel):
foo: int = 4 # <-- like this
bar: Annotated[int, annotated_types.Gt(4)] = 4 # <-- or this
spam: Annotated[int, pydantic.Field(gt=4)] = 4 # <-- or this
```
Args:
annotation: The type annotation of the field.
default: The default value of the field.
Returns:
A field object with the passed values.
"""
if annotation is not MISSING and annotation is default:
raise PydanticUserError(
'Error when building FieldInfo from annotated attribute. '
"Make sure you don't have any field name clashing with a type annotation.",
code='unevaluable-type-annotation',
)
try:
inspected_ann = inspect_annotation(
annotation,
annotation_source=_source,
unpack_type_aliases='skip',
)
except ForbiddenQualifier as e:
raise PydanticForbiddenQualifier(e.qualifier, annotation)
# TODO check for classvar and error?
# TODO infer from the default, this can be done in v3 once we treat final fields with
# a default as proper fields and not class variables:
type_expr: Any = Any if inspected_ann.type is UNKNOWN else inspected_ann.type
final = 'final' in inspected_ann.qualifiers
metadata = inspected_ann.metadata
# HACK 1: the order in which the metadata is merged is inconsistent; we need to prepend
# metadata from the assignment at the beginning of the metadata. Changing this is only
# possible in v3 (at least). See https://github.com/pydantic/pydantic/issues/10507
prepend_metadata: list[Any] | None = None
attr_overrides = {'annotation': type_expr}
if final:
attr_overrides['frozen'] = True
# HACK 2: FastAPI is subclassing `FieldInfo` and historically expected the actual
# instance's type to be preserved when constructing new models with its subclasses as assignments.
# This code is never reached by Pydantic itself, and in an ideal world this shouldn't be necessary.
if not metadata and isinstance(default, FieldInfo) and type(default) is not FieldInfo:
field_info = default._copy()
field_info._attributes_set.update(attr_overrides)
for k, v in attr_overrides.items():
setattr(field_info, k, v)
return field_info
if isinstance(default, FieldInfo):
default_copy = default._copy() # Copy unnecessary when we remove HACK 1.
prepend_metadata = default_copy.metadata
default_copy.metadata = []
metadata = metadata + [default_copy]
if 'init_var' in inspected_ann.qualifiers:
# Only relevant for dataclasses, when `f: InitVar[<type>] = Field(...)`
# is used:
attr_overrides['init_var'] = True
elif isinstance(default, dataclasses.Field):
from_field = FieldInfo._from_dataclass_field(default)
prepend_metadata = from_field.metadata # Unnecessary when we remove HACK 1.
from_field.metadata = []
metadata = metadata + [from_field]
if 'init_var' in inspected_ann.qualifiers:
attr_overrides['init_var'] = True
if (init := getattr(default, 'init', None)) is not None:
attr_overrides['init'] = init
if (kw_only := getattr(default, 'kw_only', None)) is not None:
attr_overrides['kw_only'] = kw_only
else:
# `default` is the actual default value
attr_overrides['default'] = default
field_info = FieldInfo._construct(
prepend_metadata + metadata if prepend_metadata is not None else metadata, **attr_overrides
)
field_info._qualifiers = inspected_ann.qualifiers
field_info._final = True
return field_info
@classmethod
def _construct(cls, metadata: list[Any], **attr_overrides: Any) -> Self:
"""Construct the final `FieldInfo` instance, by merging the possibly existing `FieldInfo` instances from the metadata.
With the following example:
```python {test="skip" lint="skip"}
class Model(BaseModel):
f: Annotated[int, Gt(1), Field(description='desc', lt=2)]
```
`metadata` refers to the metadata elements of the `Annotated` form. This metadata is iterated over from left to right:
- If the element is a `Field()` function (which is itself a `FieldInfo` instance), the field attributes (such as
`description`) are saved to be set on the final `FieldInfo` instance.
On the other hand, some kwargs (such as `lt`) are stored as `metadata` (see `FieldInfo.__init__()`, calling
`FieldInfo._collect_metadata()`). In this case, the final metadata list is extended with the one from this instance.
- Else, the element is considered as a single metadata object, and is appended to the final metadata list.
Args:
metadata: The list of metadata elements to merge together. If the `FieldInfo` instance to be constructed is for
a field with an assigned `Field()`, this `Field()` assignment should be added as the last element of the
provided metadata.
**attr_overrides: Extra attributes that should be set on the final merged `FieldInfo` instance.
Returns:
The final merged `FieldInfo` instance.
"""
merged_metadata: list[Any] = []
merged_kwargs: dict[str, Any] = {}
for meta in metadata:
if isinstance(meta, FieldInfo):
merged_metadata.extend(meta.metadata)
new_js_extra: JsonDict | None = None
current_js_extra = meta.json_schema_extra
if current_js_extra is not None and 'json_schema_extra' in merged_kwargs:
# We need to merge `json_schema_extra`'s:
existing_js_extra = merged_kwargs['json_schema_extra']
if isinstance(existing_js_extra, dict):
if isinstance(current_js_extra, dict):
new_js_extra = {
**existing_js_extra,
**current_js_extra,
}
elif callable(current_js_extra):
warn(
'Composing `dict` and `callable` type `json_schema_extra` is not supported. '
'The `callable` type is being ignored. '
"If you'd like support for this behavior, please open an issue on pydantic.",
UserWarning,
)
elif callable(existing_js_extra) and isinstance(current_js_extra, dict):
warn(
'Composing `dict` and `callable` type `json_schema_extra` is not supported. '
'The `callable` type is being ignored. '
"If you'd like support for this behavior, please open an issue on pydantic.",
UserWarning,
)
# HACK: It is common for users to define "make model partial" (or similar) utilities, that
# convert all model fields to be optional (i.e. have a default value). To do so, they mutate
# each `FieldInfo` instance from `model_fields` to set a `default`, and use `create_model()`
# with `Annotated[<orig_type> | None, mutated_field_info]`` as an annotation. However, such
# mutations (by doing simple assignments) are only accidentally working, because we also
# need to track attributes explicitly set in `_attributes_set` (relying on default values for
# each attribute is *not* enough, for instance with `Annotated[int, Field(alias='a'), Field(alias=None)]`
# the resulting `FieldInfo` should have `alias=None`).
# To mitigate this, we add a special case when a "final" `FieldInfo` instance (that is an instance coming
# from `model_fields`) is used in annotated metadata (or assignment). In this case, we assume *all* attributes
# were explicitly set, and as such we use all of them (and this will correctly pick up the mutations).
# In theory, this shouldn't really be supported, you are only supposed to use the `Field()` function, not
# a `FieldInfo` instance directly (granted, `Field()` returns a `FieldInfo`, see
# https://github.com/pydantic/pydantic/issues/11122):
if meta._final:
merged_kwargs.update({attr: getattr(meta, attr) for attr in _Attrs})
else:
merged_kwargs.update(meta._attributes_set)
if new_js_extra is not None:
merged_kwargs['json_schema_extra'] = new_js_extra
elif typing_objects.is_deprecated(meta):
merged_kwargs['deprecated'] = meta
else:
merged_metadata.append(meta)
merged_kwargs.update(attr_overrides)
merged_field_info = cls(**merged_kwargs)
merged_field_info.metadata = merged_metadata
return merged_field_info
@staticmethod
@typing_extensions.deprecated(
"The 'merge_field_infos()' method is deprecated and will be removed in a future version. "
'If you relied on this method, please open an issue in the Pydantic issue tracker.',
category=None,
)
def merge_field_infos(*field_infos: FieldInfo, **overrides: Any) -> FieldInfo:
"""Merge `FieldInfo` instances keeping only explicitly set attributes.
Later `FieldInfo` instances override earlier ones.
Returns:
FieldInfo: A merged FieldInfo instance.
"""
if len(field_infos) == 1:
# No merging necessary, but we still need to make a copy and apply the overrides
field_info = field_infos[0]._copy()
field_info._attributes_set.update(overrides)
default_override = overrides.pop('default', PydanticUndefined)
if default_override is Ellipsis:
default_override = PydanticUndefined
if default_override is not PydanticUndefined:
field_info.default = default_override
for k, v in overrides.items():
setattr(field_info, k, v)
return field_info # type: ignore
merged_field_info_kwargs: dict[str, Any] = {}
metadata = {}
for field_info in field_infos:
attributes_set = field_info._attributes_set.copy()
try:
json_schema_extra = attributes_set.pop('json_schema_extra')
existing_json_schema_extra = merged_field_info_kwargs.get('json_schema_extra')
if existing_json_schema_extra is None:
merged_field_info_kwargs['json_schema_extra'] = json_schema_extra
if isinstance(existing_json_schema_extra, dict):
if isinstance(json_schema_extra, dict):
merged_field_info_kwargs['json_schema_extra'] = {
**existing_json_schema_extra,
**json_schema_extra,
}
if callable(json_schema_extra):
warn(
'Composing `dict` and `callable` type `json_schema_extra` is not supported.'
'The `callable` type is being ignored.'
"If you'd like support for this behavior, please open an issue on pydantic.",
PydanticJsonSchemaWarning,
)
elif callable(json_schema_extra):
# if ever there's a case of a callable, we'll just keep the last json schema extra spec
merged_field_info_kwargs['json_schema_extra'] = json_schema_extra
except KeyError:
pass
# later FieldInfo instances override everything except json_schema_extra from earlier FieldInfo instances
merged_field_info_kwargs.update(attributes_set)
for x in field_info.metadata:
if not isinstance(x, FieldInfo):
metadata[type(x)] = x
merged_field_info_kwargs.update(overrides)
field_info = FieldInfo(**merged_field_info_kwargs)
field_info.metadata = list(metadata.values())
return field_info
@staticmethod
def _from_dataclass_field(dc_field: DataclassField[Any]) -> FieldInfo:
"""Return a new `FieldInfo` instance from a `dataclasses.Field` instance.
Args:
dc_field: The `dataclasses.Field` instance to convert.
Returns:
The corresponding `FieldInfo` instance.
Raises:
TypeError: If any of the `FieldInfo` kwargs does not match the `dataclass.Field` kwargs.
"""
default = dc_field.default
if default is dataclasses.MISSING:
default = _Unset
if dc_field.default_factory is dataclasses.MISSING:
default_factory = _Unset
else:
default_factory = dc_field.default_factory
# use the `Field` function so in correct kwargs raise the correct `TypeError`
dc_field_metadata = {k: v for k, v in dc_field.metadata.items() if k in _FIELD_ARG_NAMES}
if sys.version_info >= (3, 14) and dc_field.doc is not None:
dc_field_metadata['description'] = dc_field.doc
return Field(default=default, default_factory=default_factory, repr=dc_field.repr, **dc_field_metadata) # pyright: ignore[reportCallIssue]
@staticmethod
def _collect_metadata(kwargs: dict[str, Any]) -> list[Any]:
"""Collect annotations from kwargs.
Args:
kwargs: Keyword arguments passed to the function.
Returns:
A list of metadata objects - a combination of `annotated_types.BaseMetadata` and
`PydanticMetadata`.
"""
metadata: list[Any] = []
general_metadata = {}
for key, value in list(kwargs.items()):
try:
marker = FieldInfo.metadata_lookup[key]
except KeyError:
continue
del kwargs[key]
if value is not None:
if marker is None:
general_metadata[key] = value
else:
metadata.append(marker(value))
if general_metadata:
metadata.append(_fields.pydantic_general_metadata(**general_metadata))
return metadata
@property
def deprecation_message(self) -> str | None:
"""The deprecation message to be emitted, or `None` if not set."""
if self.deprecated is None:
return None
if isinstance(self.deprecated, bool):
return 'deprecated' if self.deprecated else None
return self.deprecated if isinstance(self.deprecated, str) else self.deprecated.message
@property
def default_factory_takes_validated_data(self) -> bool | None:
"""Whether the provided default factory callable has a validated data parameter.
Returns `None` if no default factory is set.
"""
if self.default_factory is not None:
return _fields.takes_validated_data_argument(self.default_factory)
@overload
def get_default(
self, *, call_default_factory: Literal[True], validated_data: dict[str, Any] | None = None
) -> Any: ...
@overload
def get_default(self, *, call_default_factory: Literal[False] = ...) -> Any: ...
def get_default(self, *, call_default_factory: bool = False, validated_data: dict[str, Any] | None = None) -> Any:
"""Get the default value.
We expose an option for whether to call the default_factory (if present), as calling it may
result in side effects that we want to avoid. However, there are times when it really should
be called (namely, when instantiating a model via `model_construct`).
Args:
call_default_factory: Whether to call the default factory or not.
validated_data: The already validated data to be passed to the default factory.
Returns:
The default value, calling the default factory if requested or `None` if not set.
"""
if self.default_factory is None:
return _utils.smart_deepcopy(self.default)
elif call_default_factory:
if self.default_factory_takes_validated_data:
fac = cast('Callable[[dict[str, Any]], Any]', self.default_factory)
if validated_data is None:
raise ValueError(
"The default factory requires the 'validated_data' argument, which was not provided when calling 'get_default'."
)
return fac(validated_data)
else:
fac = cast('Callable[[], Any]', self.default_factory)
return fac()
else:
return None
def is_required(self) -> bool:
"""Check if the field is required (i.e., does not have a default value or factory).
Returns:
`True` if the field is required, `False` otherwise.
"""
return self.default is PydanticUndefined and self.default_factory is None
def rebuild_annotation(self) -> Any:
"""Attempts to rebuild the original annotation for use in function signatures.
If metadata is present, it adds it to the original annotation using
`Annotated`. Otherwise, it returns the original annotation as-is.
Note that because the metadata has been flattened, the original annotation
may not be reconstructed exactly as originally provided, e.g. if the original
type had unrecognized annotations, or was annotated with a call to `pydantic.Field`.
Returns:
The rebuilt annotation.
"""
if not self.metadata:
return self.annotation
else:
# Annotated arguments must be a tuple
return Annotated[(self.annotation, *self.metadata)] # type: ignore
def apply_typevars_map(
self,
typevars_map: Mapping[TypeVar, Any] | None,
globalns: GlobalsNamespace | None = None,
localns: MappingNamespace | None = None,
) -> None:
"""Apply a `typevars_map` to the annotation.
This method is used when analyzing parametrized generic types to replace typevars with their concrete types.
This method applies the `typevars_map` to the annotation in place.
Args:
typevars_map: A dictionary mapping type variables to their concrete types.
globalns: The globals namespace to use during type annotation evaluation.
localns: The locals namespace to use during type annotation evaluation.
See Also:
pydantic._internal._generics.replace_types is used for replacing the typevars with
their concrete types.
"""
annotation = _generics.replace_types(self.annotation, typevars_map)
annotation, evaluated = _typing_extra.try_eval_type(annotation, globalns, localns)
self.annotation = annotation
if not evaluated:
self._complete = False
self._original_annotation = self.annotation
def asdict(self) -> _FieldInfoAsDict:
"""Return a dictionary representation of the `FieldInfo` instance.
The returned value is a dictionary with three items:
* `annotation`: The type annotation of the field.
* `metadata`: The metadata list.
* `attributes`: A mapping of the remaining `FieldInfo` attributes to their values (e.g. `alias`, `title`).
"""
return {
'annotation': self.annotation,
'metadata': self.metadata,
'attributes': {attr: getattr(self, attr) for attr in _Attrs},
}
def _copy(self) -> Self:
"""Return a copy of the `FieldInfo` instance."""
# Note: we can't define a custom `__copy__()`, as `FieldInfo` is being subclassed
# by some third-party libraries with extra attributes defined (and as `FieldInfo`
# is slotted, we can't make a copy of the `__dict__`).
copied = copy(self)
for attr_name in ('metadata', '_attributes_set', '_qualifiers'):
# Apply "deep-copy" behavior on collections attributes:
value = getattr(copied, attr_name).copy()
setattr(copied, attr_name, value)
return copied
def __repr_args__(self) -> ReprArgs:
yield 'annotation', _repr.PlainRepr(_repr.display_as_type(self.annotation))
yield 'required', self.is_required()
for s in self.__slots__:
# TODO: properly make use of the protocol (https://rich.readthedocs.io/en/stable/pretty.html#rich-repr-protocol)
# By yielding a three-tuple:
if s in (
'annotation',
'_attributes_set',
'_qualifiers',
'_complete',
'_original_assignment',
'_original_annotation',
'_final',
):
continue
elif s == 'metadata' and not self.metadata:
continue
elif s == 'repr' and self.repr is True:
continue
if s == 'frozen' and self.frozen is False:
continue
if s == 'validation_alias' and self.validation_alias == self.alias:
continue
if s == 'serialization_alias' and self.serialization_alias == self.alias:
continue
if s == 'default' and self.default is not PydanticUndefined:
yield 'default', self.default
elif s == 'default_factory' and self.default_factory is not None:
yield 'default_factory', _repr.PlainRepr(_repr.display_as_type(self.default_factory))
else:
value = getattr(self, s)
if value is not None and value is not PydanticUndefined:
yield s, value
| FieldInfo |
python | django__django | tests/user_commands/tests.py | {
"start": 984,
"end": 1920
} | class ____(SimpleTestCase):
def test_unhandled_exceptions(self):
cases = [
StringIO("Hello world"),
TextIOWrapper(BytesIO(b"Hello world")),
]
for out in cases:
with self.subTest(out=out):
wrapper = OutputWrapper(out)
out.close()
unraisable_exceptions = []
def unraisablehook(unraisable):
unraisable_exceptions.append(unraisable)
sys.__unraisablehook__(unraisable)
with mock.patch.object(sys, "unraisablehook", unraisablehook):
del wrapper
self.assertEqual(unraisable_exceptions, [])
# A minimal set of apps to avoid system checks running on all apps.
@override_settings(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"user_commands",
],
)
| OutputWrapperTests |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/system.py | {
"start": 53562,
"end": 54484
} | class ____:
"""The ``context`` object available to a type check function on a DagsterType."""
def __init__(
self,
run_id: str,
log_manager: DagsterLogManager,
scoped_resources_builder: ScopedResourcesBuilder,
dagster_type: DagsterType,
):
self._run_id = run_id
self._log = log_manager
self._resources = scoped_resources_builder.build(dagster_type.required_resource_keys)
@public
@property
def resources(self) -> "Resources":
"""An object whose attributes contain the resources available to this op."""
return self._resources
@public
@property
def run_id(self) -> str:
"""The id of this job run."""
return self._run_id
@public
@property
def log(self) -> DagsterLogManager:
"""Centralized log dispatch from user code."""
return self._log
@public
| TypeCheckContext |
python | protocolbuffers__protobuf | python/google/protobuf/internal/containers.py | {
"start": 12373,
"end": 15854
} | class ____(MutableMapping[_K, _V]):
"""Simple, type-checked, dict-like container for holding repeated scalars."""
# Disallows assignment to other attributes.
__slots__ = ['_key_checker', '_value_checker', '_values', '_message_listener',
'_entry_descriptor']
def __init__(
self,
message_listener: Any,
key_checker: Any,
value_checker: Any,
entry_descriptor: Any,
) -> None:
"""
Args:
message_listener: A MessageListener implementation.
The ScalarMap will call this object's Modified() method when it
is modified.
key_checker: A type_checkers.ValueChecker instance to run on keys
inserted into this container.
value_checker: A type_checkers.ValueChecker instance to run on values
inserted into this container.
entry_descriptor: The MessageDescriptor of a map entry: key and value.
"""
self._message_listener = message_listener
self._key_checker = key_checker
self._value_checker = value_checker
self._entry_descriptor = entry_descriptor
self._values = {}
def __getitem__(self, key: _K) -> _V:
try:
return self._values[key]
except KeyError:
key = self._key_checker.CheckValue(key)
val = self._value_checker.DefaultValue()
self._values[key] = val
return val
def __contains__(self, item: _K) -> bool:
# We check the key's type to match the strong-typing flavor of the API.
# Also this makes it easier to match the behavior of the C++ implementation.
self._key_checker.CheckValue(item)
return item in self._values
@overload
def get(self, key: _K) -> Optional[_V]:
...
@overload
def get(self, key: _K, default: _T) -> Union[_V, _T]:
...
# We need to override this explicitly, because our defaultdict-like behavior
# will make the default implementation (from our base class) always insert
# the key.
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def __setitem__(self, key: _K, value: _V) -> _T:
checked_key = self._key_checker.CheckValue(key)
checked_value = self._value_checker.CheckValue(value)
self._values[checked_key] = checked_value
self._message_listener.Modified()
def __delitem__(self, key: _K) -> None:
del self._values[key]
self._message_listener.Modified()
def __len__(self) -> int:
return len(self._values)
def __iter__(self) -> Iterator[_K]:
return iter(self._values)
def __repr__(self) -> str:
return repr(self._values)
def setdefault(self, key: _K, value: Optional[_V] = None) -> _V:
if value == None:
raise ValueError('The value for scalar map setdefault must be set.')
if key not in self._values:
self.__setitem__(key, value)
return self[key]
def MergeFrom(self, other: 'ScalarMap[_K, _V]') -> None:
self._values.update(other._values)
self._message_listener.Modified()
def InvalidateIterators(self) -> None:
# It appears that the only way to reliably invalidate iterators to
# self._values is to ensure that its size changes.
original = self._values
self._values = original.copy()
original[None] = None
# This is defined in the abstract base, but we can do it much more cheaply.
def clear(self) -> None:
self._values.clear()
self._message_listener.Modified()
def GetEntryClass(self) -> Any:
return self._entry_descriptor._concrete_class
| ScalarMap |
python | joblib__joblib | joblib/externals/loky/cloudpickle_wrapper.py | {
"start": 119,
"end": 819
} | class ____:
def __init__(self, obj, keep_wrapper=False):
self._obj = obj
self._keep_wrapper = keep_wrapper
def __reduce__(self):
_pickled_object = dumps(self._obj)
if not self._keep_wrapper:
return loads, (_pickled_object,)
return _reconstruct_wrapper, (_pickled_object, self._keep_wrapper)
def __getattr__(self, attr):
# Ensure that the wrapped object can be used seemlessly as the
# previous object.
if attr not in ["_obj", "_keep_wrapper"]:
return getattr(self._obj, attr)
return getattr(self, attr)
# Make sure the wrapped object conserves the callable property
| CloudpickledObjectWrapper |
python | tensorflow__tensorflow | tensorflow/python/data/ops/unique_op.py | {
"start": 1072,
"end": 1915
} | class ____(dataset_ops.UnaryUnchangedStructureDataset):
"""A dataset containing the unique elements of an input dataset."""
def __init__(self, input_dataset, name=None):
"""See `tf.data.Dataset.unique` for details."""
self._input_dataset = input_dataset
for ty in nest.flatten(dataset_ops.get_legacy_output_types(input_dataset)):
if ty not in (dtypes.int32, dtypes.int64, dtypes.string):
raise TypeError(
f"`tf.data.Dataset.unique` does not support type {ty} -- only "
f"`tf.int32`, `tf.int64`, and `tf.string` are supported.")
self._name = name
variant_tensor = gen_experimental_dataset_ops.unique_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**self._common_args)
super().__init__(input_dataset, variant_tensor)
| _UniqueDataset |
python | chroma-core__chroma | chromadb/ingest/__init__.py | {
"start": 1061,
"end": 2512
} | class ____(Component):
"""Interface for writing embeddings to an ingest stream"""
@abstractmethod
def delete_log(self, collection_id: UUID) -> None:
pass
@abstractmethod
def purge_log(self, collection_id: UUID) -> None:
"""Truncates the log for the given collection, removing all seen records."""
pass
@abstractmethod
def submit_embedding(
self, collection_id: UUID, embedding: OperationRecord
) -> SeqId:
"""Add an embedding record to the given collections log. Returns the SeqID of the record."""
pass
@abstractmethod
def submit_embeddings(
self, collection_id: UUID, embeddings: Sequence[OperationRecord]
) -> Sequence[SeqId]:
"""Add a batch of embedding records to the given collections log. Returns the SeqIDs of
the records. The returned SeqIDs will be in the same order as the given
SubmitEmbeddingRecords. However, it is not guaranteed that the SeqIDs will be
processed in the same order as the given SubmitEmbeddingRecords. If the number
of records exceeds the maximum batch size, an exception will be thrown."""
pass
@property
@abstractmethod
def max_batch_size(self) -> int:
"""Return the maximum number of records that can be submitted in a single call
to submit_embeddings."""
pass
ConsumerCallbackFn = Callable[[Sequence[LogRecord]], None]
| Producer |
python | tensorflow__tensorflow | tensorflow/compiler/tests/reverse_sequence_op_test.py | {
"start": 946,
"end": 3362
} | class ____(xla_test.XLATestCase):
def _testReverseSequence(self,
x,
batch_axis,
seq_axis,
seq_lengths,
truth,
expected_err_re=None):
with self.session():
p = array_ops.placeholder(dtypes.as_dtype(x.dtype))
lengths = array_ops.placeholder(dtypes.as_dtype(seq_lengths.dtype))
with self.test_scope():
ans = array_ops.reverse_sequence(
p, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=lengths)
if expected_err_re is None:
tf_ans = ans.eval(feed_dict={p: x, lengths: seq_lengths})
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval(feed_dict={p: x, lengths: seq_lengths})
def testSimple(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
expected = np.array([[1, 2, 3], [6, 5, 4], [8, 7, 9]], dtype=np.int32)
self._testReverseSequence(
x,
batch_axis=0,
seq_axis=1,
seq_lengths=np.array([1, 3, 2], np.int32),
truth=expected)
def _testBasic(self, dtype, len_dtype):
x = np.asarray(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]]],
dtype=dtype)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
# reverse dim 2 up to (0:3, none, 0:4) along dim=0
seq_lengths = np.asarray([3, 0, 4], dtype=len_dtype)
truth_orig = np.asarray(
[
[[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
[[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
[[20, 19, 18, 17], [24, 23, 22, 21]]
], # reverse 0:4 (all)
dtype=dtype)
truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
truth = truth_orig.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
seq_axis = 0 # permute seq_axis and batch_axis (originally 2 and 0, resp.)
batch_axis = 2
self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth)
def testSeqLength(self):
for dtype in self.all_types:
for seq_dtype in self.all_types & {np.int32, np.int64}:
self._testBasic(dtype, seq_dtype)
if __name__ == "__main__":
test.main()
| ReverseSequenceTest |
python | pytorch__pytorch | torch/_inductor/codegen/simd.py | {
"start": 12298,
"end": 48096
} | class ____(Kernel[CSEVariableType], Generic[CSEVariableType]):
"""
Common base class for Triton/Halide codegen which both use flattened indexing rather than loop nests.
"""
sexpr: Callable[[sympy.Expr], str] = pexpr
kexpr: Callable[[sympy.Expr], str]
allow_block_ptr: bool = False
# pyrefly: ignore [bad-override]
kernel_name: str
def __init__(
    self,
    tiling: dict[str, sympy.Expr],
    features: SIMDKernelFeatures,
    pid_cache: Optional[dict[str, str]] = None,
    override_persistent_reduction: Optional[bool] = None,
    override_cooperative_reduction: Optional[bool] = None,
    tiling_scores: Optional[dict[str, sympy.Expr]] = None,
    mix_order_reduction: bool = False,
) -> None:
    """Initialize kernel state from the chosen tiling and node features.

    Args:
        tiling: prefix (e.g. "x", "y", "z", "r0_") -> numel per dimension.
        features: precomputed analysis of the nodes fused into this kernel.
        pid_cache: optional overrides for program-id expressions.
        override_persistent_reduction: force persistent reduction on/off
            instead of consulting should_use_persistent_reduction().
        override_cooperative_reduction: force cooperative reduction on/off
            instead of consulting should_use_cooperative_reduction().
        tiling_scores: optional per-prefix scores for the chosen tiling.
        mix_order_reduction: enable the mixed-order reduction codegen path.
    """
    if pid_cache is None:
        pid_cache = {}
    super().__init__()
    self.features = features
    self.mutations = features.get_mutations()
    self.body = IndentedBuffer()
    self.indexing_code = IndentedBuffer()
    self.numels = {
        prefix: V.graph.sizevars.simplify(val) for prefix, val in tiling.items()
    }
    self.range_trees: list[IterationRangesRoot] = []
    self.range_tree_nodes: dict[sympy.Symbol, IterationRangesEntry] = {}
    self.iter_vars_count = itertools.count()
    self.inside_reduction = features.is_reduction()
    # Explicit overrides win over the should_use_* heuristics.
    self.cooperative_reduction: bool = (
        override_cooperative_reduction
        if override_cooperative_reduction is not None
        else self.should_use_cooperative_reduction()
    )
    self.tiling_scores: Optional[dict[str, sympy.Expr]] = tiling_scores
    self.tiling: dict[str, sympy.Expr] = tiling
    self.persistent_reduction: bool = (
        override_persistent_reduction
        if override_persistent_reduction is not None
        else self.should_use_persistent_reduction()
    )
    self.mix_order_reduction: bool = mix_order_reduction
    self.no_x_dim = self.want_no_x_dim()
    self.code_hash: Optional[str] = None
    # Info to enable multiple store_output calls for epilogue subtiling
    self.store_output_ctr = itertools.count()
    # Scan the schedule for a "dot" reduction; those take the
    # native-matmul codegen path (gated on config.triton.native_matmul).
    self.is_native_matmul = False
    if config.triton.native_matmul:
        for node in self.features.node_schedule:
            if (
                isinstance(node, scheduler.SchedulerNode)
                and isinstance(node.node, ir.ComputedBuffer)
                and node.node.get_reduction_type() == "dot"
            ):
                self.is_native_matmul = True
                break

    # define this in a closure to make cache local to object
    @functools.cache
    def simplify_indexing(index: sympy.Expr):
        index = V.graph.sizevars.simplify_with_ranges(index, self.var_ranges())
        for tree in self.range_trees:
            index = self.combine_contiguous_dims(index, tree)
        return self.combine_modular_indexing_pairs(index)

    self.simplify_indexing = simplify_indexing
    self.initialize_range_tree(pid_cache)
    # NOTE(review): initialized here, populated during codegen elsewhere —
    # semantics of rsplit_size/saved_partial_accumulate not visible in this chunk.
    self.rsplit_size = 0
    self.saved_partial_accumulate: list[PartialAccumulate] = []
def _get_store_output_subgraph_name(self, i: int) -> str:
return f"<STORE_OUTPUT_{i}>"
def get_store_output_count(self):
    """Read the current store_output counter value, then rebuild the counter.

    ``next()`` consumes a value, so the counter object is re-created afterwards.
    NOTE(review): the rebuilt counter restarts at ``total - 1`` rather than
    ``total`` — confirm this off-by-one is intentional.
    """
    current = next(self.store_output_ctr)
    self.store_output_ctr = itertools.count(start=current - 1, step=1)
    return current
@property
@cache_property_on_self
def num_reduction_dims(self) -> int:
    """Number of reduction prefixes (r0_, r1_, ...) present in this kernel's tiling."""
    return sum(prefix_is_reduction(prefix) for prefix in self.numels)
def dtype_to_str(self, dtype: torch.dtype) -> str:
    """Render a torch dtype as backend source text; implemented by subclasses."""
    raise NotImplementedError

def get_index_dtype_as_torch_dtype(self) -> torch.dtype:
    # Delegates to the kernel features; presumably selects a dtype wide
    # enough for all index expressions — defined on SIMDKernelFeatures.
    return self.features.select_index_dtype()

@property
def index_dtype(self) -> str:
    """The selected index dtype rendered as backend source text."""
    return self.dtype_to_str(self.get_index_dtype_as_torch_dtype())

def want_no_x_dim(self) -> bool:
    """Whether to drop the x tensor dimension; subclasses may override."""
    return False
def construct_range_trees(
    self,
    pid_cache: Optional[dict[str, str]],
    inside_reduction: bool,
    is_reduction: bool,
    numels: dict[str, sympy.Expr],
    no_x_dim: bool,
) -> list[IterationRangesRoot]:
    """Build one IterationRangesRoot per active prefix, assigning each an
    optional tensor dimension and an optional launch-grid dimension."""
    active_prefixes = OrderedSet(
        prefix for prefix in all_prefixes if prefix in numels
    )
    # There is no reduction tensor dim when not a reduction kernel, or when
    # codegen is currently outside the reduction.
    no_r_dim = not inside_reduction or not is_reduction

    def filtered_index_map(seq, mask) -> dict[Any, int]:
        # Maps each kept element of seq to its index within the filtered subsequence.
        return {
            val: idx for idx, val in enumerate(val for val in seq if val in mask)
        }

    grid_dims = ["x", "y", "z"]
    pointwise_tensor_dims = list(reversed(grid_dims))
    reduction_dims = ["r0_", "r1_"]
    if no_x_dim:
        tensor_dims = reduction_dims
    elif no_r_dim:
        tensor_dims = pointwise_tensor_dims
    else:
        tensor_dims = pointwise_tensor_dims + reduction_dims
    # Filter out unused tensor dims.
    # Convert to dicts for O(1) index lookup.
    tensor_dim_map = filtered_index_map(tensor_dims, active_prefixes)
    grid_dim_map = filtered_index_map(grid_dims, all_prefixes)
    range_trees = []
    for i, prefix in enumerate(active_prefixes):
        is_reduction = prefix_is_reduction(prefix)
        tensor_dim = tensor_dim_map.get(prefix)
        grid_dim = grid_dim_map.get(prefix)
        index = i if grid_dim is None else grid_dim
        range_trees.append(
            IterationRangesRoot(
                f"{prefix}index",
                numels[prefix],
                prefix,
                index,
                self,  # type: ignore[arg-type]
                pid_cache=pid_cache,
                # Reductions loop over their range unless persistent.
                is_loop=is_reduction and not self.persistent_reduction,
                tensor_dim=tensor_dim,
                grid_dim=grid_dim,
                has_zdim="z" in numels,
            )
        )
    return range_trees
def initialize_range_tree(self, pid_cache: dict[str, str]) -> None:
    """Construct this kernel's iteration range trees and record them."""
    self.range_trees.extend(
        self.construct_range_trees(
            pid_cache,
            self.inside_reduction,
            self.features.is_reduction(),
            self.numels,
            self.no_x_dim,
        )
    )
def finalize_indexing(self, indices: Sequence[sympy.Expr]) -> None:
    """
    Hook called right before codegen with every index that will be
    used in the fused kernel.  No-op in the base class.
    """
def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable) -> None:
prior = self.inside_reduction
self.inside_reduction = False
try:
return self.store(name, index, value)
finally:
self.inside_reduction = prior
def should_use_cooperative_reduction(self) -> bool:
    """Heuristic for cooperative reductions; the base class always says no."""
    return False  # defined in subclass

def should_use_persistent_reduction(self) -> bool:
    """Heuristic for persistent (loop-free) reductions; base class says no."""
    return False  # defined in subclass
def var_ranges(self) -> dict[sympy.Symbol, sympy.Expr]:
return dict(
itertools.chain.from_iterable(
tree.var_ranges.items() for tree in self.range_trees
)
)
def triton_tensor_ndim(self) -> int:
    """Number of range trees that own a tensor dimension."""
    return sum(1 for tree in self.range_trees if tree.tensor_dim is not None)
def indexing_size_str(self, i: int) -> str:
    """Broadcast-shape subscript with ':' at position i and 'None' elsewhere."""
    dims = [":" if d == i else "None" for d in range(self.triton_tensor_ndim())]
    return f"[{', '.join(dims)}]"
def dense_size_list(self) -> list[str]:
    """Per-tensor-dim block-size names (e.g. 'XBLOCK'); inactive dims are '1'.

    Reduction trees only contribute while codegen is inside the reduction.
    """
    sizes = ["1"] * self.triton_tensor_ndim()
    for tree in self.range_trees:
        if tree.tensor_dim is None:
            continue
        # pyrefly: ignore [missing-argument]
        if self.inside_reduction or not tree.is_reduction:
            sizes[tree.tensor_dim] = f"{tree.prefix.upper()}BLOCK"
    return sizes
def create_constant_mask(self, entry) -> str:
    """Emit an all-True mask for `entry`, broadcast to the kernel's tensor shape."""
    prefix = entry.prefix
    if entry.tensor_dim is None:
        # No tensor dim of its own: make a dense full-shape constant mask.
        return f"{prefix}mask = tl.full({self.dense_size_str()}, True, tl.int1)"
    broadcast = ["None"] * self.triton_tensor_ndim()
    broadcast[entry.tensor_dim] = ":"
    subscript = ", ".join(broadcast)
    return (
        f"{prefix}mask = tl.full([{prefix.upper()}BLOCK], True, tl.int1)"
        f"[{subscript}]"
    )
def dense_size_str(self) -> str:
    """The dense size list rendered as a bracketed, comma-separated string."""
    return "[" + ", ".join(self.dense_size_list()) + "]"
def combine_modular_indexing_pairs(self, index: sympy.Expr) -> sympy.Expr:
    """Collapse a ModularIndexing over a known range-tree variable into a
    simpler expression on a freshly-constructed full-range variable."""
    if not isinstance(index, ModularIndexing):
        return index
    x = index.args[0]
    if (tree_node := self.range_tree_nodes.get(x)) is None:
        # Not one of our iteration variables; nothing to combine.
        return index
    new_index = sympy_subs(index, {x: tree_node.expr})
    new_index = V.graph.sizevars.combine_modular_indexing_pairs(new_index)
    # the index now contains xindex/etc, which is nonstandard, fix it up
    return sympy_subs(
        new_index,
        {
            tree_node.root.index_sym(): tree_node.root.lookup(
                sympy.S.One, tree_node.root.numel
            ).symbol()
        },
    )
def combine_contiguous_dims(
    self, index: sympy.Expr, tree: IterationRangesRoot
) -> sympy.Expr:
    """Merge contiguous dims in `index`, handling a factored floor-div form."""
    expanded = V.graph.sizevars.expand_floor_div(index)
    if not expanded:
        return self._combine_contiguous_dims(index, tree)
    numerator, denominator = expanded  # type: ignore[misc]
    return FloorDiv(self._combine_contiguous_dims(numerator, tree), denominator)
def _combine_contiguous_dims(
    self, index: sympy.Expr, tree: IterationRangesRoot
) -> sympy.Expr:
    """
    More aggressive simplification to merge contiguous dims
    """
    if isinstance(index, (sympy.Integer, sympy.Symbol)):
        # Atomic expressions have no dims to merge.
        return index
    index_vars, sizes = tree.vars_and_sizes(index)
    if len(sizes) <= 1:
        return index
    new_sizes, reindex, _prune = V.graph.sizevars._simplify_loops(
        index_vars, sizes, index_prevent_reordering([index], index_vars, sizes)
    )
    if new_sizes == sizes:
        # Nothing merged; keep the original expression.
        return index
    new_index_vars = tree.construct(new_sizes)
    new_index = sympy_subs(index, dict(zip(index_vars, reindex(new_index_vars))))
    return new_index
def disable_reduction(self) -> contextlib.AbstractContextManager[None]:
    """Context manager that temporarily marks codegen as outside the
    reduction, flushing pending code around the switch when needed."""
    should_flush = self.range_trees[-1].is_loop or self.cooperative_reduction

    @contextlib.contextmanager
    def ctx():
        if not self.features.is_reduction():
            assert not self.inside_reduction
            yield
            return
        if should_flush:
            # calling codegen_body() will flush all the pending buffers
            # and write out a reduction loop
            self.codegen_body()
        self.inside_reduction = False
        try:
            yield
            if should_flush:
                # flush out any code before opening the next loop
                self.codegen_body()
        finally:
            self.inside_reduction = True

    return ctx()
def set_ranges(self, *lengths: sympy.Expr) -> list[sympy.Symbol]:
assert len(lengths) == len(self.range_trees)
return [
ranges.construct(length)
for length, ranges in zip(lengths, self.range_trees)
]
@staticmethod
def _split_iteration_ranges(
    groups: Iterable[sympy.Expr], lengths: Sequence[Sequence[sympy.Expr]]
) -> tuple[
    list[list[sympy.Expr]], list[list[Callable[[list[sympy.Expr]], sympy.Expr]]]
]:
    """Re-split each node's `lengths` to match the kernel's `groups`.

    Returns (new_ranges, return_getters_groups): the per-group range sizes,
    and for each node a list of functions that reconstruct its original
    index expressions from the flat iteration variables.
    Raises CantSplit when the sizes cannot be matched by divisibility.
    """
    # Special case: if a node's sizes are ([], []), there's nothing to split.
    if all(len(length) == 0 for length in lengths):
        return [[] for group in groups], []
    sv = V.graph.sizevars
    new_ranges: list[list[sympy.Expr]] = [[] for _ in groups]
    remaining = [sv.simplify(g) for g in groups]
    var_count = itertools.count()

    def add_range(i: int, expr: sympy.Expr) -> int:
        # Consume `expr` from group i's remaining numel; returns the flat
        # variable index assigned to this new range.
        expr = sv.simplify(expr)
        if not sv.statically_known_multiple_of(remaining[i], expr):
            raise CantSplit
        # guard on the last item out
        remaining[i] = FloorDiv(remaining[i], expr)
        new_ranges[i].append(expr)
        return next(var_count)

    def make_combined(
        sizes: list[sympy.Expr], idxs: list[int]
    ) -> Callable[[list[sympy.Expr]], sympy.Expr]:
        """
        Builds the nested expression:
            ((...((s1*v[i1] + v[i2]) * s2 + v[i3]) ... ) * sk + v[i(k+1)])
        """
        assert len(idxs) == len(sizes) + 1

        def getter(flat_vars: list[sympy.Expr]) -> sympy.Expr:
            expr = flat_vars[idxs[0]]
            for s, idx in zip(sizes, idxs[1:]):
                expr = s * expr + flat_vars[idx]
            return expr

        return getter

    return_getters_groups = []
    current_group = 0
    for length_group in lengths:
        return_getters = []
        for size in length_group:
            if sv.statically_known_equals(size, 1):  # type: ignore[arg-type]
                # Size-1 dims contribute nothing to the index.
                return_getters.append(lambda _: sympy.S.Zero)
                continue
            while current_group < len(remaining) and sv.statically_known_equals(
                remaining[current_group],
                1,  # type: ignore[arg-type]
            ):
                # scroll to next group with remaining elements
                current_group += 1
            # During native matmul on bmm, we enforce tiling order (z, y, x, r).
            # When fusing a bmm node with loop (z, y, x, r) with a pw node
            # of shape (z*y*x, 1), we need to split the pw iteration range
            # into three dimensions.
            # The group becomes [z, y, x, 1], with lengths ([z*y*x], []).
            # In this case, we decompose the combined size z*y*x into three
            # consecutive groups. Previously, _split_iteration_ranges supported
            # splitting into at most two dimensions, but we now extend it to do
            # three splits when the total size is divisible by all three.
            # is group having (z,y,x,r=1) form?
            is_bmm_then_pw = len(remaining) == 4 and remaining[-1] == 1
            if (
                current_group + 2 < len(remaining)
                and sv.statically_known_gt(
                    size, remaining[current_group] * remaining[current_group + 1]
                )
                and is_bmm_then_pw
            ):
                # need to break size in three
                if not sv.statically_known_multiple_of(
                    size, remaining[current_group] * remaining[current_group + 1]
                ):
                    raise CantSplit
                size1 = remaining[current_group]
                size2 = remaining[current_group + 1]
                size3 = FloorDiv(size, size1 * size2)
                return_getters.append(
                    make_combined(
                        [size2, size3],
                        [
                            add_range(current_group, size1),
                            add_range(current_group + 1, size2),
                            add_range(current_group + 2, size3),
                        ],
                    )
                )
            # Two-dimensional tiling
            elif current_group + 1 < len(remaining) and sv.statically_known_gt(
                size, remaining[current_group]
            ):
                # need to break size in two
                if not sv.statically_known_multiple_of(
                    size, remaining[current_group]
                ):
                    raise CantSplit
                size1 = remaining[current_group]
                size2 = FloorDiv(size, remaining[current_group])
                return_getters.append(
                    make_combined(
                        [size2],
                        [
                            add_range(current_group, size1),
                            add_range(current_group + 1, size2),
                        ],
                    )
                )
            else:
                if current_group < len(remaining):
                    return_getters.append(
                        operator.itemgetter(add_range(current_group, size))
                    )
        return_getters_groups.append(return_getters)
    assert all(V.graph.sizevars.size_hint(s) == 1 for s in remaining), (
        f"failed to set ranges {remaining} {lengths}"
    )
    return new_ranges, return_getters_groups
@classmethod
def prepare_split_iteration_lengths(
    cls,
    groups: Iterable[sympy.Expr],
    lengths: Sequence[Sequence[sympy.Expr]],
    reduction_numel: sympy.Expr = sympy.S.One,
) -> Sequence[Sequence[sympy.Expr]]:
    "Fill in the reduction numel of lengths if missing"
    sizevars = V.graph.sizevars
    # Only fill in when the pointwise sizes alone account for the total
    # numel divided by the reduction numel.
    if len(lengths[1]) == 0 and (
        not sizevars.statically_known_equals(reduction_numel, sympy.S.One)
        and sizevars.statically_known_equals(
            sympy_product(groups),
            sympy_product(lengths[0]) * reduction_numel,
        )
    ):
        return (lengths[0], [reduction_numel])
    return lengths
@classmethod
def is_compatible(
    cls,
    groups: Iterable[sympy.Expr],
    lengths: Sequence[Sequence[sympy.Expr]],
    reduction_numel: sympy.Expr = sympy.S.One,
) -> bool:
    """True when `lengths` can be re-split to match this kernel's `groups`."""
    filled = cls.prepare_split_iteration_lengths(groups, lengths, reduction_numel)
    try:
        cls._split_iteration_ranges(groups, filled)
    except CantSplit:
        return False
    return True
def split_and_set_ranges(
    self, lengths: Sequence[Sequence[sympy.Expr]]
) -> list[list[sympy.Expr]]:
    """Map this kernel's tiling onto a node's iteration space and set ranges.

    Args:
        lengths: per-node sequences of symbolic dimension sizes.

    Returns:
        The mapped iteration variables, one list per dimension group.
    """
    # prefix -> total numel for each range tree; reduction dims collapse to
    # one element when codegen is currently outside the reduction.
    tiling = {tree.prefix: tree.numel for tree in self.range_trees}
    if not self.inside_reduction:
        tiling = {
            prefix: sympy.S.One if prefix_is_reduction(prefix) else numel
            for prefix, numel in tiling.items()
        }
    groups = list(tiling.values())
    # Re-split the node sizes against our groups and materialize the ranges.
    return self.map_kernel_groups_to_node_sizes(groups, lengths, self.set_ranges)
@classmethod
def map_kernel_groups_to_node_sizes(
    cls,
    groups: Sequence[sympy.Expr],
    lengths: Sequence[Sequence[sympy.Expr]],
    set_ranges,
) -> list[list[sympy.Expr]]:
    """
    We may want to fuse `for i0 in s0*s1` into a tiled kernel with groups (s0, s1).

    To do this we need to split up the iteration space of i0 into something like:
        for i1 in s0:
          for i2 in s1:
            i0 = i1*s1 + i2
            ....

    This function matches and resplits lengths to the groups of
    this kernel to enable tiled + non-tiled fusions.
    """
    # Fast path: sizes already match the groups one-to-one.
    if len(lengths) == len(groups) and all(
        V.graph.sizevars.simplify(sympy_product(x) - g) == 0
        for x, g in zip(lengths, groups)
    ):
        return set_ranges(*lengths)

    new_ranges, return_getters_groups = cls._split_iteration_ranges(groups, lengths)
    itervars = [*itertools.chain.from_iterable(set_ranges(*new_ranges))]
    # Rebuild each node's original index expressions from the flat vars.
    return [[fn(itervars) for fn in fns] for fns in return_getters_groups]
def is_indirect_indexing(self, index: sympy.Expr) -> bool:
    """True if the index contains a TMP symbol (a runtime-computed value)."""
    # tmpX means indirect indexing
    return free_symbol_is_type(index, SymT.TMP)
def is_broadcasted(self, index: sympy.Expr) -> bool:
    """True if `index` covers strictly fewer elements than the kernel iterates."""
    # Note. This may not be correct when there is indirect indexing
    if self.is_indirect_indexing(index):
        return False

    index_numels = [1] * len(self.numels)
    for symbol in index.free_symbols:
        if symbol not in self.range_tree_nodes:
            # Non-iterated variables, e.g. strides
            continue
        entry = self.range_tree_nodes[symbol]  # type: ignore[index]
        assert isinstance(entry.parent, IterationRangesRoot)
        # Accumulate the extent covered along this symbol's root dimension.
        index_numels[entry.parent.index] *= entry.length

    # If the index variables only iterate over a subset of the kernel
    # numels, then it must be broadcasted.
    simplify = V.graph.sizevars.simplify
    return any(
        simplify(idx_range) != simplify(iter_range)  # type: ignore[arg-type]
        for idx_range, iter_range in zip(index_numels, self.numels.values())
    )
def index_to_str(self, index: sympy.Expr) -> str:
"""
Convert an index expr to a string that can be used in output code.
e.g. a sympy expression "s2" may actually appear as "ks1" in the generated kernel.
Index expressions often need to be passed in as arguments to the triton kernel.
Rename_indexing and codegen_indexing keep track of the needed indices and add
new parameters to the function signature.
"""
if isinstance(index, list):
return f"[{', '.join(map(self.index_to_str, index))}]"
return self.kexpr(self.rename_indexing(index)) # type: ignore[call-arg]
def prepare_indexing(
    self,
    index: sympy.Expr,
) -> sympy.Expr:
    """Simplify `index`, hoist floor/ceil subexpressions of host-known sizes
    into precomputed kernel arguments, and emit needed range variables."""
    index = self.simplify_indexing(index)
    index = sympy_subs(index, V.graph.sizevars.precomputed_replacements)
    # if simple replacements didn't get rid of floor/ceil, try full subs
    if len(index.atoms(sympy.floor)) or len(index.atoms(sympy.ceiling)):
        index = index.subs(V.graph.sizevars.precomputed_replacements)
    # last resort, if no range vars are in the expr, hoist it
    # TODO instead of trying to blindly find complicated exprs, we should hoist the
    # inputs/outputs sizes and strides, but at the time indexing is generated
    # kernel inputs and outputs are not set yet, we'd need a deeper refactor
    # to do it this way
    if len(index.atoms(sympy.ceiling)):
        for a in index.atoms(sympy.ceiling):
            # for nested exprs, atoms yields top level first (?)
            # so if everything goes fine, lower level replacements will come up empty
            symbols = a.free_symbols
            if len(symbols) > 0 and all(
                symbol_is_type(s, (SymT.SIZE, SymT.PRECOMPUTED_SIZE))
                for s in symbols
            ):
                replacements = {a: V.graph.sizevars.lookup_precomputed_size(a)}
                index = sympy_subs(index, replacements)

    simp_index = self.simplify_indexing(index)

    # Now that we are done simplifying we can unwrap Identity so that downstream handling
    # for its contained expression will work. previously, tl.full wrapping of sympy.Integer
    # would not occur
    simp_index = (
        simp_index if not isinstance(simp_index, Identity) else simp_index.args[0]
    )

    return self.codegen_indexing(simp_index)
def active_range_trees(self) -> list[IterationRangesRoot]:
return [
t
for t in self.range_trees
# pyrefly: ignore [missing-argument]
if not t.is_reduction or self.inside_reduction
]
def codegen_indexing(self, expr: sympy.Expr) -> sympy.Expr:
    """Emit definitions for every range variable referenced by `expr`."""
    expr = V.graph.sizevars.simplify_with_ranges(expr, self.var_ranges())
    # Sorted by name for deterministic codegen order.
    for sym in sorted(expr.free_symbols, key=str):
        if sym in self.range_tree_nodes:
            # if indexing expression is complicated, we precompute it on the host side
            # and send the result as a kernel argument
            replacements = {}
            for ps in self.range_tree_nodes[sym].precomputed_args():  # type: ignore[index]
                replacements[ps] = V.graph.sizevars.lookup_precomputed_size(ps)
            if len(replacements) > 0:
                self.range_tree_nodes[sym].expr = sympy_subs(  # type: ignore[index]
                    self.range_tree_nodes[sym].expr,
                    replacements,  # type: ignore[index]
                )
            self.range_tree_nodes[sym].codegen()  # type: ignore[index]
    return expr
def codegen_nan_check(self) -> None:
    """Emit NaN/Inf checks for kernel outputs; implemented by subclasses."""
    raise NotImplementedError("NYI: codegen_nan_check")

def deallocate_workspaces(self):
    """Emit wrapper-side deallocation of workspaces, in reverse order."""
    wrapper = V.graph.wrapper_code
    for ws in reversed(self.args.workspace_args):
        wrapper.generate_workspace_deallocation(ws)

def call_kernel(
    self, name: str, node: Optional[IRNode] = None, deallocate_ws: bool = True
) -> None:
    """Emit the host-side call to this kernel; implemented by subclasses."""
    raise NotImplementedError("NYI: call_kernel")
@contextlib.contextmanager
def mask_loads(
    self, mask: Union[str, OpsWrapper], value: Union[int, float]
) -> Iterator[str]:
    """Context manager to add an additional mask to tl.load/store"""
    prior = self._load_mask
    prior_val = self._load_other
    if prior:
        # Nested masking: AND the new mask into the currently active one.
        mask = ops.logical_and(mask, prior)

    mask = OpsWrapper._unwrap(mask)
    self._load_mask = mask
    self._load_other = value
    try:
        # TODO(jansel): do we need a reshape here?
        yield mask
    finally:
        # Always restore the previous mask/other, even on exception.
        self._load_mask = prior
        self._load_other = prior_val
def get_strides_of_load(self, index: sympy.Expr) -> dict[sympy.Symbol, sympy.Expr]:
    """
    This gets the stride of the index for each of the tiling variables
    (technically, it does it at index 0)

    For example, if
    xindex = x0 + 512*x1 + 1024*r0
    x0 = (xindex//512)
    x1 = (xindex % 512)
    r0 = rindex // 1024

    this function would return
    {xindex: 512, rindex: 1024}
    """
    index_to_tile_indexes = {k: v.expr for k, v in self.range_tree_nodes.items()}
    index_in_tile_vars = sympy_subs(index, index_to_tile_indexes)  # type: ignore[arg-type]
    strides = {}
    for range_tree in self.range_trees:
        s = sympy_index_symbol(range_tree.name)
        # Finite difference at 0: f(s=1) - f(s=0) yields the coefficient of s.
        strides[s] = sympy_subs(index_in_tile_vars, {s: 1}) - sympy_subs(
            index_in_tile_vars, {s: 0}
        )
    return strides
@staticmethod
def _map_tuple_or_scalar(fn, value):
if isinstance(value, tuple):
return tuple(map(fn, value))
return fn(value)
def estimate_flops(self) -> Optional[int]:
    """Sum per-node flop estimates, skipping nodes that report None (or 0)."""
    total = 0
    for node in NodeScheduleMarker.only_nodes(self.features.node_schedule):
        estimate = node.estimate_flops()
        if estimate:
            total += estimate
    return total
def estimate_kernel_num_bytes(self):
    """
    Try the best to estimate the total size (in bytes) of the
    kernel's inputs and outputs, which is used for estimating the memory
    throughput of this kernel. This information is used for checking how
    far we are from the peak memory bandwidth. It's important that
    we want to avoid overestimating the sizes of the inputs and outputs,
    because it can wrongfully give us a very large memory traffic value,
    which may be even larger than the theoretical bandwidth and thus
    become very misleading. This is particularly problematic for cases
    where we slice some inputs. In those cases, we should only count
    the size of the "slices" instead of the original inputs, because
    only the slices contribute to the real memory traffic.
    """
    nbytes = []
    ninplace_args = len(unique(self.args.inplace_buffers.values()))
    _, call_args, _, _ = self.args.python_argdefs()
    buf_accesses = self.features.buf_accesses()

    # For pointwise and reduction kernels, this is the upper-bound numels
    # for the output buffer.
    # FIXME: This is not exactly right for cases like below:
    #    def foo(tensor0, tensor1):
    #        x0 = narrow(tensor0)
    #        return cat(x0, tensor1)
    # For this example, we will end up overestimate the size for the
    # slice s0. Potentially, we could have precise inputs information
    # if we maintained the original inputs of the Pointwise kernel created
    # for the "cat". However, I think it might be a bit overwhelming that
    # we add such complexity only for handling some particular cases for
    # benchmarking.
    out_numel = V.graph.sizevars.size_hint(
        sympy_product(self.numels.values()),
        fallback=config.unbacked_symint_fallback,
    )
    for i, arg in enumerate(call_args):
        # "buf" may be narrowed. In this case, the number of memory accesses
        # should be estimated based on the reinterpreted layout.
        # On the other hand, buf may be broadcasted. In this case,
        # counting the size of the underline storage would give us
        # a better estimation in terms of memory accesses.
        if arg not in buf_accesses:
            # Non-buffer args (e.g. sizes) contribute no memory traffic.
            nbytes.append(0)
            continue
        arg_numel = V.graph.get_numel(arg)
        buf_size = V.graph.sizevars.size_hint(
            arg_numel, fallback=config.unbacked_symint_fallback
        )
        if buf_size > out_numel:
            # This arg points to a buf that has been sliced.
            # We need to count each individual slice to have
            # a better estimation.
            indices = OrderedSet[Any]()
            no_index_dep_count = 0
            for dep in buf_accesses[arg]:
                if isinstance(dep, (StarDep, WeakDep)):
                    # Deps without an index expression get unique keys so
                    # each one counts as a distinct access.
                    indices.add(f"no_index_dep_{no_index_dep_count}")
                    no_index_dep_count += 1
                else:
                    indices.add(dep.index)
            numel = len(indices) * out_numel
        else:
            numel = buf_size
        dtype = V.graph.get_dtype(arg)
        dtype_size = get_dtype_size(dtype)
        # In-place args are both read and written: count them twice.
        # pyrefly: ignore [bad-argument-type]
        nbytes.append(numel * dtype_size * (1 + int(i < ninplace_args)))
    return sum(nbytes)
def warn_mix_layout(self, kernel_name):
    """
    Print message if the kernel have mixed layout inputs.
    Only care about 4D tensor for now.
    """
    if (
        len(self.args.input_buffers) == 1
        and len(self.args.output_buffers) == 1
        and len(self.args.inplace_buffers) == 0
    ):
        # even if input buffer and output buffer have different layout,
        # this can be a layout conversion kernel. No need to warn for
        # the mix layouts.
        return

    argdefs, call_args, _signature, _ = self.args.python_argdefs()
    uniform_stride_order = None
    # pyrefly: ignore [bad-assignment]
    for arg_name in call_args:
        buf = V.graph.try_get_buffer(arg_name)
        if not buf:
            continue
        layout = buf.get_layout()
        if len(layout.size) == 4:
            # ignore the tensor if only 1 dimension is non-zero
            if len([x for x in layout.size if x == 1]) == 3:
                continue
            stride_order = ir.get_stride_order(layout.stride)
            if uniform_stride_order is None:
                # First 4D tensor seen sets the reference order.
                uniform_stride_order = stride_order
            elif uniform_stride_order != stride_order:
                msg = yellow_text(
                    f"Expected stride order {uniform_stride_order}, but found stride order"
                    + f" {stride_order} for kernel {kernel_name}"
                )
                log.warning(msg)

                # Collect per-arg diagnostics for the warning below.
                stride_order_list = [
                    ir.get_stride_order(
                        V.graph.get_buffer(name).get_layout().stride
                    )
                    if V.graph.try_get_buffer(name)
                    else None
                    for name in call_args
                ]
                size_list = [
                    V.graph.get_buffer(name).get_layout().size
                    if V.graph.try_get_buffer(name)
                    else None
                    for name in call_args
                ]
                source_list = [
                    "GraphInput"
                    if name in V.graph.graph_inputs
                    else "IntermediateBuffer"
                    if name in V.graph.name_to_buffer
                    else None
                    for name in call_args
                ]
                argdef_names = [x.name for x in argdefs]
                msg = yellow_text(
                    f"  param names {argdef_names}\n  buf names {call_args}\n  strides {stride_order_list}"
                    + f"\n  sizes {size_list}\n  sources {source_list}\n"
                )
                log.warning(msg)
                return
    msg = green_text(
        f"All the inputs for the triton kernel {kernel_name} have uniform layout"
    )
    log.warning(msg)
def welford_reduce_fallback(self, dtype, value):
    """Two-pass fallback for Welford variance: compute mean via a sum
    reduction, then m2 = sum((x - mean)**2).  Returns (mean, m2, rnumel)."""
    sum_ = ops.reduction(dtype, dtype, "sum", value)
    # Presumably the index_expr must be emitted outside reduction scope —
    # hence the temporary toggle; confirm against ops semantics.
    self.inside_reduction = False
    rnumel = ops.index_expr(self.features.reduction_numel, dtype)
    mean = ops.truediv(sum_, rnumel)
    self.inside_reduction = True
    dx = ops.sub(value, mean)
    dx2 = ops.mul(dx, dx)
    m2 = ops.reduction(dtype, dtype, "sum", dx2)
    return OpsWrapper._unwrap((mean, m2, rnumel))

def prepare_softmax_twopass_fallback(self, dtype, value):
    """Two-pass softmax prep: vmax = max(x), vsum = sum(exp(x - vmax))."""
    vmax = ops.reduction(dtype, dtype, "max", value)
    sub = ops.sub(value, vmax)
    exp = ops.exp(sub)
    vsum = ops.reduction(dtype, dtype, "sum", exp)
    return OpsWrapper._unwrap((vmax, vsum))
def codegen_kernel(self):
    """Produce the final kernel source; implemented by subclasses."""
    raise NotImplementedError

def codegen_body(self):
    """Flush pending per-node code into the kernel body; no-op in base class."""
    pass

def codegen_iteration_ranges_entry(self, entry: IterationRangesEntry):
    """Emit the definition of one iteration-range variable; no-op in base class."""
    pass
| SIMDKernel |
python | django__django | tests/i18n/test_extraction.py | {
"start": 48398,
"end": 48647
} | class ____(AdminScriptTestCase):
def test_makemessages_no_settings(self):
    # makemessages run via django-admin (no settings configured) should
    # succeed silently: no stderr and no stdout at verbosity 0.
    out, err = self.run_django_admin(["makemessages", "-l", "en", "-v", "0"])
    self.assertNoOutput(err)
    self.assertNoOutput(out)
| NoSettingsExtractionTests |
python | getsentry__sentry | tests/sentry/workflow_engine/test_base.py | {
"start": 2121,
"end": 2247
} | class ____(Model):
# Test-only model; marked Excluded so it is skipped by relocation/export
# machinery (NOTE(review): confirm semantics of RelocationScope.Excluded).
__relocation_scope__ = RelocationScope.Excluded

class Meta:
    # Register under the test-only "fixtures" app.
    app_label = "fixtures"
| MockModel |
python | spyder-ide__spyder | spyder/dependencies.py | {
"start": 13141,
"end": 18600
} | class ____(object):
"""
Spyder's dependency
Version may starts with =, >=, > or < to specify the exact requirement;
multiple conditions may be separated by ',' (e.g. '>=0.13,<1.0')"""
OK = 'OK'
NOK = 'NOK'
def __init__(self, modname, package_name, features, required_version,
             installed_version=None, kind=MANDATORY):
    """Record a dependency and resolve its installed version.

    Args:
        modname: importable module name (e.g. 'numpy').
        package_name: PyPI package name (may contain hyphens).
        features: human-readable description of what it enables.
        required_version: version constraint string, or None.
        installed_version: version override; detected when None.
        kind: MANDATORY, OPTIONAL or PLUGIN.
    """
    self.modname = modname
    self.package_name = package_name
    self.features = features
    self.required_version = required_version
    self.kind = kind

    # Although this is not necessarily the case, it's customary that a
    # package's distribution name be it's name on PyPI with hyphens
    # replaced by underscores.
    # Example:
    # * Package name: python-lsp-black.
    # * Distribution name: python_lsp_black
    self.distribution_name = self.package_name.replace('-', '_')

    if installed_version is None:
        try:
            self.installed_version = programs.get_module_version(modname)
            if not self.installed_version:
                # Use get_package_version and the distribution name
                # because there are cases for which the version can't
                # be obtained from the module (e.g. pylsp_black).
                self.installed_version = programs.get_package_version(
                    self.distribution_name)
        except Exception:
            # NOTE: Don't add any exception type here!
            # Modules can fail to import in several ways besides
            # ImportError
            self.installed_version = None
    else:
        self.installed_version = installed_version
def check(self):
    """Check if dependency is installed"""
    if self.modname == 'spyder_kernels':
        # TODO: Remove when spyder-kernels 3 is released!
        return True
    if not self.required_version:
        # No version constraint: nothing to verify.
        return True
    return programs.is_module_installed(
        self.modname,
        self.required_version,
        distribution_name=self.distribution_name
    )
def get_installed_version(self):
    """Return dependency status (string)"""
    label = self.OK if self.check() else self.NOK
    return '%s (%s)' % (self.installed_version, label)
def get_status(self):
    """Return dependency status (string)"""
    return self.OK if self.check() else self.NOK
DEPENDENCIES = []


def add(modname, package_name, features, required_version,
        installed_version=None, kind=MANDATORY):
    """Add Spyder dependency.

    Raises ValueError if a dependency with the same module name is already
    registered.  On CI the duplicate check is skipped for the test-only
    'spyder_boilerplate' module to avoid spurious errors.
    """
    global DEPENDENCIES
    # The CI escape hatch does not depend on the loop variable, so evaluate
    # it once instead of once per already-registered dependency.
    if not (running_in_ci() and 'spyder_boilerplate' in modname):
        for dependency in DEPENDENCIES:
            if dependency.modname == modname:
                raise ValueError(
                    f"Dependency has already been registered: {modname}")
    DEPENDENCIES += [Dependency(modname, package_name, features,
                                required_version,
                                installed_version, kind)]
def check(modname):
    """Check if required dependency is installed"""
    for dependency in DEPENDENCIES:
        if dependency.modname == modname:
            return dependency.check()
    # No registered dependency matched.
    raise RuntimeError("Unknown dependency %s" % modname)
def status(deps=DEPENDENCIES, linesep=os.linesep):
    """Return a status of dependencies.

    Output is grouped by kind (Mandatory, Optional, Plugin) and sorted by
    name within each group, with versions aligned in a column.
    """
    maxwidth = 0
    data = []

    # Find maximum width
    for dep in deps:
        title = dep.modname
        if dep.required_version is not None:
            title += ' ' + dep.required_version

        maxwidth = max([maxwidth, len(title)])
        # Prefix an order key so kinds sort Mandatory < Optional < Plugin.
        dep_order = {MANDATORY: '0', OPTIONAL: '1', PLUGIN: '2'}
        order_dep = {'0': MANDATORY, '1': OPTIONAL, '2': PLUGIN}
        data.append([dep_order[dep.kind], title, dep.get_installed_version()])

    # Construct text and sort by kind and name
    maxwidth += 1
    text = ""
    prev_order = '-1'
    for order, title, version in sorted(
            data, key=lambda x: x[0] + x[1].lower()):
        if order != prev_order:
            # Start a new "# Kind:" section header.
            name = order_dep[order]
            if name == MANDATORY:
                text += f'# {name.capitalize()}:{linesep}'
            else:
                text += f'{linesep}# {name.capitalize()}:{linesep}'

            prev_order = order

        text += f'{title.ljust(maxwidth)}:  {version}{linesep}'

    # Remove spurious linesep when reporting deps to Github
    if not linesep == '<br>':
        # Bug fix: strip the full separator, not just one character —
        # text[:-1] left a stray '\r' behind when linesep was '\r\n'.
        text = text[:-len(linesep)]

    return text
def missing_dependencies():
    """Return the status of missing dependencies (if any)"""
    missing_deps = [
        dependency
        for dependency in DEPENDENCIES
        if dependency.kind != OPTIONAL and not dependency.check()
    ]
    if not missing_deps:
        return ""
    return status(deps=missing_deps, linesep='<br>')
def declare_dependencies():
    """Register every dependency from DESCRIPTIONS marked for display."""
    for dep in DESCRIPTIONS:
        if not dep.get('display', True):
            continue
        add(
            dep['modname'],
            dep['package_name'],
            dep['features'],
            dep['required_version'],
            kind=dep.get('kind', MANDATORY),
        )
| Dependency |
python | huggingface__transformers | src/transformers/models/led/modeling_led.py | {
"start": 42424,
"end": 47124
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: LEDConfig, layer_idx=None):
    """Build one LED decoder layer: self-attention, cross-attention and FFN.

    NOTE: submodule assignment order is kept as-is; it determines the
    parameter registration order (and thus state-dict layout) in nn.Module.
    """
    super().__init__()
    self.embed_dim = config.d_model

    # Causal (decoder) self-attention.
    self.self_attn = LEDDecoderAttention(
        embed_dim=self.embed_dim,
        num_heads=config.decoder_attention_heads,
        dropout=config.attention_dropout,
        is_decoder=True,
        layer_idx=layer_idx,
    )
    self.dropout = config.dropout
    self.activation_fn = ACT2FN[config.activation_function]
    self.activation_dropout = config.activation_dropout

    self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
    # Cross-attention over the encoder's hidden states.
    self.encoder_attn = LEDDecoderAttention(
        self.embed_dim,
        config.decoder_attention_heads,
        dropout=config.attention_dropout,
        is_decoder=True,
        layer_idx=layer_idx,
    )
    self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
    # Position-wise feed-forward network.
    self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
    self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
    self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`torch.FloatTensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`): Whether the base model outputs attentions.
This requires the attentions tensor to be reshaped in this function.
"""
residual = hidden_states
# Self-Attention
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (past_key_values,)
return outputs
| LEDDecoderLayer |
python | apache__airflow | providers/common/sql/src/airflow/providers/common/sql/operators/generic_transfer.py | {
"start": 1294,
"end": 8524
} | class ____(BaseOperator):
"""
Moves data from a connection to another.
Assuming that they both provide the required methods in their respective hooks.
The source hook needs to expose a `get_records` method, and the destination a
`insert_rows` method.
This is meant to be used on small-ish datasets that fit in memory.
:param sql: SQL query to execute against the source database. (templated)
:param destination_table: target table. (templated)
:param source_conn_id: source connection. (templated)
:param source_hook_params: source hook parameters.
:param destination_conn_id: destination connection. (templated)
:param destination_hook_params: destination hook parameters.
:param preoperator: sql statement or list of statements to be
executed prior to loading the data. (templated)
:param insert_args: extra params for `insert_rows` method.
:param page_size: number of records to be read in paginated mode (optional).
:param paginated_sql_statement_clause: SQL statement clause to be used for pagination (optional).
"""
template_fields: Sequence[str] = (
"source_conn_id",
"destination_conn_id",
"sql",
"destination_table",
"preoperator",
"insert_args",
"page_size",
"paginated_sql_statement_clause",
)
template_ext: Sequence[str] = (
".sql",
".hql",
)
template_fields_renderers = {"preoperator": "sql"}
ui_color = "#b0f07c"
def __init__(
self,
*,
sql: str | list[str],
destination_table: str,
source_conn_id: str,
source_hook_params: dict | None = None,
destination_conn_id: str,
destination_hook_params: dict | None = None,
preoperator: str | list[str] | None = None,
insert_args: dict | None = None,
page_size: int | None = None,
paginated_sql_statement_clause: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.destination_table = destination_table
self.source_conn_id = source_conn_id
self.source_hook_params = source_hook_params
self.destination_conn_id = destination_conn_id
self.destination_hook_params = destination_hook_params
self.preoperator = preoperator
self.insert_args = insert_args or {}
self.page_size = page_size
self.paginated_sql_statement_clause = paginated_sql_statement_clause or "{} LIMIT {} OFFSET {}"
@classmethod
def get_hook(cls, conn_id: str, hook_params: dict | None = None) -> DbApiHook:
"""
Return DbApiHook for this connection id.
:param conn_id: connection id
:param hook_params: hook parameters
:return: DbApiHook for this connection
"""
connection = BaseHook.get_connection(conn_id)
hook = connection.get_hook(hook_params=hook_params)
if not isinstance(hook, DbApiHook):
raise RuntimeError(f"Hook for connection {conn_id!r} must be of type {DbApiHook.__name__}")
return hook
@cached_property
def source_hook(self) -> DbApiHook:
return self.get_hook(conn_id=self.source_conn_id, hook_params=self.source_hook_params)
@cached_property
def destination_hook(self) -> DbApiHook:
return self.get_hook(conn_id=self.destination_conn_id, hook_params=self.destination_hook_params)
def get_paginated_sql(self, offset: int) -> str:
"""Format the paginated SQL statement using the current format."""
return self.paginated_sql_statement_clause.format(self.sql, self.page_size, offset)
def render_template_fields(
self,
context: Context,
jinja_env: jinja2.Environment | None = None,
) -> None:
super().render_template_fields(context=context, jinja_env=jinja_env)
# Make sure string are converted to integers
if isinstance(self.page_size, str):
self.page_size = int(self.page_size)
commit_every = self.insert_args.get("commit_every")
if isinstance(commit_every, str):
self.insert_args["commit_every"] = int(commit_every)
def execute(self, context: Context):
if self.preoperator:
self.log.info("Running preoperator")
self.log.info(self.preoperator)
self.destination_hook.run(self.preoperator)
if self.page_size and isinstance(self.sql, str):
self.defer(
trigger=SQLExecuteQueryTrigger(
conn_id=self.source_conn_id,
hook_params=self.source_hook_params,
sql=self.get_paginated_sql(0),
),
method_name=self.execute_complete.__name__,
)
else:
if isinstance(self.sql, str):
self.sql = [self.sql]
self.log.info("Extracting data from %s", self.source_conn_id)
for sql in self.sql:
self.log.info("Executing: \n %s", sql)
results = self.source_hook.get_records(sql)
self.log.info("Inserting rows into %s", self.destination_conn_id)
self.destination_hook.insert_rows(
table=self.destination_table, rows=results, **self.insert_args
)
def execute_complete(
self,
context: Context,
event: dict[Any, Any] | None = None,
) -> Any:
if event:
if event.get("status") == "failure":
raise AirflowException(event.get("message"))
results = event.get("results")
if results:
map_index = context["ti"].map_index
offset = (
context["ti"].xcom_pull(
key="offset",
task_ids=self.task_id,
dag_id=self.dag_id,
map_indexes=map_index,
default=0,
)
+ self.page_size
)
self.log.info("Offset increased to %d", offset)
context["ti"].xcom_push(key="offset", value=offset)
self.log.info("Inserting %d rows into %s", len(results), self.destination_conn_id)
self.destination_hook.insert_rows(
table=self.destination_table, rows=results, **self.insert_args
)
self.log.info(
"Inserting %d rows into %s done!",
len(results),
self.destination_conn_id,
)
self.defer(
trigger=SQLExecuteQueryTrigger(
conn_id=self.source_conn_id,
hook_params=self.source_hook_params,
sql=self.get_paginated_sql(offset),
),
method_name=self.execute_complete.__name__,
)
else:
self.log.info(
"No more rows to fetch into %s; ending transfer.",
self.destination_table,
)
| GenericTransfer |
python | tornadoweb__tornado | demos/chat/chatdemo.py | {
"start": 848,
"end": 1770
} | class ____:
def __init__(self):
# cond is notified whenever the message cache is updated
self.cond = tornado.locks.Condition()
self.cache = []
self.cache_size = 200
def get_messages_since(self, cursor):
"""Returns a list of messages newer than the given cursor.
``cursor`` should be the ``id`` of the last message received.
"""
results = []
for msg in reversed(self.cache):
if msg["id"] == cursor:
break
results.append(msg)
results.reverse()
return results
def add_message(self, message):
self.cache.append(message)
if len(self.cache) > self.cache_size:
self.cache = self.cache[-self.cache_size :]
self.cond.notify_all()
# Making this a non-singleton is left as an exercise for the reader.
global_message_buffer = MessageBuffer()
| MessageBuffer |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pep8_naming/N805.py | {
"start": 1974,
"end": 2039
} | class ____(type):
def __subclasscheck__(cls, other): ...
| MyMeta |
python | tensorflow__tensorflow | tensorflow/python/eager/backprop.py | {
"start": 3916,
"end": 24812
} | class ____(object):
"""Pretends to be a tf.Operation for the gradient functions."""
def __init__(self, attrs, inputs, outputs, typ, skip_input_indices):
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.type = typ
self.skip_input_indices = skip_input_indices
def get_attr(self, attr):
typ = op_attr_type(self.type, attr)
for i in range(0, len(self.attrs), 2):
if self.attrs[i] == attr:
return make_attr(typ, self.attrs[i + 1])
raise KeyError(attr)
def _get_control_flow_context(self):
raise NotImplementedError(
"tf.GradientTape.gradients() does not support graph control flow "
"operations like tf.cond or tf.while at this time. Use tf.gradients() "
"instead. If you need this feature, please file a feature request at "
"https://github.com/tensorflow/tensorflow/issues/new"
)
def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs,
out_grads, skip_input_indices, forward_pass_name_scope):
"""Calls the gradient function of the op.
Args:
op_name: the name of the op to be differentiated.
attr_tuple: the attrs, as a tuple.
num_inputs: the number of inputs to the op.
inputs: inputs to the original operation.
outputs: outputs to the original operation.
out_grads: gradients of the operation wrt its outputs.
skip_input_indices: a tuple that is passed to the gradient function,
indicating which inputs to skip calculating the gradient for
forward_pass_name_scope: the namescope of the op in the forward pass.
Returns:
The gradients with respect to the inputs of the function, as a list.
"""
mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices)
grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access
if grad_fn is None:
return [None] * num_inputs
# This does not work with v1 TensorArrays.
if ops.executing_eagerly_outside_functions(
) or control_flow_util.EnableControlFlowV2(ops.get_default_graph()):
gradient_name_scope = "gradient_tape/"
if forward_pass_name_scope:
gradient_name_scope += forward_pass_name_scope + "/"
with ops.name_scope(gradient_name_scope):
return grad_fn(mock_op, *out_grads)
else:
return grad_fn(mock_op, *out_grads)
pywrap_tfe.TFE_Py_RegisterGradientFunction(_gradient_function)
def _must_record_gradient():
return not pywrap_tfe.TFE_Py_TapeSetIsEmpty()
@tf_export("__internal__.record_gradient", v1=[])
def record_gradient(op_name, inputs, attrs, outputs):
"""Explicitly record the gradient for a given op.
Args:
op_name: The op name as listed in the `OpDef` for the op.
inputs: A list of tensor inputs to the op.
attrs: The op attributes as a flattened list of alternating attribute names
and attribute values.
outputs: A list of tensor outputs from the op.
"""
pywrap_tfe.TFE_Py_RecordGradient(op_name, inputs, attrs, outputs,
ops.get_name_scope())
execute.must_record_gradient = _must_record_gradient
execute.record_gradient = record_gradient
def implicit_val_and_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the value and the gradient of f when called with
the same arguments. The gradient is with respect to all trainable TFE
variables accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.compat.v1.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
val_grad_fn = tfe.implicit_value_and_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
value, grads_and_vars = val_grad_fn(x, y)
print('Value of loss: %s' % value)
# Apply the gradients to Variables.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a tuple pair.
Its first element is the value to which the function evaluates.
Its second element is list of (gradient, variable) pairs.
Raises:
ValueError: if `f` returns None.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
this_tape = tape.push_new_tape()
try:
end_node = f(*args, **kwds)
if end_node is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
finally:
tape.pop_tape(this_tape)
# Note: variables are returned in construction order. This ensures unique
# order across executions.
variables = this_tape.watched_variables()
if not variables:
raise ValueError("No trainable variables were accessed while the "
"function was being computed.")
sources = [v.handle for v in variables]
for s in sources:
if getattr(s, "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors yet."
)
grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),
sources)
return end_node, list(zip(grad, variables))
return grad_fn
def implicit_grad(f):
"""Returns a function which differentiates f with respect to variables.
The wrapped function returns the gradient of f when called with the same
arguments. The gradient is with respect to all trainable TFE variables
accessed by `f`.
This function is useful when the exact set of variables to differentiate with
is not known ahead of time.
Example:
```python
dense_layer = tf.compat.v1.layers.Dense(1)
def loss(x, y):
return tf.reduce_sum(tf.square(dense_layer(x) - y))
# Obtain the gradient function.
grad_fn = tfe.implicit_gradients(loss)
# Invoke the gradient function with concrete values of x and y.
x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])
grads_and_vars = grad_fn(x, y)
# Apply the gradients to Variables.
optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1)
optimizer.apply_gradients(grads_and_vars)
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar.
Returns:
A function which, when called, returns a list of (gradient, variable) pairs.
"""
# TODO(cais): Remove calls to tf.constant() once the gradients functions
# accept lists and np.ndarrays.
def grad_fn(*args, **kwds):
"""Computes the gradient of the wrapped function."""
return implicit_val_and_grad(f)(*args, **kwds)[1]
return grad_fn
def _get_arg_spec(f, params, param_args):
"""The positions of the parameters of f to be differentiated in param_args."""
try:
args = tf_inspect.getfullargspec(f).args
except TypeError as e:
# TypeError can happen when f is a callable object.
if params is None:
return range(len(param_args))
elif all(isinstance(x, int) for x in params):
return params
raise ValueError("Either callable provided is not a function or could not "
"inspect its arguments by name: %s. Original error: %s"
% (f, e))
if params is None:
if not args:
return range(len(param_args))
if args[0] == "self":
return range(len(args) - 1)
else:
return range(len(args))
elif all(isinstance(x, str) for x in params):
return [args.index(n) for n in params]
elif all(isinstance(x, int) for x in params):
return params
else:
raise ValueError(
"params must be all strings or all integers; got %s." % params)
def gradients_function(f, params=None):
"""Returns a function which differentiates f with respect to params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
# The 2nd order derivatives with respect to x is:
# d^2 f / (dx)^2 = 6 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns 1st order gradients.
grad_fn = tfe.gradients_function(f)
x = 2.0
y = 3.0
# Invoke the 1st order gradient function.
x_grad, y_grad = grad_fn(x, y)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# Obtain a function that returns the 2nd order gradient with respect to x.
gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0])
# Invoke the 2nd order gradient function.
x_gradgrad = gradgrad_fn(x, y)[0]
assert x_gradgrad.numpy() == 6 * 2 * 3
# To obtain a callable that returns the gradient(s) of `f` with respect to a
# subset of its inputs, use the `params` keyword argument with
# `gradients_function()`.
ygrad_fn = tfe.gradients_function(f, params=[1])
(y_grad,) = ygrad_fn(x, y)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Note that only tensors with real or complex dtypes are differentiable.
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing None
differentiates with respect to all parameters.
Returns:
function which, when called, returns the value of f and the gradient
of `f` with respect to all of `params`. The function takes an extra optional
keyword argument `dy`. Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the gradient of the decorated function."""
_, grad = val_and_grad_function(f, params=params)(*args, **kwds)
return grad
return decorated
def _ensure_unique_tensor_objects(parameter_positions, args):
"""Make each of the parameter_positions in args a unique tensor_lib.Tensor object.
Ensure that each parameter is treated independently.
For example:
def f(x, y): return x * y
g = gradients_function(f)
one = tf.constant(1.)
g(one, one) should return [1., 1.]
(even though the two arguments are the same Tensor object).
Args:
parameter_positions: List of indices into args defining the arguments to
differentiate against.
args: A list of arguments to the function to be differentiated.
Returns:
args, possibly edited in-place.
"""
s = set()
for (i, t) in enumerate(args):
if i in parameter_positions:
tid = ops.tensor_id(t)
if tid in s:
args[i] = gen_array_ops.identity(args[i])
else:
s.add(tid)
return args
def val_and_grad_function(f, params=None):
"""Returns a function that computes f and its derivative w.r.t. params.
Example:
```python
# f(x, y) = (x ^ 3) * y - x * (y ^ 2)
# Therefore, the 1st order derivatives are:
# df / dx = 3 * (x ^ 2) * y - y ^ 2
# df / dy = x ^ 3 - 2 * x * y
def f(x, y):
return x * x * x * y - x * y * y
# Obtain a function that returns the function value and the 1st order
# gradients.
val_grads_fn = tfe.value_and_gradients_function(f)
x = 2.0
y = 3.0
# Invoke the value-and-gradients function.
f_val, (x_grad, y_grad) = val_grads_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
# To obtain a callable that returns the value of `f` and the gradient(s) of
# `f` with respect to a subset of its inputs, use the `params` keyword
# argument with `value_and_gradients_function()`.
val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1])
f_val, (y_grad,) = val_ygrad_fn(x, y)
assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2)
assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3
```
Args:
f: function to be differentiated. If `f` returns a scalar, this scalar will
be differentiated. If `f` returns a tensor or list of tensors, by default
a scalar will be computed by adding all their values to produce a single
scalar. If desired, the tensors can be elementwise multiplied by the
tensors passed as the `dy` keyword argument to the returned gradient
function.
params: list of parameter names of f or list of integers indexing the
parameters with respect to which we'll differentiate. Passing `None`
differentiates with respect to all parameters.
Returns:
function which, when called, returns the value of f and the gradient
of f with respect to all of `params`. The function takes an extra optional
keyword argument "dy". Setting it allows computation of vector jacobian
products for vectors other than the vector of ones.
Raises:
ValueError: if the params are not all strings or all integers.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
dy = kwds.pop("dy", None)
if kwds:
raise ValueError("Functions to be differentiated cannot "
"receive keyword arguments.")
val, vjp = make_vjp(f, params)(*args, **kwds)
return val, vjp(dy=dy)
return decorated
def make_vjp(f, params=None, persistent=True):
"""Returns a function that computes f and its vjp w.r.t.
params.
The term "vjp" here is an abbreviation for vector-jacobian product.
Args:
f: the function to be differentiated.
params: the parameters (numbers or names) to differentiate with respect to.
A value of None will differentiate with respect to all parameters.
persistent: Boolean controlling whether the VJP function can be re-used.
Must be True or False.
Returns:
A function, which when called, returns a tuple (value, vjp), where:
- value is the result of calling f.
- vjp is a function, which takes a vector as an argument and
returns the product of that vector with the Jacobian of f.
Providing no argument to vjp is equivalent to providing a
vector of ones.
For example,
```python
def f(x):
return x * x
wrapped_fn = tfe.make_vjp(f)
result, vjp = wrapped_fn(tf.constant(3.0))
# result is 9.0
vjp() # the vjp function returns 6.0
Raises:
ValueError: if `f` returns None.
"""
def decorated(*args, **kwds):
"""Computes the value and gradient of the decorated function."""
parameter_positions = _get_arg_spec(f, params, args)
assert not kwds, "The gradient function can't take keyword arguments."
this_tape = tape.push_new_tape(persistent=persistent)
try:
sources = []
args = [
ops.convert_to_tensor(arg) if i in parameter_positions else arg
for i, arg in enumerate(args)
]
args = _ensure_unique_tensor_objects(parameter_positions, args)
for i in parameter_positions:
if getattr(args[i], "is_packed", False):
raise ValueError(
"GradientTape.gradient is not supported on packed EagerTensors"
"yet.")
sources.append(args[i])
tape.watch(this_tape, args[i])
result = f(*args)
if result is None:
raise ValueError("Cannot differentiate a function that returns None; "
"did you forget to return a value from {}?".format(
f.__name__))
flat_result = nest.flatten(result)
flat_result = [gen_array_ops.identity(x) for x in flat_result]
result = nest.pack_sequence_as(result, flat_result)
finally:
tape.pop_tape(this_tape)
def vjp(dy=None):
if dy is not None:
dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
return imperative_grad.imperative_grad(
this_tape, nest.flatten(result), sources, output_gradients=dy)
return result, vjp
return decorated
def _aggregate_grads(gradients):
"""Aggregate gradients from multiple sources.
Args:
gradients: A list of 'Tensor' or 'IndexedSlices' gradients.
Returns:
If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'.
Otherwise returns an aggregated 'IndexedSlices'.
"""
assert gradients, "No gradients to aggregate"
if len(gradients) == 1:
return gradients[0]
if all(isinstance(g, tensor_lib.Tensor) for g in gradients):
return gen_math_ops.add_n(gradients)
else:
assert all(
isinstance(g, (tensor_lib.Tensor, indexed_slices.IndexedSlices))
for g in gradients)
return backprop_util.AggregateIndexedSlicesGradients(gradients)
def _num_elements(grad):
"""The number of elements in the `grad` tensor."""
if isinstance(grad, tensor_lib.Tensor):
shape_tuple = grad._shape_tuple() # pylint: disable=protected-access
elif isinstance(grad, indexed_slices.IndexedSlices):
shape_tuple = grad.values._shape_tuple() # pylint: disable=protected-access
else:
raise ValueError("`grad` not a Tensor or IndexedSlices.")
if shape_tuple is None or None in shape_tuple:
return 0
return functools.reduce(operator.mul, shape_tuple, 1)
def _fast_fill(value, shape, dtype):
return array_ops.fill(
constant_op.constant(shape, dtype=dtypes.int32),
constant_op.constant(value, dtype=dtype))
def _zeros(shape, dtype):
"""Helper to return (possibly cached) zero tensors in eager mode."""
# Note: variants will use _zeros_like
if dtype == dtypes.string or dtype == dtypes.resource:
return None
ctx = context.context()
if not ctx.executing_eagerly():
return array_ops.zeros(shape, dtype)
device = ctx.device_name
if tensor_util.is_tf_type(shape):
shape_key = shape.ref()
else:
shape_key = shape
cache_key = shape_key, dtype, device
cached = ctx.zeros_cache().get(cache_key)
if cached is None:
if dtypes.as_dtype(dtype).is_bool:
value = False
else:
value = 0
cached = _fast_fill(value, shape, dtype)
ctx.zeros_cache().put(cache_key, cached)
return cached
def _ones(shape, dtype):
as_dtype = dtypes.as_dtype(dtype)
if as_dtype == dtypes.string:
return None
if not context.executing_eagerly():
return array_ops.ones(shape, dtype)
if as_dtype.is_bool:
value = True
else:
value = 1
if shape == (): # pylint: disable=g-explicit-bool-comparison
return constant_op.constant(value, dtype=dtype)
return _fast_fill(value, shape, dtype)
_default_vspace = imperative_grad.VSpace(
num_elements_fn=_num_elements,
aggregate_fn=_aggregate_grads,
zeros_fn=_zeros,
ones_fn=_ones,
zeros_like_fn=default_gradient.zeros_like,
ones_like_fn=default_gradient.ones_like,
graph_shape_fn=gen_array_ops.shape)
pywrap_tfe.TFE_Py_RegisterVSpace(_default_vspace)
def _handle_or_self(x):
"""Unwrap resource variable/ndarray to return tensors."""
if resource_variable_ops.is_resource_variable(x):
return x.handle
return x
def _extract_tensors_and_variables(tensor):
"""Extracts tensors and variables from the input object."""
for obj in nest.flatten(tensor):
if _pywrap_utils.IsTensor(obj) or _pywrap_utils.IsVariable(obj):
yield obj
elif isinstance(obj, composite_tensor.CompositeTensor):
components = type_spec.type_spec_from_value(obj)._to_components(obj) # pylint: disable=protected-access
yield from _extract_tensors_and_variables(components)
else:
raise ValueError(f"Passed in object {obj} of type {type(obj).__name__!r}"
f", not tf.Tensor or tf.Variable or ExtensionType.")
@tf_export("GradientTape", "autodiff.GradientTape", v1=["GradientTape"])
| _MockOp |
python | huggingface__transformers | src/transformers/models/ernie/modular_ernie.py | {
"start": 35533,
"end": 38751
} | class ____(BertForQuestionAnswering):
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
task_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], QuestionAnsweringModelOutput]:
r"""
task_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Task type embedding is a special embedding to represent the characteristic of different tasks, such as
word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We
assign a `task_type_id` to each task and the `task_type_id` is in the range `[0,
config.task_type_vocab_size-1]
"""
outputs = self.ernie(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
task_type_ids=task_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
| ErnieForQuestionAnswering |
python | python-openxml__python-docx | tests/test_package.py | {
"start": 464,
"end": 1769
} | class ____:
"""Unit-test suite for `docx.package.Package`."""
def it_can_get_or_add_an_image_part_containing_a_specified_image(
self, image_parts_prop_: Mock, image_parts_: Mock, image_part_: Mock
):
image_parts_prop_.return_value = image_parts_
image_parts_.get_or_add_image_part.return_value = image_part_
package = Package()
image_part = package.get_or_add_image_part("image.png")
image_parts_.get_or_add_image_part.assert_called_once_with("image.png")
assert image_part is image_part_
def it_gathers_package_image_parts_after_unmarshalling(self):
package = Package.open(docx_path("having-images"))
image_parts = package.image_parts
assert len(image_parts) == 3
assert all(isinstance(p, ImagePart) for p in image_parts)
# fixture components ---------------------------------------------
@pytest.fixture
def image_part_(self, request: FixtureRequest):
return instance_mock(request, ImagePart)
@pytest.fixture
def image_parts_(self, request: FixtureRequest):
return instance_mock(request, ImageParts)
@pytest.fixture
def image_parts_prop_(self, request: FixtureRequest):
return property_mock(request, Package, "image_parts")
| DescribePackage |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_engine.py | {
"start": 27936,
"end": 28916
} | class ____(fixtures.TablesTest):
__only_on__ = "mssql"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"error_t",
metadata,
Column("error_code", String(50), primary_key=True),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.error_t.insert(),
[{"error_code": "01002"}],
)
def test_invalid_transaction_detection(self, connection):
# issue #5359
t = self.tables.error_t
# force duplicate PK error
assert_raises(
IntegrityError,
connection.execute,
t.insert(),
{"error_code": "01002"},
)
# this should not fail with
# "Can't reconnect until invalid transaction is rolled back."
result = connection.execute(t.select()).fetchall()
eq_(len(result), 1)
| InvalidTransactionFalsePositiveTest |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 30113,
"end": 30814
} | class ____(nn.Module):
"""Prediction module for the generator, made up of two dense layers."""
def __init__(self, config):
super().__init__()
self.activation = get_activation("gelu")
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
def forward(self, generator_hidden_states: torch.FloatTensor) -> torch.FloatTensor:
hidden_states = self.dense(generator_hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@auto_docstring
| ConvBertGeneratorPredictions |
python | realpython__materials | python-built-in-exceptions/rainbow.py | {
"start": 394,
"end": 729
} | class ____:
def __init__(self, name="Red"):
name = name.title()
if name not in COLORS:
raise ValueError(f"{name} is not a valid rainbow color")
self.name = name
def as_hex(self):
return COLORS[self.name]["Hex"]
def as_rgb(self):
return COLORS[self.name]["RGB"]
| RainbowColor |
python | django-haystack__django-haystack | haystack/backends/elasticsearch7_backend.py | {
"start": 20168,
"end": 20322
} | class ____(ElasticsearchSearchQuery):
def add_field_facet(self, field, **options):
self.facets[field] = options.copy()
| Elasticsearch7SearchQuery |
python | getsentry__sentry | tests/sentry/models/test_grouphistory.py | {
"start": 2526,
"end": 4041
} | class ____(TestCase):
def test_no_history(self) -> None:
# Test both statuses with/without a previous status
assert get_prev_history(self.group, GroupHistoryStatus.UNRESOLVED) is None
assert get_prev_history(self.group, GroupHistoryStatus.DELETED) is None
def test_history(self) -> None:
prev_history = self.create_group_history(self.group, GroupHistoryStatus.UNRESOLVED)
assert get_prev_history(self.group, GroupHistoryStatus.RESOLVED) == prev_history
assert get_prev_history(self.group, GroupHistoryStatus.DELETED) is None
def test_multi_history(self) -> None:
other_group = self.create_group()
self.create_group_history(other_group, GroupHistoryStatus.UNRESOLVED)
assert get_prev_history(self.group, GroupHistoryStatus.UNRESOLVED) is None
prev_history = self.create_group_history(self.group, GroupHistoryStatus.UNRESOLVED)
assert get_prev_history(self.group, GroupHistoryStatus.RESOLVED) == prev_history
prev_history = self.create_group_history(
self.group, GroupHistoryStatus.RESOLVED, prev_history_date=prev_history.date_added
)
assert get_prev_history(self.group, GroupHistoryStatus.UNRESOLVED) == prev_history
prev_history = self.create_group_history(
self.group, GroupHistoryStatus.UNRESOLVED, prev_history_date=prev_history.date_added
)
assert get_prev_history(self.group, GroupHistoryStatus.RESOLVED) == prev_history
| GetPrevHistoryTest |
python | gevent__gevent | src/greentest/3.12/test_ssl.py | {
"start": 66411,
"end": 68849
} | class ____(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
try:
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
except RuntimeError:
if Py_DEBUG_WIN32:
self.skipTest("not supported on Win32 debug build")
raise
self.assertEqual(cm.exception.library, 'PEM')
regex = "(NO_START_LINE|UNSUPPORTED_PUBLIC_KEY_TYPE)"
self.assertRegex(cm.exception.reason, regex)
s = str(cm.exception)
self.assertTrue("NO_START_LINE" in s, s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
| SSLErrorTests |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_gradient05.py | {
"start": 315,
"end": 1560
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_gradient05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [61363712, 61365248]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"gradient": {
"colors": ["#DDEBCF", "#9CB86E", "#156B13"],
"type": "rectangular",
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/rnn_test.py | {
"start": 3860,
"end": 26943
} | class ____(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_in_graph_and_eager_modes
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
if context.executing_eagerly():
inputs = [constant_op.constant(np.ones((3, 4)))]
else:
inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
with self.assertRaisesRegex(ValueError, "must be a vector"):
rnn.dynamic_rnn(
cell,
array_ops_stack.stack(inputs),
dtype=dtypes.float32,
sequence_length=[[4]])
@test_util.run_in_graph_and_eager_modes
def testInvalidDtype(self):
if context.executing_eagerly():
inputs = np.zeros((3, 4, 5), dtype=np.int32)
else:
inputs = array_ops.placeholder(dtypes.int32, shape=(3, 4, 5))
cells = [
rnn_cell_impl.BasicRNNCell,
rnn_cell_impl.GRUCell,
rnn_cell_impl.BasicLSTMCell,
rnn_cell_impl.LSTMCell,
]
for cell_cls in cells:
with self.cached_session():
with self.assertRaisesRegex(ValueError,
"RNN cell only supports floating"):
cell = cell_cls(2, dtype=dtypes.int32)
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.int32)
@test_util.run_in_graph_and_eager_modes
def testBatchSizeFromInput(self):
cell = Plus1RNNCell()
in_eager_mode = context.executing_eagerly()
# With static batch size
if in_eager_mode:
inputs = np.zeros((3, 4, 5), dtype=np.float32)
initial_state = np.zeros((3, 5), dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(3, 4, 5))
initial_state = array_ops.placeholder(dtypes.float32, shape=(3, 5))
# - Without initial_state
outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(3, outputs.shape[0])
self.assertEqual(3, state.shape[0])
# - With initial_state
outputs, state = rnn.dynamic_rnn(
cell, inputs, initial_state=initial_state)
self.assertEqual(3, outputs.shape[0])
self.assertEqual(3, state.shape[0])
# Without static batch size
# Tensor shapes are fully determined with eager execution enabled,
# so only run this test for graph construction.
if not in_eager_mode:
inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 5))
# - Without initial_state
outputs, state = rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
self.assertEqual(None, outputs.shape.dims[0].value)
self.assertEqual(None, state.shape.dims[0].value)
# - With initial_state
outputs, state = rnn.dynamic_rnn(
cell,
inputs,
initial_state=array_ops.placeholder(dtypes.float32, shape=(None, 5)))
self.assertEqual(None, outputs.shape.dims[0].value)
self.assertEqual(None, state.shape.dims[0].value)
@test_util.run_in_graph_and_eager_modes
def testScalarStateIsAccepted(self):
cell = ScalarStateRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session() as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state)
@test_util.run_in_graph_and_eager_modes
def testUnbalancedOutputIsAccepted(self):
cell = UnbalancedOutputRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session() as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={inputs: [[[1], [2], [3], [4]]]})
self.assertIsInstance(outputs, tuple)
self.assertAllEqual([[[1], [2], [3], [4]]], outputs[0])
self.assertAllEqual([[[1, 1], [2, 2], [3, 3], [4, 4]]], outputs[1])
self.assertAllEqual(4, state)
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testEagerMemory(self):
with context.eager_mode():
cell = TensorArrayStateRNNCell()
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=[4])
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testTensorArrayStateIsAccepted(self):
cell = TensorArrayStateRNNCell()
in_eager_mode = context.executing_eagerly()
if in_eager_mode:
inputs = np.array([[[1], [2], [3], [4]]], dtype=np.float32)
else:
inputs = array_ops.placeholder(dtypes.float32, shape=(1, 4, 1))
with self.cached_session() as sess:
outputs, state = rnn.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, sequence_length=[4])
state = (state[0], state[1].stack())
if not in_eager_mode:
outputs, state = sess.run(
[outputs, state], feed_dict={
inputs: [[[1], [2], [3], [4]]]
})
self.assertAllEqual([[[1], [2], [3], [4]]], outputs)
self.assertAllEqual(4, state[0])
self.assertAllEqual([[[1]], [[2]], [[3]], [[4]]], state[1])
@test_util.run_deprecated_v1
def testCellGetInitialState(self):
cell = rnn_cell_impl.BasicRNNCell(5)
with self.assertRaisesRegex(ValueError,
"batch_size and dtype cannot be None"):
cell.get_initial_state(None, None, None)
inputs = array_ops.placeholder(dtypes.float32, shape=(None, 4, 1))
with self.assertRaisesRegex(
ValueError, "batch size from input tensor is different from"):
cell.get_initial_state(inputs=inputs, batch_size=50, dtype=None)
with self.assertRaisesRegex(
ValueError, "batch size from input tensor is different from"):
cell.get_initial_state(
inputs=inputs, batch_size=constant_op.constant(50), dtype=None)
with self.assertRaisesRegex(ValueError,
"dtype from input tensor is different from"):
cell.get_initial_state(inputs=inputs, batch_size=None, dtype=dtypes.int16)
initial_state = cell.get_initial_state(
inputs=inputs, batch_size=None, dtype=None)
self.assertEqual(initial_state.shape.as_list(), [None, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
batch = array_ops.shape(inputs)[0]
dtype = inputs.dtype
initial_state = cell.get_initial_state(None, batch, dtype)
self.assertEqual(initial_state.shape.as_list(), [None, 5])
self.assertEqual(initial_state.dtype, inputs.dtype)
def _assert_cell_builds(self, cell_class, dtype, batch_size, in_size,
out_size):
cell = cell_class(out_size, dtype=dtype)
in_shape = tensor_shape.TensorShape((batch_size, in_size))
cell.build(in_shape)
state_output = cell.get_initial_state(
inputs=None, batch_size=batch_size, dtype=dtype)
cell_output, _ = cell(array_ops.zeros(in_shape, dtype), state_output)
self.assertAllEqual([batch_size, out_size], cell_output.shape.as_list())
@test_util.run_in_graph_and_eager_modes
def testCellsBuild(self):
f32 = dtypes.float32
f64 = dtypes.float64
self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicRNNCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.BasicLSTMCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.GRUCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.GRUCell, f64, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.LSTMCell, f32, 5, 7, 3)
self._assert_cell_builds(rnn_cell_impl.LSTMCell, f64, 5, 7, 3)
@test_util.run_deprecated_v1
def testBasicLSTMCellInterchangeWithLSTMCell(self):
with self.session(graph=ops_lib.Graph()) as sess:
basic_cell = rnn_cell_impl.BasicLSTMCell(1)
basic_cell(array_ops.ones([1, 1]),
state=basic_cell.get_initial_state(inputs=None,
batch_size=1,
dtype=dtypes.float32))
self.evaluate([v.initializer for v in basic_cell.variables])
self.evaluate(basic_cell._bias.assign([10.] * 4))
save = saver.Saver()
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = save.save(sess, prefix)
with self.session(graph=ops_lib.Graph()) as sess:
lstm_cell = rnn_cell_impl.LSTMCell(1, name="basic_lstm_cell")
lstm_cell(array_ops.ones([1, 1]),
state=lstm_cell.get_initial_state(inputs=None,
batch_size=1,
dtype=dtypes.float32))
self.evaluate([v.initializer for v in lstm_cell.variables])
save = saver.Saver()
save.restore(sess, save_path)
self.assertAllEqual([10.] * 4, self.evaluate(lstm_cell._bias))
######### Benchmarking RNN code
def _static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell, inputs_t, sequence_length=sequence_length, dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def graph_creation_static_vs_dynamic_rnn_benchmark(max_time):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# These parameters don't matter
batch_size = 512
num_units = 512
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
def _create_static_rnn():
with session.Session(config=config, graph=ops_lib.Graph()):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
_static_vs_dynamic_rnn_benchmark_static(inputs_list_t, sequence_length)
def _create_dynamic_rnn():
with session.Session(config=config, graph=ops_lib.Graph()):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
_static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
delta_static = timeit.timeit(_create_static_rnn, number=5)
delta_dynamic = timeit.timeit(_create_dynamic_rnn, number=5)
print("%d \t %f \t %f \t %f" %
(max_time, delta_static, delta_dynamic, delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _timer(sess, ops):
# Warm in
for _ in range(2):
sess.run(ops)
# Timing run
runs = 20
start = time.time()
for _ in range(runs):
sess.run(ops)
end = time.time()
return (end - start) / float(runs)
def static_vs_dynamic_rnn_benchmark(batch_size, max_time, num_units, use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# Using rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
delta_static = _timer(sess, ops)
# Using dynamic_rnn()
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _static_vs_dynamic_rnn_benchmark_dynamic(inputs_t, sequence_length)
variables_lib.global_variables_initializer().run()
delta_dynamic = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f \t %f" %
(batch_size, max_time, num_units, use_gpu, delta_static, delta_dynamic,
delta_dynamic / delta_static))
return delta_static, delta_dynamic
def _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t, sequence_length):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + [final_state],
trainable_variables)
return control_flow_ops.group(final_state, *(gradients + outputs))
def half_seq_len_vs_unroll_half_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Halve the sequence length, full static unroll
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(inputs_list_t,
sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_half_seq_len = _timer(sess, ops)
# Halve the unroll size, don't use sequence length
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _half_seq_len_vs_unroll_half_rnn_benchmark(
inputs_list_t[:(max_time // 2)], sequence_length / 2)
variables_lib.global_variables_initializer().run()
delta_unroll_half = _timer(sess, ops)
print("%d \t %d \t\t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_half_seq_len,
delta_unroll_half, delta_half_seq_len / delta_unroll_half))
return delta_half_seq_len, delta_unroll_half
def _concat_state_vs_tuple_state_rnn_benchmark(inputs_list_t, sequence_length,
state_is_tuple):
(_, input_size) = inputs_list_t[0].get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=state_is_tuple)
outputs, final_state = rnn.static_rnn(
cell,
inputs_list_t,
sequence_length=sequence_length,
dtype=dtypes.float32)
final_state = list(final_state) if state_is_tuple else [final_state]
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients(outputs + final_state,
trainable_variables)
return control_flow_ops.group(*(final_state + gradients + outputs))
def concat_state_vs_tuple_state_rnn_benchmark(batch_size, max_time, num_units,
use_gpu):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = max_time * np.ones((batch_size,))
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
# Run with concatenated states (default)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=False)
variables_lib.global_variables_initializer().run()
delta_concat_state = _timer(sess, ops)
# Run with tuple states (new)
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
with ops_lib.device("/cpu:0" if not use_gpu else None):
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _concat_state_vs_tuple_state_rnn_benchmark(
inputs_list_t, sequence_length, state_is_tuple=True)
variables_lib.global_variables_initializer().run()
delta_tuple_state = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t\t %f \t\t %f" %
(batch_size, max_time, num_units, use_gpu, delta_concat_state,
delta_tuple_state, delta_concat_state / delta_tuple_state))
return delta_concat_state, delta_tuple_state
def _dynamic_rnn_swap_memory_benchmark(inputs_t, sequence_length, swap_memory):
(unused_0, unused_1, input_size) = inputs_t.get_shape().as_list()
initializer = init_ops.random_uniform_initializer(-0.01, 0.01, seed=127)
cell = rnn_cell_impl.LSTMCell(
num_units=input_size,
use_peepholes=True,
initializer=initializer,
state_is_tuple=False)
outputs, final_state = rnn.dynamic_rnn(
cell,
inputs_t,
sequence_length=sequence_length,
swap_memory=swap_memory,
dtype=dtypes.float32)
trainable_variables = ops_lib.get_collection(
ops_lib.GraphKeys.TRAINABLE_VARIABLES)
gradients = gradients_impl.gradients([outputs, final_state],
trainable_variables)
return control_flow_ops.group(final_state, outputs, *gradients)
def dynamic_rnn_swap_memory_benchmark(batch_size, max_time, num_units):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = np.random.randint(0, max_time, size=batch_size)
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(max_time)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
# No memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=False)
variables_lib.global_variables_initializer().run()
no_swap = _timer(sess, ops)
# Memory swap
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=True)
variables_lib.global_variables_initializer().run()
swap = _timer(sess, ops)
print("%d \t %d \t %d \t %f \t %f \t %f" %
(batch_size, max_time, num_units, no_swap, swap, swap / no_swap))
return no_swap, swap
def rnn_long_sequence_benchmark(batch_size, seqlen, num_units, dynamic,
swap_memory, nn):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Set up sequence lengths
np.random.seed([127])
sequence_length = [seqlen for _ in range(batch_size)]
inputs_list = [
np.random.randn(batch_size, num_units).astype(np.float32)
for _ in range(seqlen)
]
inputs = np.dstack(inputs_list).transpose([0, 2, 1]) # batch x time x depth
for _ in range(nn):
if dynamic:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_t = variables_lib.Variable(inputs, trainable=False).value()
ops = _dynamic_rnn_swap_memory_benchmark(
inputs_t, sequence_length, swap_memory=swap_memory)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
else:
with session.Session(config=config, graph=ops_lib.Graph()) as sess:
inputs_list_t = [
variables_lib.Variable(
x, trainable=False).value() for x in inputs_list
]
ops = _static_vs_dynamic_rnn_benchmark_static(inputs_list_t,
sequence_length)
variables_lib.global_variables_initializer().run()
elapsed = _timer(sess, ops)
print("%d \t %d \t %d \t %s \t %f \t %f" % (batch_size, seqlen, num_units,
dynamic, elapsed,
elapsed / seqlen))
| RNNTest |
python | squidfunk__mkdocs-material | material/plugins/tags/structure/tag/__init__.py | {
"start": 1424,
"end": 4943
} | class ____:
"""
A tag.
Tags can be used to categorize pages and group them into a tag structure. A
tag is a simple string, which can be split into a hierarchy of tags by using
the character or string as defined in the `hierarchy_separator` setting in
`mkdocs.yml`. Each parent tag contains their child tags.
Example:
```yaml
tags:
- foo/bar
- foo/baz
- qux
```
The tag structure for the above example would look like this:
```
.
├─ foo
│ ├─ bar
│ └─ baz
└─ qux
```
Note that this class does not split the tag name into a hierarchy of tags
by itself, but rather provides a simple interface to iterate over the tag
and its parents. Splitting is left to the caller, in order to allow for
changing the separator in `mkdocs.yml`.
"""
def __init__(
self, name: str, *, parent: Tag | None = None, hidden = False
):
"""
Initialize the tag.
Arguments:
name: The tag name.
parent: The parent tag.
hidden: Whether the tag is hidden.
"""
self.name = name
self.parent = parent
self.hidden = hidden
def __repr__(self) -> str:
"""
Return a printable representation of the tag.
Returns:
Printable representation.
"""
return f"Tag('{self.name}')"
def __str__(self) -> str:
"""
Return a string representation of the tag.
Returns:
String representation.
"""
return self.name
def __hash__(self) -> int:
"""
Return the hash of the tag.
Returns:
The hash.
"""
return hash(self.name)
def __iter__(self) -> Iterator[Tag]:
"""
Iterate over the tag and its parent tags.
Note that the first tag returned is the tag itself, followed by its
parent tags in ascending order. This allows to iterate over the tag
and its parents in a single loop, which is useful for generating
tree or breadcrumb structures.
Yields:
The current tag.
"""
tag = self
while tag:
yield tag
tag = tag.parent
def __contains__(self, other: Tag) -> bool:
"""
Check if the tag contains another tag.
Arguments:
other: The other tag to check.
Returns:
Whether the tag contains the other tag.
"""
assert isinstance(other, Tag)
return any(tag == other for tag in self)
def __eq__(self, other: Tag) -> bool:
"""
Check if the tag is equal to another tag.
Arguments:
other: The other tag to check.
Returns:
Whether the tags are equal.
"""
assert isinstance(other, Tag)
return self.name == other.name
def __lt__(self, other: Tag) -> bool:
"""
Check if the tag is less than another tag.
Arguments:
other: The other tag to check.
Returns:
Whether the tag is less than the other tag.
"""
assert isinstance(other, Tag)
return self.name < other.name
# -------------------------------------------------------------------------
name: str
"""
The tag name.
"""
parent: Tag | None
"""
The parent tag.
"""
hidden: bool
"""
Whether the tag is hidden.
"""
| Tag |
python | django-haystack__django-haystack | test_haystack/test_managers.py | {
"start": 721,
"end": 892
} | class ____(SearchIndexManager):
def filter(self, *args, **kwargs):
return self.get_search_queryset().filter(content="foo1").filter(*args, **kwargs)
| CustomManager |
python | walkccc__LeetCode | solutions/667. Beautiful Arrangement II/667.py | {
"start": 0,
"end": 254
} | class ____:
def constructArray(self, n: int, k: int) -> list[int]:
ans = list(range(1, n - k + 1))
for i in range(k):
if i % 2 == 0:
ans.append(n - i // 2)
else:
ans.append(n - k + (i + 1) // 2)
return ans
| Solution |
python | getsentry__sentry | src/sentry/incidents/handlers/condition/anomaly_detection_handler.py | {
"start": 1151,
"end": 1343
} | class ____(TypedDict):
value: int
source_id: int
subscription_id: int
timestamp: datetime
@condition_handler_registry.register(Condition.ANOMALY_DETECTION)
| AnomalyDetectionUpdate |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/config.py | {
"start": 821,
"end": 928
} | class ____(TypedDict):
postgres_url: str
postgres_db: "PostgresStorageConfigDb"
| PostgresStorageConfig |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 8276,
"end": 8525
} | class ____(WeaviateQueryError):
"""Is raised if a gRPC tenant get request to Weaviate fails in any way."""
def __init__(self, message: str):
super().__init__(message, "tenant get")
self.message = message
| WeaviateTenantGetError |
python | geekcomputers__Python | LinkedLists all Types/doubly_linked_list.py | {
"start": 674,
"end": 7115
} | class ____:
def __init__(self):
self.head = self.tail = None
self.length = 0
def insert_front(self, data):
node = Node(data, self.head)
if self.head == None:
self.tail = node
node.prev = self.head
self.head = node
self.length += 1
def insert_back(self, data):
node = Node(data, None, self.tail)
if self.head == None:
self.tail = self.head = node
self.length += 1
else:
self.tail.next = node
self.tail = node
self.length += 1
def insert_values(self, data_values: list):
self.head = self.tail = None
self.length = 0
for data in data_values:
self.insert_back(data)
def pop_front(self):
if not self.head:
print("List is Empty!")
return
self.head = self.head.next
self.head.prev = None
self.length -= 1
def pop_back(self):
if not self.head:
print("List is Empty!")
return
temp = self.tail
self.tail = temp.prev
temp.prev = self.tail.next = None
self.length -= 1
def print(self):
if self.head is None:
print("Linked List is Empty!")
return
temp = self.head
print("NULL <-", end=" ")
while temp:
if temp.next == None:
print(f"{temp.data} ->", end=" ")
break
print(f"{temp.data} <=>", end=" ")
temp = temp.next
print("NULL")
def len(self):
return self.length # O(1) length calculation
# if self.head is None:
# return 0
# count = 0
# temp = self.head
# while temp:
# count += 1
# temp = temp.next
# return count
def remove_at(self, idx):
if idx < 0 or self.len() <= idx:
raise Exception("Invalid Position")
if idx == 0:
self.pop_front()
return
elif idx == self.length - 1:
self.pop_back()
return
temp = self.head
dist = 0
while dist != idx - 1:
dist += 1
temp = temp.next
temp.next = temp.next.next
temp.next.prev = temp.next.prev.prev
self.length -= 1
def insert_at(self, idx: int, data):
if idx < 0 or self.len() < idx:
raise Exception("Invalid Position")
if idx == 0:
self.insert_front(data)
return
elif idx == self.length:
self.insert_back(data)
return
temp = self.head
dist = 0
while dist != idx - 1:
dist += 1
temp = temp.next
node = Node(data, temp.next, temp)
temp.next = node
self.length += 1
def insert_after_value(self, idx_data, data):
if not self.head: # For Empty List case
print("List is Empty!")
return
if self.head.data == idx_data: # To insert after the Head Element
self.insert_at(1, data)
return
temp = self.head
while temp:
if temp.data == idx_data:
node = Node(data, temp.next, temp)
temp.next = node
self.length += 1
return
temp = temp.next
print("The Element is not in the List!")
def remove_by_value(self, idx_data):
temp = self.head
if temp.data == idx_data:
self.pop_front()
return
elif self.tail.data == idx_data:
self.pop_back()
return
while temp:
if temp.data == idx_data:
temp.prev.next = temp.next
temp.next.prev = temp.prev
self.length -= 1
return
if temp != None:
temp = temp.next
print("The Element is not the List!")
def index(self, data):
"""Returns the index of the Element"""
if not self.head:
print("List is Empty!")
return
idx = 0
temp = self.head
while temp:
if temp.data == data:
return idx
temp = temp.next
idx += 1
print("The Element is not in the List!")
def search(self, idx):
"""Returns the Element at the Given Index"""
if self.len() == 0 or idx >= self.len():
raise Exception("Invalid Position")
return
temp = self.head
curr_idx = 0
while temp:
if curr_idx == idx:
return temp.data
temp = temp.next
curr_idx += 1
def reverse(self):
if not self.head:
print("The List is Empty!")
return
prev = c_next = None
curr = self.head
while curr != None:
c_next = curr.next
curr.next = prev
prev = curr
curr = c_next
self.tail = self.head
self.head = prev
def mid_element(self):
if not self.head:
print("List is Empty!")
return
slow = self.head.next
fast = self.head.next.next
while fast != None and fast.next != None:
slow = slow.next
fast = fast.next.next
return slow.data
def __dir__(self):
funcs = [
"insert_front",
"insert_back",
"pop_front",
"pop_back",
"print",
"len",
"length",
"remove_at",
"insert_after_value",
"index",
"search",
"reverse",
"mid_element",
"__dir__",
]
return funcs
def main():
ll: Node = DoublyLinkedList()
ll.insert_front(1)
ll.insert_front(2)
ll.insert_front(3)
ll.insert_back(0)
ll.insert_values(["ZeroTwo", "Asuna", "Tsukasa", "Seras"])
# ll.remove_at(3)
# ll.insert_at(4 , 'Raeliana')
# ll.pop_back()
ll.insert_after_value("Asuna", "MaoMao")
# print(ll.search(4))
# ll.remove_by_value('Asuna')
# ll.reverse()
# print(ll.index('ZeroTwo'))
ll.print()
# print(ll.mid_element())
# print(ll.length)
# print(ll.__dir__())
if __name__ == "__main__":
main()
| DoublyLinkedList |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 86028,
"end": 87441
} | class ____(unittest.TestCase):
def setUp(self):
self.relateds = []
class TestRelatedObject:
def __init__(subself, obj):
self.relateds.append(subself)
subself.obj = obj
obj.related = subself
class TestRelatedObjectFactory(factory.Factory):
class Meta:
model = TestRelatedObject
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.RelatedFactory(
TestRelatedObjectFactory,
factory_related_name='obj',
)
self.TestRelatedObject = TestRelatedObject
self.TestRelatedObjectFactory = TestRelatedObjectFactory
self.TestObjectFactory = TestObjectFactory
def test_no_extraction(self):
o = self.TestObjectFactory()
self.assertEqual(1, len(self.relateds))
rel = self.relateds[0]
self.assertEqual(o, rel.obj)
self.assertEqual(rel, o.related)
def test_passed_value(self):
o = self.TestObjectFactory(one=42)
self.assertEqual([], self.relateds)
self.assertFalse(hasattr(o, 'related'))
def test_passed_none(self):
o = self.TestObjectFactory(one=None)
self.assertEqual([], self.relateds)
self.assertFalse(hasattr(o, 'related'))
| RelatedFactoryExtractionTestCase |
python | nedbat__coveragepy | tests/test_debug.py | {
"start": 15473,
"end": 17589
} | class ____(CoverageTest):
"""Tests of debug.py:short_filename."""
def test_short_filename(self) -> None:
s = os.sep
se = re.escape(s)
assert short_filename(ast.__file__) == f"syspath:{s}ast.py"
assert short_filename(pytest.__file__) == f"syspath:{s}pytest{s}__init__.py"
assert short_filename(env.__file__) == f"cov:{s}env.py"
self.make_file("hello.txt", "hi")
short_hello = short_filename(os.path.abspath("hello.txt"))
assert re.match(rf"tmp:{se}t\d+{se}hello.txt", short_hello)
oddball = f"{s}xyzzy{s}plugh{s}foo.txt"
assert short_filename(oddball) == oddball
assert short_filename(None) is None
@pytest.mark.parametrize("long_len", [10, 100])
def test_relevant_environment_display(long_len: int) -> None:
env_vars = {
"HOME": "my home",
"HOME_DIR": "other place",
"XYZ_NEVER_MIND": "doesn't matter",
"SOME_PYOTHER": "xyz123",
"COVERAGE_THING": "abcd",
"MY_PYPI_TOKEN": "secret.something",
"TMP": "temporary",
"COVERAGE_PROCESS_CONFIG": "abc" + "x" * (long_len - 3),
}
long_val = {
10: "abcxxxxxxx",
100: "abc" + "x" * (60 - 3 - 3) + "...",
}[long_len]
expected = [
("COVERAGE_PROCESS_CONFIG", long_val),
("COVERAGE_THING", "abcd"),
("HOME", "my home"),
("MY_PYPI_TOKEN", "******.*********"),
("SOME_PYOTHER", "xyz123"),
("TMP", "temporary"),
]
assert expected == relevant_environment_display(env_vars)
def test_exc_one_line() -> None:
try:
raise DataError("wtf?")
except Exception as exc:
assert "coverage.exceptions.DataError: wtf?" == exc_one_line(exc)
def test_auto_repr() -> None:
class MyStuff:
"""Random class to test auto_repr."""
def __init__(self) -> None:
self.x = 17
self.y = "hello"
__repr__ = auto_repr
stuff = MyStuff()
setattr(stuff, "$coverage.object_id", 123456)
assert re.match(r"<MyStuff @0x[a-f\d]+ x=17 y='hello'>", repr(stuff))
| ShortFilenameTest |
python | PyCQA__pylint | tests/functional/g/generic_alias/generic_alias_collections.py | {
"start": 2128,
"end": 2168
} | class ____(list[int]):
pass
| DerivedList |
python | dask__distributed | distributed/http/scheduler/prometheus/semaphore.py | {
"start": 173,
"end": 3514
} | class ____(PrometheusCollector):
def __init__(self, server):
super().__init__(server)
self.subsystem = "semaphore"
def collect(self):
try:
sem_ext = self.server.extensions["semaphores"]
except KeyError:
return
semaphore_max_leases_family = GaugeMetricFamily(
self.build_name("max_leases"),
"Maximum leases allowed per semaphore\n"
"Note: This will be constant for "
"each semaphore during its lifetime.",
labels=["name"],
)
semaphore_active_leases_family = GaugeMetricFamily(
self.build_name("active_leases"),
"Amount of currently active leases per semaphore",
labels=["name"],
)
semaphore_pending_leases = GaugeMetricFamily(
self.build_name("pending_leases"),
"Amount of currently pending leases per semaphore",
labels=["name"],
)
semaphore_acquire_total = CounterMetricFamily(
self.build_name("acquire_total"),
"Total number of leases acquired per semaphore",
labels=["name"],
)
semaphore_release_total = CounterMetricFamily(
self.build_name("release_total"),
"Total number of leases released per semaphore\n"
"Note: If a semaphore is closed while there are still leases active, "
"this count will not equal `semaphore_acquired_total` after execution.",
labels=["name"],
)
semaphore_average_pending_lease_time = GaugeMetricFamily(
self.build_name("average_pending_lease_time"),
"Exponential moving average of the time it took to acquire a lease "
"per semaphore\n"
"Note: This only includes time spent on scheduler side, "
"it does not include time spent on communication.\n"
"Note: This average is calculated based on order of leases instead "
"of time of lease acquisition.",
labels=["name"],
unit="s",
)
for semaphore_name, semaphore_max_leases in sem_ext.max_leases.items():
semaphore_max_leases_family.add_metric(
[semaphore_name], semaphore_max_leases
)
semaphore_active_leases_family.add_metric(
[semaphore_name], len(sem_ext.leases[semaphore_name])
)
semaphore_pending_leases.add_metric(
[semaphore_name], sem_ext.metrics["pending"][semaphore_name]
)
semaphore_acquire_total.add_metric(
[semaphore_name], sem_ext.metrics["acquire_total"][semaphore_name]
)
semaphore_release_total.add_metric(
[semaphore_name], sem_ext.metrics["release_total"][semaphore_name]
)
semaphore_average_pending_lease_time.add_metric(
[semaphore_name],
sem_ext.metrics["average_pending_lease_time"][semaphore_name],
)
yield semaphore_max_leases_family
yield semaphore_active_leases_family
yield semaphore_pending_leases
yield semaphore_acquire_total
yield semaphore_release_total
yield semaphore_average_pending_lease_time
| SemaphoreMetricCollector |
python | plotly__plotly.py | plotly/graph_objs/splom/marker/colorbar/title/_font.py | {
"start": 233,
"end": 9939
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "splom.marker.colorbar.title"
_path_str = "splom.marker.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.splom.marker.c
olorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.splom.marker.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.marker.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | PrefectHQ__prefect | src/prefect/client/schemas/actions.py | {
"start": 33210,
"end": 33649
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to update a global concurrency limit."""
name: Optional[Name] = Field(default=None)
limit: Optional[NonNegativeInteger] = Field(default=None)
active: Optional[bool] = Field(default=None)
active_slots: Optional[NonNegativeInteger] = Field(default=None)
slot_decay_per_second: Optional[NonNegativeFloat] = Field(default=None)
| GlobalConcurrencyLimitUpdate |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 101266,
"end": 104634
} | class ____(Response):
"""
Response of events.get_task_events endpoint.
:param events: Events list
:type events: Sequence[dict]
:param returned: Number of results returned
:type returned: int
:param total: Total number of results available for this query
:type total: float
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
"""
_service = "events"
_action = "get_task_events"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"events": {
"description": "Events list",
"items": {"type": "object"},
"type": ["array", "null"],
},
"returned": {
"description": "Number of results returned",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
"total": {
"description": "Total number of results available for this query",
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(
self,
events: Optional[List[dict]] = None,
returned: Optional[int] = None,
total: Optional[float] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetTaskEventsResponse, self).__init__(**kwargs)
self.events = events
self.returned = returned
self.total = total
self.scroll_id = scroll_id
@schema_property("events")
def events(self) -> Optional[List[dict]]:
return self._property_events
@events.setter
def events(self, value: Optional[List[dict]]) -> None:
if value is None:
self._property_events = None
return
self.assert_isinstance(value, "events", (list, tuple))
self.assert_isinstance(value, "events", (dict,), is_array=True)
self._property_events = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("total")
def total(self) -> Optional[float]:
return self._property_total
@total.setter
def total(self, value: Optional[float]) -> None:
if value is None:
self._property_total = None
return
self.assert_isinstance(value, "total", six.integer_types + (float,))
self._property_total = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetTaskEventsResponse |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 59781,
"end": 59946
} | class ____(AnsiFunction[datetime.datetime]):
"""The CURRENT_TIMESTAMP() SQL function."""
type = sqltypes.DateTime()
inherit_cache = True
| current_timestamp |
python | zarr-developers__zarr-python | src/zarr/core/buffer/core.py | {
"start": 17343,
"end": 18026
} | class ____(NamedTuple):
"""Prototype of the Buffer and NDBuffer class
The protocol must be pickable.
Attributes
----------
buffer
The Buffer class to use when Zarr needs to create new Buffer.
nd_buffer
The NDBuffer class to use when Zarr needs to create new NDBuffer.
"""
buffer: type[Buffer]
nd_buffer: type[NDBuffer]
# The default buffer prototype used throughout the Zarr codebase.
def default_buffer_prototype() -> BufferPrototype:
from zarr.registry import (
get_buffer_class,
get_ndbuffer_class,
)
return BufferPrototype(buffer=get_buffer_class(), nd_buffer=get_ndbuffer_class())
| BufferPrototype |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/refurb/FURB142.py | {
"start": 573,
"end": 1282
} | class ____:
s: set[int]
c = C()
for x in (1, 2, 3):
c.s.add(x)
# Ok
s.update(x for x in (1, 2, 3))
for x in (1, 2, 3):
s.add(x)
else:
pass
async def f(y):
async for x in y:
s.add(x)
def g():
for x in (set(),):
x.add(x)
# Test cases for lambda and ternary expressions - https://github.com/astral-sh/ruff/issues/18590
s = set()
for x in lambda: 0:
s.discard(-x)
for x in (1,) if True else (2,):
s.add(-x)
# don't add extra parens
for x in (lambda: 0):
s.discard(-x)
for x in ((1,) if True else (2,)):
s.add(-x)
# don't add parens directly in function call
for x in lambda: 0:
s.discard(x)
for x in (1,) if True else (2,):
s.add(x)
| C |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sql_datasource.py | {
"start": 8257,
"end": 8726
} | class ____(_PartitionerDatetime):
column_name: str
sort_ascending: bool = True
method_name: Literal["partition_on_year_and_month_and_day"] = (
"partition_on_year_and_month_and_day"
)
@property
@override
def param_names(self) -> List[str]:
return ["year", "month", "day"]
@override
def partitioner_method_kwargs(self) -> Dict[str, Any]:
return {"column_name": self.column_name}
| SqlPartitionerYearAndMonthAndDay |
python | ray-project__ray | python/ray/llm/_internal/serve/core/configs/openai_api_models.py | {
"start": 4077,
"end": 4177
} | class ____(vLLMScoreRequest):
model_config = ConfigDict(arbitrary_types_allowed=True)
| ScoreRequest |
python | scikit-learn__scikit-learn | sklearn/tests/test_base.py | {
"start": 2403,
"end": 2549
} | class ____(BaseEstimator):
"A buggy estimator that does not set its parameters right."
def __init__(self, a=None):
self.a = 1
| Buggy |
python | google__pytype | pytype/directors/directors.py | {
"start": 4208,
"end": 5537
} | class ____:
"""A collection of possibly nested start..end ranges from AST nodes."""
def __init__(self, start_to_end_mapping):
self._starts = sorted(start_to_end_mapping)
self._start_to_end = start_to_end_mapping
self._end_to_start = {v: k for k, v in start_to_end_mapping.items()}
def has_start(self, line):
return line in self._start_to_end
def has_end(self, line):
return line in self._end_to_start
def find_outermost(self, line):
"""Find the outermost interval containing line."""
i = bisect.bisect_left(self._starts, line)
num_intervals = len(self._starts)
if i or line == self._starts[0]:
if i < num_intervals and self._starts[i] == line:
# line number is start of interval.
start = self._starts[i]
else:
# Skip nested intervals
while (
1 < i <= num_intervals
and self._start_to_end[self._starts[i - 1]] < line
):
i -= 1
start = self._starts[i - 1]
end = self._start_to_end[start]
if line in range(start, end):
return start, end
return None, None
def adjust_end(self, old_end, new_end):
start = self._end_to_start[old_end]
self._start_to_end[start] = new_end
del self._end_to_start[old_end]
self._end_to_start[new_end] = start
| _BlockRanges |
python | sympy__sympy | sympy/functions/combinatorial/numbers.py | {
"start": 69346,
"end": 70493
} | class ____(DefinedFunction):
r"""
Calculate the number of prime factors counting multiplicities for a
positive integer n.
If n's prime factorization is:
.. math ::
n = \prod_{i=1}^k p_i^{m_i},
then ``primeomega(n)`` or `\Omega(n)` is:
.. math ::
\Omega(n) = \sum_{i=1}^k m_i.
Examples
========
>>> from sympy.functions.combinatorial.numbers import primeomega
>>> primeomega(1)
0
>>> primeomega(20)
3
See Also
========
sympy.ntheory.factor_.factorint
References
==========
.. [1] https://mathworld.wolfram.com/PrimeFactor.html
.. [2] https://oeis.org/A001222
"""
is_integer = True
is_nonnegative = True
@classmethod
def eval(cls, n):
if n.is_integer is False:
raise TypeError("n should be an integer")
if n.is_positive is False:
raise ValueError("n should be a positive integer")
if n.is_prime is True:
return S.One
if n is S.One:
return S.Zero
if n.is_Integer is True:
return S(sum(factorint(n).values()))
| primeomega |
python | pytorch__pytorch | test/torch_np/test_basic.py | {
"start": 15983,
"end": 17017
} | class ____(TestCase):
def test_ndarrays_to_tensors(self):
out = _util.ndarrays_to_tensors(((w.asarray(42), 7), 3))
assert len(out) == 2
assert isinstance(out[0], tuple) and len(out[0]) == 2
assert isinstance(out[0][0], torch.Tensor)
@skip(not TEST_CUDA, reason="requires cuda")
def test_f16_on_cuda(self):
# make sure operations with float16 tensors give same results on CUDA and on CPU
t = torch.arange(5, dtype=torch.float16)
assert_allclose(w.vdot(t.cuda(), t.cuda()), w.vdot(t, t))
assert_allclose(w.inner(t.cuda(), t.cuda()), w.inner(t, t))
assert_allclose(w.matmul(t.cuda(), t.cuda()), w.matmul(t, t))
assert_allclose(w.einsum("i,i", t.cuda(), t.cuda()), w.einsum("i,i", t, t))
assert_allclose(w.mean(t.cuda()), w.mean(t))
assert_allclose(w.cov(t.cuda(), t.cuda()), w.cov(t, t).tensor.cuda())
assert_allclose(w.corrcoef(t.cuda()), w.corrcoef(t).tensor.cuda())
if __name__ == "__main__":
run_tests()
| TestMisc |
python | getsentry__sentry | tests/sentry/models/test_debugfile.py | {
"start": 12303,
"end": 20074
} | class ____(APITestCase):
def test_simple_cache_clear(self) -> None:
project = self.create_project(name="foo")
url = reverse(
"sentry-api-0-dsym-files",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
self.login_as(user=self.user)
out = BytesIO()
f = zipfile.ZipFile(out, "w")
f.writestr("proguard/%s.txt" % PROGUARD_UUID, PROGUARD_SOURCE)
f.writestr("ignored-file.txt", b"This is just some stuff")
f.close()
response = self.client.post(
url,
{
"file": SimpleUploadedFile(
"symbols.zip", out.getvalue(), content_type="application/zip"
)
},
format="multipart",
)
assert response.status_code == 201, response.content
assert len(response.data) == 1
assert response.data[0]["headers"] == {"Content-Type": "text/x-proguard+plain"}
assert response.data[0]["sha1"] == "e6d3c5185dac63eddfdc1a5edfffa32d46103b44"
assert response.data[0]["uuid"] == PROGUARD_UUID
assert response.data[0]["objectName"] == "proguard-mapping"
assert response.data[0]["cpuName"] == "any"
assert response.data[0]["symbolType"] == "proguard"
difs = ProjectDebugFile.difcache.fetch_difs(
project=project, debug_ids=[PROGUARD_UUID], features=["mapping"]
)
assert len(difs) == 1
assert os.path.isfile(difs[PROGUARD_UUID])
# if we clear now, nothing happens
ProjectDebugFile.difcache.clear_old_entries()
assert os.path.isfile(difs[PROGUARD_UUID])
# Put the time into the future
real_time = time.time
time.time = lambda: real_time() + 60 * 60 * 48
try:
ProjectDebugFile.difcache.clear_old_entries()
finally:
time.time = real_time
# But it's gone now
assert not os.path.isfile(difs[PROGUARD_UUID])
@pytest.mark.parametrize(
("path", "name", "uuid"),
(
(
"/proguard/mapping-00000000-0000-0000-0000-000000000000.txt",
None,
"00000000-0000-0000-0000-000000000000",
),
(
"/proguard/00000000-0000-0000-0000-000000000000.txt",
None,
"00000000-0000-0000-0000-000000000000",
),
(
"/var/folders/x5/zw3gnf_x3ts0dwg56362ftrw0000gn/T/tmpbs2r93sr",
"/proguard/mapping-00000000-0000-0000-0000-000000000000.txt",
"00000000-0000-0000-0000-000000000000",
),
(
"/var/folders/x5/zw3gnf_x3ts0dwg56362ftrw0000gn/T/tmpbs2r93sr",
"/proguard/00000000-0000-0000-0000-000000000000.txt",
"00000000-0000-0000-0000-000000000000",
),
),
)
def test_proguard_files_detected(path: str, name: str | None, uuid: str) -> None:
# ProGuard files are detected by the path/name, not the file contents.
# So, the ProGuard check should not depend on the file existing.
detected = detect_dif_from_path(path, name)
assert len(detected) == 1
(dif_meta,) = detected
assert dif_meta.file_format == "proguard"
assert dif_meta.debug_id == uuid
assert dif_meta.data == {"features": ["mapping"]}
@pytest.mark.parametrize(
("path", "name"),
(
("/var/folders/x5/zw3gnf_x3ts0dwg56362ftrw0000gn/T/tmpbs2r93sr", None),
("/var/folders/x5/zw3gnf_x3ts0dwg56362ftrw0000gn/T/tmpbs2r93sr", "not-a-proguard-file.txt"),
(
# Note: "/" missing from beginning of path
"proguard/mapping-00000000-0000-0000-0000-000000000000.txt",
None,
),
(
"/var/folders/x5/zw3gnf_x3ts0dwg56362ftrw0000gn/T/tmpbs2r93sr",
# Note: "/" missing from beginning of path
"proguard/mapping-00000000-0000-0000-0000-000000000000.txt",
),
),
)
def test_proguard_file_not_detected(path: str, name: str | None) -> None:
with pytest.raises(FileNotFoundError):
# If the file is not detected as a ProGuard file, detect_dif_from_path
# attempts to open the file, which probably doesn't exist.
# Note that if the path or name does exist as a file on the filesystem,
# this test will fail.
detect_dif_from_path(path, name)
def test_dartsymbolmap_file_detected() -> None:
"""Test that dartsymbolmap files are properly detected and validated."""
# Create a temporary dartsymbolmap file (array format)
with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
f.write('["ExceptionClass", "xyz", "DatabaseError", "abc"]')
f.flush()
detected = detect_dif_from_path(
f.name, name="dartsymbolmap.json", debug_id="b8e43a-f242-3d73-a453-aeb6a777ef75"
)
assert len(detected) == 1
dif_meta = detected[0]
assert dif_meta.file_format == "dartsymbolmap"
assert dif_meta.arch == "any"
assert dif_meta.debug_id == "b8e43a-f242-3d73-a453-aeb6a777ef75"
assert dif_meta.name == "dartsymbolmap.json"
assert dif_meta.data == {"features": ["mapping"]}
def test_dartsymbolmap_file_odd_array_fails() -> None:
"""Test that dartsymbolmap with odd number of elements fails."""
with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
f.write('["one", "two", "three"]') # Odd number of elements
f.flush()
from sentry.models.debugfile import BadDif
with pytest.raises(
BadDif, match="dartsymbolmap array must have an even number of elements"
):
detect_dif_from_path(
f.name, name="dartsymbolmap.json", debug_id="b8e43a-f242-3d73-a453-aeb6a777ef75"
)
def test_dartsymbolmap_file_dict_format() -> None:
"""Test that dict format JSON files are detected as Il2Cpp, not dartsymbolmap."""
# Note: Files starting with '{' are detected as Il2Cpp files, not dartsymbolmap
# This is because determine_dif_kind() checks for '{' before '[' and assigns Il2Cpp
with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
f.write('{"xyz": "ExceptionClass", "abc": "DatabaseError"}')
f.flush()
# This will be detected as Il2Cpp, not dartsymbolmap
detected = detect_dif_from_path(
f.name, name="dartsymbolmap.json", debug_id="b8e43a-f242-3d73-a453-aeb6a777ef75"
)
assert len(detected) == 1
dif_meta = detected[0]
# Should be detected as il2cpp, not dartsymbolmap
assert dif_meta.file_format == "il2cpp"
assert dif_meta.debug_id == "b8e43a-f242-3d73-a453-aeb6a777ef75"
def test_dartsymbolmap_file_invalid_json() -> None:
"""Test that invalid JSON fails for dartsymbolmap."""
with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
f.write("[invalid json")
f.flush()
from sentry.models.debugfile import BadDif
with pytest.raises(BadDif, match="Invalid dartsymbolmap:"):
detect_dif_from_path(
f.name, name="dartsymbolmap.json", debug_id="b8e43a-f242-3d73-a453-aeb6a777ef75"
)
def test_dartsymbolmap_file_missing_debug_id() -> None:
"""Test that dartsymbolmap without debug_id fails."""
with tempfile.NamedTemporaryFile(mode="w", suffix=".json") as f:
f.write('["one", "two"]')
f.flush()
from sentry.models.debugfile import BadDif
with pytest.raises(BadDif, match="Missing debug_id for dartsymbolmap"):
detect_dif_from_path(f.name, name="dartsymbolmap.json", debug_id=None)
| DebugFilesClearTest |
python | huggingface__transformers | src/transformers/models/mt5/modeling_mt5.py | {
"start": 3825,
"end": 5189
} | class ____(nn.Module):
def __init__(self, config: MT5Config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
# To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
# See https://github.com/huggingface/transformers/issues/20287
# we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
if (
isinstance(self.wo.weight, torch.Tensor)
and hidden_states.dtype != self.wo.weight.dtype
and self.wo.weight.dtype != torch.int8
):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
# Copied from transformers.models.t5.modeling_t5.T5LayerFF with T5->MT5
| MT5DenseGatedActDense |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 199925,
"end": 200315
} | class ____:
_col_type = DATEMULTIRANGE
_col_str = "DATEMULTIRANGE"
def _data_str(self):
return "{[2013-03-23,2013-03-24), [2014-05-23,2014-05-24)}"
def _data_obj(self):
return [
Range(datetime.date(2013, 3, 23), datetime.date(2013, 3, 24)),
Range(datetime.date(2014, 5, 23), datetime.date(2014, 5, 24)),
]
| _DateMultiRangeTests |
python | realpython__materials | python-class/animals.py | {
"start": 139,
"end": 201
} | class ____(Animal):
unique_feature = "Mammary glands"
| Mammal |
python | pytorch__pytorch | torch/jit/mobile/__init__.py | {
"start": 1731,
"end": 8889
} | class ____:
def __init__(self, cpp_module) -> None:
self._c = cpp_module
super().__init__()
def __call__(self, *input):
return self._c.forward(input)
def find_method(self, method_name):
return self._c.find_method(method_name)
def forward(self, *input):
return self._c.forward(input)
def run_method(self, method_name, *input):
return self._c.run_method(method_name, input)
def _export_operator_list(module: LiteScriptModule):
r"""Return a set of root operator names (with overload name) that are used by any method in this mobile module."""
return torch._C._export_operator_list(module._c)
def _get_model_bytecode_version(f_input) -> int:
r"""Take a file-like object to return an integer.
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
Returns:
version: An integer. If the integer is -1, the version is invalid. A warning
will show in the log.
Example:
.. testcode::
from torch.jit.mobile import _get_model_bytecode_version
# Get bytecode version from a saved file path
version = _get_model_bytecode_version("path/to/model.ptl")
"""
if isinstance(f_input, (str, os.PathLike)):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if isinstance(f_input, (str, os.PathLike)):
return torch._C._get_model_bytecode_version(os.fspath(f_input))
else:
# pyrefly: ignore [missing-attribute]
return torch._C._get_model_bytecode_version_from_buffer(f_input.read())
def _get_mobile_model_contained_types(f_input) -> int:
r"""Take a file-like object and return a set of string, like ("int", "Optional").
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
Returns:
type_list: A set of string, like ("int", "Optional"). These are types used in bytecode.
Example:
.. testcode::
from torch.jit.mobile import _get_mobile_model_contained_types
# Get type list from a saved file path
type_list = _get_mobile_model_contained_types("path/to/model.ptl")
"""
if isinstance(f_input, (str, os.PathLike)):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if isinstance(f_input, (str, os.PathLike)):
return torch._C._get_mobile_model_contained_types(os.fspath(f_input))
else:
# pyrefly: ignore [missing-attribute]
return torch._C._get_mobile_model_contained_types_from_buffer(f_input.read())
def _backport_for_mobile(f_input, f_output, to_version):
r"""Take a input string containing a file name (file-like object) and a new destination to return a boolean.
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
f_output: path to new model destination
to_version: the expected output model bytecode version
Returns:
success: A boolean. If backport success, return true, otherwise false
"""
if isinstance(f_input, (str, os.PathLike)):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if (isinstance(f_input, (str, os.PathLike))) and (
isinstance(f_output, (str, os.PathLike))
):
return torch._C._backport_for_mobile(
os.fspath(f_input),
os.fspath(f_output),
to_version,
)
else:
return torch._C._backport_for_mobile_from_buffer(
# pyrefly: ignore [missing-attribute]
f_input.read(),
str(f_output),
to_version,
)
def _backport_for_mobile_to_buffer(f_input, to_version):
r"""Take a string containing a file name (file-like object).
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
"""
if isinstance(f_input, (str, os.PathLike)):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if isinstance(f_input, (str, os.PathLike)):
return torch._C._backport_for_mobile_to_buffer(os.fspath(f_input), to_version)
else:
return torch._C._backport_for_mobile_from_buffer_to_buffer(
# pyrefly: ignore [missing-attribute]
f_input.read(),
to_version,
)
def _get_model_ops_and_info(f_input):
r"""Retrieve the root (top level) operators of a model and their corresponding compatibility info.
These root operators can call other operators within them (traced ops), and
a root op can call many different traced ops depending on internal code paths in the root op.
These traced ops are not returned by this function. Those operators are abstracted into the
runtime as an implementation detail (and the traced ops themselves can also call other operators)
making retrieving them difficult and their value from this api negligible since they will differ
between which runtime version the model is run on. Because of this, there is a false positive this
api can't prevent in a compatibility usecase. All the root ops of a model are present in a
target runtime, but not all the traced ops are which prevents a model from being able to run.
Args:
f_input: a file-like object (has to implement read, readline, tell, and seek),
or a string containing a file name
Returns:
Operators and info: A Dictionary mapping strings (the qualified names of the root operators)
of the model to their OperatorInfo structs.
Example:
.. testcode::
from torch.jit.mobile import _get_model_ops_and_info
# Get bytecode version from a saved file path
ops_and_info = _get_model_ops_and_info("path/to/model.ptl")
"""
if isinstance(f_input, (str, os.PathLike)):
if not os.path.exists(f_input):
raise ValueError(f"The provided filename {f_input} does not exist")
if os.path.isdir(f_input):
raise ValueError(f"The provided filename {f_input} is a directory")
if isinstance(f_input, (str, os.PathLike)):
return torch._C._get_model_ops_and_info(os.fspath(f_input))
else:
# pyrefly: ignore [missing-attribute]
return torch._C._get_model_ops_and_info(f_input.read())
| LiteScriptModule |
python | walkccc__LeetCode | solutions/2861. Maximum Number of Alloys/2861.py | {
"start": 0,
"end": 793
} | class ____:
def maxNumberOfAlloys(self, n: int, k: int, budget: int,
composition: list[list[int]], stock: list[int],
costs: list[int]) -> int:
l = 1
r = 1_000_000_000
def isPossible(m: int) -> bool:
"""Returns True if it's possible to create `m` alloys by using any machine."""
# Try all the possible machines.
for machine in composition:
requiredMoney = 0
for j in range(n):
requiredUnits = max(0, machine[j] * m - stock[j])
requiredMoney += requiredUnits * costs[j]
if requiredMoney <= budget:
return True
return False
while l < r:
m = (l + r) // 2
if isPossible(m):
l = m + 1
else:
r = m
return l - 1
| Solution |
python | patrick-kidger__equinox | equinox/_vmap_pmap.py | {
"start": 15350,
"end": 23761
} | class ____(Module):
_fun: Callable
_in_axes: PyTree[AxisSpec]
_out_axes: PyTree[AxisSpec]
_axis_name: Hashable | None
_axis_size: int | None
_filter_warning: bool
_pmapkwargs: dict[str, Any]
@property
def __wrapped__(self):
return self._fun
def _call(self, is_lower, args, kwargs):
maybe_dummy = _common_preprocess(self._axis_size, kwargs)
del kwargs
in_axes = _named_in_axes(self._fun, self._in_axes, args)
in_axes = _resolve_axes(args, in_axes)
in_axes = (None, in_axes, 0, None)
dynamic, static = partition(
(self._fun, args, maybe_dummy, self._out_axes), is_array
)
struct = jtu.tree_map(lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype), dynamic)
cached = _filter_pmap_cache(
self._fun,
static,
struct,
in_axes,
self._axis_name,
self._axis_size,
self._pmapkwargs,
)
if is_lower:
return Lowered(
cached.lower(dynamic),
(self._fun, self._out_axes, self._axis_size),
_preprocess, # pyright: ignore
_postprocess, # pyright: ignore
)
else:
if self._filter_warning is True:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message="Some donated buffers were not usable*"
)
out = cached(dynamic)
else:
out = cached(dynamic)
return _postprocess(out)
def __call__(self, /, *args, **kwargs):
return self._call(False, args, kwargs)
def lower(self, /, *args, **kwargs) -> Lowered:
return self._call(True, args, kwargs)
def __get__(self, instance, owner):
del owner
if instance is None:
return self
return Partial(self, instance)
# Deliberately using `Callable[..., Any]` as `filter_pmap` does change the input and
# out args in ways not expressible in the static type system (changing the number of
# axes).
@overload
def filter_pmap(
*,
in_axes: PyTree[AxisSpec] = if_array(0),
out_axes: PyTree[AxisSpec] = if_array(0),
axis_name: Hashable = None,
axis_size: int | None = None,
donate: Literal["all", "warn", "none"] = "none",
) -> Callable[[Callable[..., Any]], Callable[..., Any]]: ...
@overload
def filter_pmap(
fun: Callable[..., Any],
*,
in_axes: PyTree[AxisSpec] = if_array(0),
out_axes: PyTree[AxisSpec] = if_array(0),
axis_name: Hashable = None,
axis_size: int | None = None,
donate: Literal["all", "warn", "none"] = "none",
) -> Callable[..., Any]: ...
@doc_remove_args("pmapkwargs")
def filter_pmap(
fun=sentinel,
*,
in_axes: PyTree[AxisSpec] = if_array(0),
out_axes: PyTree[AxisSpec] = if_array(0),
axis_name: Hashable = None,
axis_size: int | None = None,
donate: Literal["all", "warn", "none"] = "none",
**pmapkwargs,
):
"""
!!! warning
JAX has now added more powerful parallelism APIs directly to the JIT interface.
As such, using [`equinox.filter_jit`][] with sharded inputs is now recommended
over `filter_pmap`. See also the
[parallelism example](../examples/parallelism.ipynb).
Parallelises a function. By default, all JAX/NumPy arrays are parallelised down
their leading axis (i.e. axis index 0), and all other types are broadcast.
`jax.pmap`, and thus `equinox.filter_pmap`, also compiles their function in the same
way as `jax.jit`. By default, all JAX arrays are traced, and all other arguments are
treated as static inputs.
**Arguments:**
For both `in_axes` and `out_axes`, then `int` indicates an array axis to parallelise
over, `None` indicates that an argument should be broadcast (not parallelise
over), and callables `Leaf -> Union[None, int]` are mapped and evaluated on every
leaf of their subtree. `None` should be used for non-JAX-array arguments.
- `fun` is a pure function to parallelise. Should be of the form `fun(*args)`; that
is to say it cannot accept keyword arguments.
- `in_axes` indicates which axes of the input arrays should be parallelised over.
It should be a PyTree of `None`, `int`, or callables `Leaf -> Union[None, int]`.
Its tree structure should either be:
1. a prefix of the input tuple of `args`.
2. a dictionary, in which case the named arguments use the specified indices
to parallelise over, and all other arguments will have the default
`eqx.if_array(0)`.
- `out_axes` indicates which axis of the output arrays the mapped axis should appear
at. It should be a PyTree of `None`, `int`, or callables
`Leaf -> Union[None, int]`, and its tree structure should be a prefix of the
output `fun(*args)`.
- `axis_name` is an optional hashable Python object used to identify the mapped
axis so that parallel collectives (e.g. `jax.lax.psum`) can be applied.
- `axis_size` is an optional `int` describing the size of the axis mapped. This
only needs to be passed if none of the input arguments are vectorised, as else
it can be deduced by looking at the argument shapes.
- `donate` indicates whether the buffers of JAX arrays are donated or not, it
should either be:
- `'all'`: donate all arrays and suppress all warnings about
unused buffers;
- `'warn'`: as above, but don't suppress unused buffer warnings;
- `'none'`: the default, disables buffer donation.
**Returns:**
The parallelised version of `fun`.
!!! tip
To parallelise all JAX/NumPy arrays down their `j`th axis, and broadcast all
other types, then you can use `equinox.if_array(j)`, which returns a callable
`leaf -> j if is_array(leaf) else None`. For example: the default values of
`in_axes` and `out_axes` are both `equinox.if_array(0)`.
!!! example
```python
import equinox as eqx
import jax.numpy as jnp
@eqx.filter_pmap
def f(x, y):
return x + y
@eqx.filter_pmap(in_axes=(None, 1))
def g(x, y):
return x + y
f(jnp.array([1, 2]), jnp.array([3, 4])) # both args parallelised down axis 0
f(jnp.array([1, 2]), 3) # first arg parallelised down axis 0
# second arg broadcasted (as it's not
# a JAX array)
g(jnp.array(1), jnp.array([[2, 3]])) # first arg broadcasted
# second arg parallelised down axis 1
```
"""
if fun is sentinel:
return ft.partial(
filter_pmap,
in_axes=in_axes,
out_axes=out_axes,
axis_name=axis_name,
axis_size=axis_size,
donate=donate,
**pmapkwargs,
)
deprecated_0_10(pmapkwargs, "default")
deprecated_0_10(pmapkwargs, "fn")
deprecated_0_10(pmapkwargs, "args")
deprecated_0_10(pmapkwargs, "kwargs")
deprecated_0_10(pmapkwargs, "out")
if any(x in pmapkwargs for x in ("static_broadcasted_argnums", "donate_argnums")):
raise ValueError(
"`pmapkwargs` cannot contain either 'static_broadcasted_argnums' or "
"'donate_argnums'"
)
if donate == "arrays":
warnings.warn(
"The `donate='arrays'` option to `filter_pmap` has been renamed to "
"`donate='all'`",
DeprecationWarning,
)
donate = "all"
if donate not in {"all", "warn", "none"}:
raise ValueError(
"`filter_jit(..., donate=...)` must be one of 'all', 'warn', or 'none'"
)
filter_warning = True if donate == "all" else False
if donate != "none":
pmapkwargs["donate_argnums"] = (0,)
pmap_wrapper = _PmapWrapper(
_fun=fun,
_in_axes=in_axes,
_out_axes=out_axes,
_axis_name=axis_name,
_axis_size=axis_size,
_filter_warning=filter_warning,
_pmapkwargs=pmapkwargs,
)
return module_update_wrapper(pmap_wrapper)
| _PmapWrapper |
python | realpython__materials | tic-tac-toe-ai-python/source_code_bonus/tic-tac-toe/library/src/tic_tac_toe/game/players.py | {
"start": 262,
"end": 862
} | class ____(metaclass=abc.ABCMeta):
def __init__(self, mark: Mark) -> None:
self.mark = mark
def make_move(self, game_state: GameState) -> GameState:
if self.mark is game_state.current_mark:
if move := self.get_move(game_state):
return move.after_state
raise InvalidMove("No more possible moves")
else:
raise InvalidMove("It's the other player's turn")
@abc.abstractmethod
def get_move(self, game_state: GameState) -> Move | None:
"""Return the current player's move in the given game state."""
| Player |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-for-k-virus-variants-to-spread.py | {
"start": 2560,
"end": 4248
} | class ____(object):
def minDayskVariants(self, points, k):
"""
:type points: List[List[int]]
:type k: int
:rtype: int
"""
def add_rec(rec, intervals):
x0, y0, x1, y1 = rec
# add [y0, y1] by 1 in [x0, x1+1)
intervals.append([[x0, +1], [y0, y1]])
intervals.append([[x1+1, -1], [y0, y1]])
def check(points, k, l): # Time: O(nlogn), Space: O(n)
intervals = []
y_set = set()
for x, y in points:
add_rec([x-l, y-l, x+l, y+l], intervals)
y_set.add(y-l)
y_set.add(y+l)
intervals.sort()
y_to_idx = {y:i for i, y in enumerate(sorted(y_set))} # coordinate compression
st = SegmentTree(len(y_to_idx))
for [_, v], [y0, y1] in intervals: # line sweep
st.update(y_to_idx[y0], y_to_idx[y1], v)
if st.query(0, len(y_to_idx)-1) >= k:
return True
return False
points = [[x+y, x-y] for x, y in points] # rotate
min_x = min(points)[0]
max_x = max(points)[0]
min_y = min(points, key=lambda x: x[1])[1]
max_y = max(points, key=lambda x: x[1])[1]
left, right = 0, ((max_x-min_x)+(max_y-min_y)+1)//2
while left <= right:
mid = left + (right-left)//2
if check(points, k, mid):
right = mid-1
else:
left = mid+1
return left
# Time: O(n^2 * logr), r is the sum of range x size and range y size
# Space: O(n)
import collections
# interview solution
| Solution |
python | astropy__astropy | astropy/io/fits/header.py | {
"start": 69404,
"end": 72050
} | class ____(collections.abc.Mapping):
"""This class provides a fast header parsing, without all the additional
features of the Header class. Here only standard keywords are parsed, no
support for CONTINUE, HIERARCH, COMMENT, HISTORY, or rvkc.
The raw card images are stored and parsed only if needed. The idea is that
to create the HDU objects, only a small subset of standard cards is needed.
Once a card is parsed, which is deferred to the Card class, the Card object
is kept in a cache. This is useful because a small subset of cards is used
a lot in the HDU creation process (NAXIS, XTENSION, ...).
"""
def __init__(self, cards):
# dict of (keywords, card images)
self._raw_cards = cards
self._keys = list(cards.keys())
# dict of (keyword, Card object) storing the parsed cards
self._cards = {}
# the _BasicHeaderCards object allows to access Card objects from
# keyword indices
self.cards = _BasicHeaderCards(self)
self._modified = False
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
key = self._keys[key]
try:
return self._cards[key].value
except KeyError:
# parse the Card and store it
cardstr = self._raw_cards[key]
self._cards[key] = card = Card.fromstring(cardstr)
return card.value
def __len__(self):
return len(self._raw_cards)
def __iter__(self):
return iter(self._raw_cards)
def index(self, keyword):
return self._keys.index(keyword)
@property
def data_size(self):
"""
Return the size (in bytes) of the data portion following the `Header`.
"""
return _hdr_data_size(self)
@property
def data_size_padded(self):
"""
Return the size (in bytes) of the data portion following the `Header`
including padding.
"""
size = self.data_size
return size + _pad_length(size)
@classmethod
def fromfile(cls, fileobj):
"""The main method to parse a FITS header from a file. The parsing is
done with the parse_header function implemented in Cython.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, "rb")
close_file = True
try:
header_str, cards = parse_header(fileobj)
_check_padding(header_str, BLOCK_SIZE, False)
return header_str, cls(cards)
finally:
if close_file:
fileobj.close()
| _BasicHeader |
python | dagster-io__dagster | python_modules/libraries/dagster-airflow/dagster_airflow_tests/test_dagster_operator.py | {
"start": 905,
"end": 4089
} | class ____(unittest.TestCase):
@mock.patch("dagster_airflow.hooks.dagster_hook.DagsterHook.launch_run", return_value="run_id")
@mock.patch("dagster_airflow.hooks.dagster_hook.DagsterHook.wait_for_run")
def test_operator(self, launch_run, wait_for_run):
dag = DAG(dag_id="anydag", start_date=datetime.now())
run_config = {"foo": "bar"}
task = DagsterCloudOperator(
dag=dag,
task_id="anytask",
job_name="anyjob",
run_config=run_config,
user_token="token",
organization_id="test-org",
dagster_conn_id=None, # pyright: ignore[reportArgumentType]
)
if airflow_version >= "2.0.0":
dagrun = dag.create_dagrun(
state=DagRunState.RUNNING, # pyright: ignore[reportPossiblyUnboundVariable]
execution_date=datetime.now(),
data_interval=(DATA_INTERVAL_START, DATA_INTERVAL_END),
start_date=DATA_INTERVAL_END,
run_type=DagRunType.MANUAL, # pyright: ignore[reportPossiblyUnboundVariable]
)
ti = dagrun.get_task_instance(task_id="anytask")
assert ti
ti.task = dag.get_task(task_id="anytask")
ti.run(ignore_ti_state=True) # pyright: ignore[reportAttributeAccessIssue]
assert ti.state == TaskInstanceState.SUCCESS # pyright: ignore[reportPossiblyUnboundVariable]
else:
ti = TaskInstance(task=task, execution_date=datetime.now())
ctx = ti.get_template_context()
task.execute(ctx)
launch_run.assert_called_once()
wait_for_run.assert_called_once()
@mock.patch("dagster_airflow.hooks.dagster_hook.DagsterHook.launch_run", return_value="run_id")
@mock.patch("dagster_airflow.hooks.dagster_hook.DagsterHook.wait_for_run")
@mock.patch(
"dagster_airflow.hooks.dagster_hook.DagsterHook.get_connection",
return_value=MOCK_DAGSTER_CONNECTION,
)
@pytest.mark.skipif(airflow_version < "2.0.0", reason="dagster connection requires airflow 2")
def test_operator_with_connection(self, launch_run, wait_for_run, _mock_get_conn):
dag = DAG(dag_id="anydag", start_date=datetime.now())
run_config = {"foo": "bar"}
DagsterCloudOperator(dag=dag, task_id="anytask", job_name="anyjob", run_config=run_config)
dagrun = dag.create_dagrun(
state=DagRunState.RUNNING, # pyright: ignore[reportPossiblyUnboundVariable]
execution_date=datetime.now(),
data_interval=(DATA_INTERVAL_START, DATA_INTERVAL_END),
start_date=DATA_INTERVAL_END,
run_type=DagRunType.MANUAL, # pyright: ignore[reportPossiblyUnboundVariable]
)
ti = dagrun.get_task_instance(task_id="anytask")
assert ti
ti.task = dag.get_task(task_id="anytask")
ti.run(ignore_ti_state=True) # pyright: ignore[reportAttributeAccessIssue]
assert ti.state == TaskInstanceState.SUCCESS # pyright: ignore[reportPossiblyUnboundVariable]
launch_run.assert_called_once()
wait_for_run.assert_called_once()
| TestDagsterOperator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_responses/oauth_response_builder.py | {
"start": 195,
"end": 791
} | class ____:
@classmethod
def token_response(cls, status_code: int = 200) -> "OAuthResponseBuilder":
return cls("oauth", status_code)
def __init__(self, resource: str, status_code: int = 200) -> None:
self._status_code: int = status_code
self._resource: str = resource
def with_status_code(self, status_code: int) -> "OAuthResponseBuilder":
self._status_code = status_code
return self
def build(self) -> HttpResponse:
return HttpResponse(json.dumps(find_template(self._resource, __file__)), self._status_code)
| OAuthResponseBuilder |
python | django__django | tests/middleware_exceptions/middleware.py | {
"start": 341,
"end": 611
} | class ____:
def __init__(self, get_response):
self.get_response = get_response
if iscoroutinefunction(self.get_response):
markcoroutinefunction(self)
def __call__(self, request):
return self.get_response(request)
| BaseMiddleware |
python | huggingface__transformers | src/transformers/models/dbrx/modeling_dbrx.py | {
"start": 15433,
"end": 16673
} | class ____(nn.Module):
"""Modular DBRX MLP/FFN component with MoE support."""
def __init__(self, config, **kwargs):
super().__init__()
self.router = DbrxRouter(config.ffn_config)
self.experts = DbrxExperts(config.ffn_config)
self.moe_normalize_expert_weights = config.ffn_config.moe_normalize_expert_weights
self.top_k = config.ffn_config.moe_top_k
def route_tokens_to_experts(self, router_logits):
router_logits = torch.nn.functional.softmax(router_logits, dim=1, dtype=router_logits.dtype)
router_top_value, router_indices = torch.topk(router_logits, self.top_k, dim=-1)
if self.moe_normalize_expert_weights is not None:
router_top_value = router_top_value / torch.norm(
router_top_value, p=self.moe_normalize_expert_weights, dim=-1, keepdim=True
)
return router_top_value, router_indices
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
router_logits = self.router(hidden_states)
top_k_weights, top_k_index = self.route_tokens_to_experts(router_logits)
output = self.experts(hidden_states, top_k_index, top_k_weights)
return output
| DbrxFFN |
python | apache__airflow | providers/google/src/airflow/providers/google/marketing_platform/operators/analytics_admin.py | {
"start": 14077,
"end": 17010
} | class ____(GoogleCloudBaseOperator):
"""
Deletes Data stream.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleAnalyticsAdminDeleteDataStreamOperator`
:param property_id: ID of the property which is parent for the data stream.
:param data_stream_id: ID of the data stream to delete.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The timeout for this request.
:param metadata: Optional. Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"gcp_conn_id",
"impersonation_chain",
"property_id",
"data_stream_id",
)
def __init__(
self,
*,
property_id: str,
data_stream_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.property_id = property_id
self.data_stream_id = data_stream_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(
self,
context: Context,
) -> None:
hook = GoogleAnalyticsAdminHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting a Google Analytics data stream (id %s).", self.data_stream_id)
hook.delete_data_stream(
property_id=self.property_id,
data_stream_id=self.data_stream_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("The Google Analytics data stream was deleted successfully.")
return None
| GoogleAnalyticsAdminDeleteDataStreamOperator |
python | kubernetes-client__python | kubernetes/client/api/certificates_api.py | {
"start": 543,
"end": 5197
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/certificates.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| CertificatesApi |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 52896,
"end": 53361
} | class ____(Interface):
def text():
"""
A textual description of the predicate used in the introspector.
For example, ``'content_type = application/json'`` for a
``ContentTypePredicate`` with a ``value == 'application/json'``.
"""
def phash():
"""
A unique string for the predicate containing both the name and value.
Often implementations simply set ``phash = text``.
"""
| IPredicate |
python | dateutil__dateutil | src/dateutil/tz/_factories.py | {
"start": 1654,
"end": 2569
} | class ____(_TzFactory):
def __init__(cls, *args, **kwargs):
cls.__instances = weakref.WeakValueDictionary()
cls.__strong_cache = OrderedDict()
cls.__strong_cache_size = 8
cls.__cache_lock = _thread.allocate_lock()
def __call__(cls, s, posix_offset=False):
key = (s, posix_offset)
instance = cls.__instances.get(key, None)
if instance is None:
instance = cls.__instances.setdefault(key,
cls.instance(s, posix_offset))
# This lock may not be necessary in Python 3. See GH issue #901
with cls.__cache_lock:
cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance)
# Remove an item if the strong cache is overpopulated
if len(cls.__strong_cache) > cls.__strong_cache_size:
cls.__strong_cache.popitem(last=False)
return instance
| _TzStrFactory |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Filters.py | {
"start": 734,
"end": 1474
} | class ____(CtrlNode):
"""Bessel filter. Input data must have time values."""
nodeName = 'BesselFilter'
uiTemplate = [
('band', 'combo', {'values': ['lowpass', 'highpass'], 'index': 0}),
('cutoff', 'spin', {'value': 1000., 'step': 1, 'dec': True, 'bounds': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}),
('order', 'intSpin', {'value': 4, 'min': 1, 'max': 16}),
('bidir', 'check', {'checked': True})
]
def processData(self, data):
s = self.stateGroup.state()
if s['band'] == 'lowpass':
mode = 'low'
else:
mode = 'high'
return functions.besselFilter(data, bidir=s['bidir'], btype=mode, cutoff=s['cutoff'], order=s['order'])
| Bessel |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 34202,
"end": 34555
} | class ____(StructModel):
def __init__(self, dmm, fe_type):
members = [('start', types.intp),
('stop', types.intp),
('step', types.intp),
]
super(SliceModel, self).__init__(dmm, fe_type, members)
@register_default(types.NPDatetime)
@register_default(types.NPTimedelta)
| SliceModel |
python | getsentry__sentry | src/sentry/integrations/cursor/models.py | {
"start": 436,
"end": 576
} | class ____(BaseModel):
autoCreatePr: bool
branchName: str
openAsCursorGithubApp: bool | None = None
| CursorAgentLaunchRequestTarget |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 107844,
"end": 118685
} | class ____(Structure):
_fields_ = [('version', c_uint),
('attackerAdvantage', c_ulong),
]
ConfComputeGetKeyRotationThresholdInfo_v1 = 0x1000010
## string/bytes conversion for ease of use
def convertStrBytes(func):
'''
In python 3, strings are unicode instead of bytes, and need to be converted for ctypes
Args from caller: (1, 'string', <__main__.c_nvmlDevice_t at 0xFFFFFFFF>)
Args passed to function: (1, b'string', <__main__.c_nvmlDevice_t at 0xFFFFFFFF)>
----
Returned from function: b'returned string'
Returned to caller: 'returned string'
'''
@wraps(func)
def wrapper(*args, **kwargs):
# encoding a str returns bytes in python 2 and 3
args = [arg.encode() if isinstance(arg, str) else arg for arg in args]
res = func(*args, **kwargs)
# In python 2, str and bytes are the same
# In python 3, str is unicode and should be decoded.
# Ctypes handles most conversions, this only effects c_char and char arrays.
if isinstance(res, bytes):
if isinstance(res, str):
return res
return res.decode()
return res
if sys.version_info >= (3,):
return wrapper
return func
def throwOnVersionMismatch(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except NVMLError_FunctionNotFound:
raise NVMLLibraryMismatchError("Unversioned function called and the "
"pyNVML version does not match the NVML lib version. "
"Either use matching pyNVML and NVML lib versions or "
"use a versioned function such as " + func.__name__ + "_v2")
return wrapper
## C function wrappers ##
def nvmlInitWithFlags(flags):
_LoadNvmlLibrary()
#
# Initialize the library
#
fn = _nvmlGetFunctionPointer("nvmlInitWithFlags")
ret = fn(flags)
_nvmlCheckReturn(ret)
# Atomically update refcount
global _nvmlLib_refcount
libLoadLock.acquire()
_nvmlLib_refcount += 1
libLoadLock.release()
return None
def nvmlInit():
nvmlInitWithFlags(0)
return None
def _LoadNvmlLibrary():
'''
Load the library if it isn't loaded already
'''
global nvmlLib
if (nvmlLib == None):
# lock to ensure only one caller loads the library
libLoadLock.acquire()
try:
# ensure the library still isn't loaded
if (nvmlLib == None):
try:
if (sys.platform[:3] == "win"):
# cdecl calling convention
try:
# Check for nvml.dll in System32 first for DCH drivers
nvmlLib = CDLL(os.path.join(os.getenv("WINDIR", "C:/Windows"), "System32/nvml.dll"))
except OSError as ose:
# If nvml.dll is not found in System32, it should be in ProgramFiles
# load nvml.dll from %ProgramFiles%/NVIDIA Corporation/NVSMI/nvml.dll
nvmlLib = CDLL(os.path.join(os.getenv("ProgramFiles", "C:/Program Files"), "NVIDIA Corporation/NVSMI/nvml.dll"))
else:
# assume linux
nvmlLib = CDLL("libnvidia-ml.so.1")
except OSError as ose:
_nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
if (nvmlLib == None):
_nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release()
def nvmlShutdown():
#
# Leave the library loaded, but shutdown the interface
#
fn = _nvmlGetFunctionPointer("nvmlShutdown")
ret = fn()
_nvmlCheckReturn(ret)
# Atomically update refcount
global _nvmlLib_refcount
libLoadLock.acquire()
if (0 < _nvmlLib_refcount):
_nvmlLib_refcount -= 1
libLoadLock.release()
return None
# Added in 2.285
@convertStrBytes
def nvmlErrorString(result):
fn = _nvmlGetFunctionPointer("nvmlErrorString")
fn.restype = c_char_p # otherwise return is an int
ret = fn(result)
return ret
# Added in 2.285
@convertStrBytes
def nvmlSystemGetNVMLVersion():
c_version = create_string_buffer(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlSystemGetNVMLVersion")
ret = fn(c_version, c_uint(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
def nvmlSystemGetCudaDriverVersion():
c_cuda_version = c_int()
fn = _nvmlGetFunctionPointer("nvmlSystemGetCudaDriverVersion")
ret = fn(byref(c_cuda_version))
_nvmlCheckReturn(ret)
return c_cuda_version.value
def nvmlSystemGetCudaDriverVersion_v2():
c_cuda_version = c_int()
fn = _nvmlGetFunctionPointer("nvmlSystemGetCudaDriverVersion_v2")
ret = fn(byref(c_cuda_version))
_nvmlCheckReturn(ret)
return c_cuda_version.value
# Added in 2.285
@convertStrBytes
def nvmlSystemGetProcessName(pid):
c_name = create_string_buffer(1024)
fn = _nvmlGetFunctionPointer("nvmlSystemGetProcessName")
ret = fn(c_uint(pid), c_name, c_uint(1024))
_nvmlCheckReturn(ret)
return c_name.value
@convertStrBytes
def nvmlSystemGetDriverVersion():
c_version = create_string_buffer(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlSystemGetDriverVersion")
ret = fn(c_version, c_uint(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 2.285
def nvmlSystemGetHicVersion():
c_count = c_uint(0)
hics = None
fn = _nvmlGetFunctionPointer("nvmlSystemGetHicVersion")
# get the count
ret = fn(byref(c_count), None)
# this should only fail with insufficient size
if ((ret != NVML_SUCCESS) and
(ret != NVML_ERROR_INSUFFICIENT_SIZE)):
raise NVMLError(ret)
# If there are no hics
if (c_count.value == 0):
return []
hic_array = c_nvmlHwbcEntry_t * c_count.value
hics = hic_array()
ret = fn(byref(c_count), hics)
_nvmlCheckReturn(ret)
return hics
def nvmlSystemGetDriverBranch():
c_branchInfo = c_nvmlSystemDriverBranchInfo_v1_t(0)
c_branchInfo.version = SystemDriverBranchInfo_v1
fn = _nvmlGetFunctionPointer("nvmlSystemGetDriverBranch")
ret = fn(byref(c_branchInfo), c_uint(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_branchInfo
## Unit get functions
def nvmlUnitGetCount():
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlUnitGetCount")
ret = fn(byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlUnitGetHandleByIndex(index):
c_index = c_uint(index)
unit = c_nvmlUnit_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetHandleByIndex")
ret = fn(c_index, byref(unit))
_nvmlCheckReturn(ret)
return unit
def nvmlUnitGetUnitInfo(unit):
c_info = c_nvmlUnitInfo_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetUnitInfo")
ret = fn(unit, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlUnitGetLedState(unit):
c_state = c_nvmlLedState_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetLedState")
ret = fn(unit, byref(c_state))
_nvmlCheckReturn(ret)
return c_state
def nvmlUnitGetPsuInfo(unit):
c_info = c_nvmlPSUInfo_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetPsuInfo")
ret = fn(unit, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlUnitGetTemperature(unit, type):
c_temp = c_uint()
fn = _nvmlGetFunctionPointer("nvmlUnitGetTemperature")
ret = fn(unit, c_uint(type), byref(c_temp))
_nvmlCheckReturn(ret)
return c_temp.value
def nvmlUnitGetFanSpeedInfo(unit):
c_speeds = c_nvmlUnitFanSpeeds_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetFanSpeedInfo")
ret = fn(unit, byref(c_speeds))
_nvmlCheckReturn(ret)
return c_speeds
# added to API
def nvmlUnitGetDeviceCount(unit):
c_count = c_uint(0)
# query the unit to determine device count
fn = _nvmlGetFunctionPointer("nvmlUnitGetDevices")
ret = fn(unit, byref(c_count), None)
if (ret == NVML_ERROR_INSUFFICIENT_SIZE):
ret = NVML_SUCCESS
_nvmlCheckReturn(ret)
return c_count.value
def nvmlUnitGetDevices(unit):
c_count = c_uint(nvmlUnitGetDeviceCount(unit))
device_array = c_nvmlDevice_t * c_count.value
c_devices = device_array()
fn = _nvmlGetFunctionPointer("nvmlUnitGetDevices")
ret = fn(unit, byref(c_count), c_devices)
_nvmlCheckReturn(ret)
return c_devices
## Device get functions
def nvmlDeviceGetCount():
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCount_v2")
ret = fn(byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlDeviceGetHandleByIndex(index):
c_index = c_uint(index)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByIndex_v2")
ret = fn(c_index, byref(device))
_nvmlCheckReturn(ret)
return device
# Deprecated
@convertStrBytes
def nvmlDeviceGetHandleBySerial(serial):
c_serial = c_char_p(serial)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleBySerial")
ret = fn(c_serial, byref(device))
_nvmlCheckReturn(ret)
return device
@convertStrBytes
def nvmlDeviceGetHandleByUUID(uuid):
c_uuid = c_char_p(uuid)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByUUID")
ret = fn(c_uuid, byref(device))
_nvmlCheckReturn(ret)
return device
@convertStrBytes
def nvmlDeviceGetHandleByUUIDV(uuid, type):
c_uuid = c_nvmlUUID_t()
c_uuid.type = type
if type == NVML_UUID_TYPE_ASCII:
c_uuid.value.str = uuid
elif type == NVML_UUID_TYPE_BINARY:
memmove(c_uuid.value.bytes, uuid, NVML_DEVICE_UUID_BINARY_LEN)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByUUIDV")
ret = fn(byref(c_uuid), byref(device))
_nvmlCheckReturn(ret)
return device
@convertStrBytes
def nvmlDeviceGetHandleByPciBusId(pciBusId):
c_busId = c_char_p(pciBusId)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByPciBusId_v2")
ret = fn(c_busId, byref(device))
_nvmlCheckReturn(ret)
return device
@convertStrBytes
def nvmlDeviceGetName(handle):
c_name = create_string_buffer(NVML_DEVICE_NAME_V2_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetName")
ret = fn(handle, c_name, c_uint(NVML_DEVICE_NAME_V2_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_name.value
| c_nvmlConfComputeGetKeyRotationThresholdInfo_t |
python | pypa__pipenv | pipenv/patched/pip/_internal/commands/lock.py | {
"start": 891,
"end": 6091
} | class ____(RequirementCommand):
"""
EXPERIMENTAL - Lock packages and their dependencies from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports locking from "requirements files", which provide an easy
way to specify a whole environment to be installed.
The generated lock file is only guaranteed to be valid for the current
python version and platform.
"""
usage = """
%prog [options] [-e] <local project path> ...
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] <archive url/path> ..."""
def add_options(self) -> None:
self.cmd_opts.add_option(
cmdoptions.PipOption(
"--output",
"-o",
dest="output_file",
metavar="path",
type="path",
default="pylock.toml",
help="Lock file name (default=pylock.toml). Use - for stdout.",
)
)
self.cmd_opts.add_option(cmdoptions.requirements())
self.cmd_opts.add_option(cmdoptions.constraints())
self.cmd_opts.add_option(cmdoptions.no_deps())
self.cmd_opts.add_option(cmdoptions.pre())
self.cmd_opts.add_option(cmdoptions.editable())
self.cmd_opts.add_option(cmdoptions.src())
self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
self.cmd_opts.add_option(cmdoptions.no_build_isolation())
self.cmd_opts.add_option(cmdoptions.use_pep517())
self.cmd_opts.add_option(cmdoptions.no_use_pep517())
self.cmd_opts.add_option(cmdoptions.check_build_deps())
self.cmd_opts.add_option(cmdoptions.config_settings())
self.cmd_opts.add_option(cmdoptions.no_binary())
self.cmd_opts.add_option(cmdoptions.only_binary())
self.cmd_opts.add_option(cmdoptions.prefer_binary())
self.cmd_opts.add_option(cmdoptions.require_hashes())
self.cmd_opts.add_option(cmdoptions.progress_bar())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, self.cmd_opts)
@with_cleanup
def run(self, options: Values, args: List[str]) -> int:
logger.verbose("Using %s", get_pip_version())
logger.warning(
"pip lock is currently an experimental command. "
"It may be removed/changed in a future release "
"without prior warning."
)
session = self.get_default_session(options)
finder = self._build_package_finder(
options=options,
session=session,
ignore_requires_python=options.ignore_requires_python,
)
build_tracker = self.enter_context(get_build_tracker())
directory = TempDirectory(
delete=not options.no_clean,
kind="install",
globally_managed=True,
)
reqs = self.get_requirements(args, options, finder, session)
check_legacy_setup_py_options(options, reqs)
wheel_cache = WheelCache(options.cache_dir)
# Only when installing is it permitted to use PEP 660.
# In other circumstances (pip wheel, pip download) we generate
# regular (i.e. non editable) metadata and wheels.
for req in reqs:
req.permit_editable_wheels = True
preparer = self.make_requirement_preparer(
temp_build_dir=directory,
options=options,
build_tracker=build_tracker,
session=session,
finder=finder,
use_user_site=False,
verbosity=self.verbosity,
)
resolver = self.make_resolver(
preparer=preparer,
finder=finder,
options=options,
wheel_cache=wheel_cache,
use_user_site=False,
ignore_installed=True,
ignore_requires_python=options.ignore_requires_python,
upgrade_strategy="to-satisfy-only",
use_pep517=options.use_pep517,
)
self.trace_basic_info(finder)
requirement_set = resolver.resolve(reqs, check_supported_wheels=True)
if options.output_file == "-":
base_dir = Path.cwd()
else:
output_file_path = Path(options.output_file)
if not is_valid_pylock_file_name(output_file_path):
logger.warning(
"%s is not a valid lock file name.",
output_file_path,
)
base_dir = output_file_path.parent
pylock_toml = Pylock.from_install_requirements(
requirement_set.requirements.values(), base_dir=base_dir
).as_toml()
if options.output_file == "-":
sys.stdout.write(pylock_toml)
else:
output_file_path.write_text(pylock_toml, encoding="utf-8")
return SUCCESS
| LockCommand |
python | walkccc__LeetCode | solutions/3428. Maximum and Minimum Sums of at Most Size K Subsequences/3428.py | {
"start": 0,
"end": 1466
} | class ____:
def minMaxSums(self, nums: list[int], k: int) -> int:
# In a sorted array, nums[i] will be
# 1. The maximum for subsequences formed by nums[0..i].
# 2. The minimum for subsequences formed by nums[i..n - 1].
#
# The number of times nums[i] is the maximum is the same as the number of
# times nums[n - 1 - i] is the minimum, due to the symmetry in subsequences
# derived from the sorted order.
#
# To calculate the contribution of nums[i], we need to find the number of
# ways to select at most (k - 1) elements from the range of indices where
# nums[i] is the smallest or nums[n - 1 - i] is the largest.
MOD = 1_000_000_007
n = len(nums)
def getComb(n: int, k: int) -> list[list[int]]:
"""C(n, k) = C(n - 1, k) + C(n - 1, k - 1)"""
comb = [[0] * (k + 1) for _ in range(n + 1)]
for i in range(n + 1):
comb[i][0] = 1
for i in range(1, n + 1):
for j in range(1, k + 1):
comb[i][j] = (comb[i - 1][j] + comb[i - 1][j - 1]) % MOD
return comb
comb = getComb(n, k - 1)
ans = 0
nums.sort()
# i: available numbers from the left of nums[i] or
# available numbers from the right of nums[-1 - i]
for i in range(n):
count = 0
for j in range(k): # selected numbers
count = (count + comb[i][j]) % MOD
ans += nums[i] * count
ans += nums[-1 - i] * count
ans %= MOD
return ans
| Solution |
python | pytorch__pytorch | torch/_inductor/utils.py | {
"start": 88623,
"end": 96679
} | class ____(enum.Enum):
# The placeholder for the actual name of a triton kernel.
# e.g. for "def triton_" it would be "triton_"
KERNEL_NAME = "KERNEL_NAME"
# The descriptive name of the triton kernel; when unique_kernel_names = False, this
# placeholder will be replaced with a string with more information.
DESCRIPTIVE_NAME = "DESCRIPTIVE_NAME"
def pass_execution_and_save(
func: Callable[..., Any], gm: GraphModule, inp: Sequence[Any], msg: str
) -> None:
from .pattern_matcher import stable_topological_sort
with tempfile.NamedTemporaryFile(
mode="w",
encoding="utf-8",
) as f:
before_io = io.StringIO()
after_io = io.StringIO()
ShapeProp(gm=gm, fake_mode=detect_fake_mode(inp)).propagate(*inp)
print(f"Before:\n{gm.graph}", file=f)
print(gm.graph, file=before_io)
start_time = datetime.now()
with GraphTransformObserver(gm, msg):
func(gm.graph)
time_elapsed = datetime.now() - start_time
# recompile graph
stable_topological_sort(gm.graph)
gm.graph.lint()
gm.recompile()
print(f"After:\n{gm.graph}", file=f)
print(gm.graph, file=after_io)
t = before_io.getvalue() == after_io.getvalue()
log.info(
"%s, save before/after graph to %s, graph before/after are the same = %s, time elapsed = %s",
msg,
f.name,
t,
time_elapsed,
)
def is_multi_outputs_template(input_buf: Optional[Union[Buffer, Operation]]) -> bool:
"""
Check if input buffer is a multi-outputs template buffer
"""
from . import ir
return isinstance(input_buf, ir.CppTemplateBuffer) and isinstance(
input_buf.layout, ir.MultiOutputLayout
)
def is_output_of_multi_outputs_template(
input_buf: Optional[Union[Buffer, Operation]],
) -> bool:
"""
Check if input buffer is a output of multi-outputs template buffer
"""
from . import ir
return (
isinstance(input_buf, ir.MultiOutput)
and len(input_buf.inputs) == 1
and is_multi_outputs_template(input_buf.inputs[0]) # type: ignore[arg-type]
)
def is_collective(
node: Optional[Union[Node, Operation]],
op: Optional[torch._ops.OperatorBase] = None,
) -> bool:
if node is None:
return False
from . import ir
return (
isinstance(node, ir._CollectiveKernel)
and not isinstance(node, ir._WaitKernel)
and (op is None or node.op_overload is op)
) or (
# TODO: this is a temporary solution to ensure that we can identify torchrec's
# communication ops. But in order to allow better communication and computation
# overlap, torchrec's communication ops should be not used.
type(node) is ir.FallbackKernel
and (
# NOTE: the `hasattr()` check is to bypass errors such as the following:
# AttributeError: '_OpNamespace' 'torchrec' object has no attribute 'all_to_all_single'
(
hasattr(torch.ops.torchrec, "all_to_all_single")
and node.op_overload == torch.ops.torchrec.all_to_all_single.default
)
or (
hasattr(torch.ops.torchrec, "all_gather_into_tensor")
and node.op_overload
== torch.ops.torchrec.all_gather_into_tensor.default
)
or (
hasattr(torch.ops.torchrec, "reduce_scatter_tensor")
and node.op_overload == torch.ops.torchrec.reduce_scatter_tensor.default
)
)
)
def is_wait(node: Optional[Union[IRNode, Operation]]) -> bool:
from . import ir
return type(node) is ir._WaitKernel
def contains_collective(
snode: BaseSchedulerNode,
filter_fn: Optional[Callable[[BaseSchedulerNode], bool]] = None,
) -> bool:
from torch._inductor.scheduler import GroupedSchedulerNode
if isinstance(snode, GroupedSchedulerNode):
return any(contains_collective(x) for x in snode.snodes)
return is_collective(snode.node) and (filter_fn is None or filter_fn(snode))
def contains_wait(snode: BaseSchedulerNode) -> bool:
from torch._inductor.scheduler import GroupedSchedulerNode
if isinstance(snode, GroupedSchedulerNode):
return any(contains_wait(x) for x in snode.snodes)
else:
return is_wait(snode.node)
def is_fallback_op(
node: Optional[Operation],
op: Union[torch._ops.OpOverload, Collection[torch._ops.OpOverload]],
) -> bool:
from . import ir
if isinstance(op, torch._ops.OpOverload):
op = [op]
return isinstance(node, ir.FallbackKernel) and node.op_overload in op
def buf_name_to_fused_snode(
buf_name: str, name_to_buf: dict[str, Any], name_to_fused_node: dict[str, Any]
) -> Any:
return name_to_fused_node[name_to_buf[buf_name].defining_op.get_name()]
def find_recursive_deps_of_node(
snode: BaseSchedulerNode,
collected_node_set: MutableSet[BaseSchedulerNode],
name_to_buf: dict[str, SchedulerBuffer],
name_to_fused_node: dict[str, BaseSchedulerNode],
criteria_cb: Callable[[Any], bool] = lambda snode: False,
) -> None:
if criteria_cb(snode):
return
collected_node_set.add(snode)
for dep in snode.unmet_dependencies:
defining_op_for_dep = buf_name_to_fused_snode(
dep.name, name_to_buf, name_to_fused_node
)
if defining_op_for_dep in collected_node_set:
continue
find_recursive_deps_of_node(
defining_op_for_dep,
collected_node_set,
name_to_buf,
name_to_fused_node,
criteria_cb=criteria_cb,
)
def find_recursive_users_of_node(
snode: BaseSchedulerNode,
collected_node_set: MutableSet[BaseSchedulerNode],
name_to_buf: dict[str, SchedulerBuffer],
name_to_fused_node: dict[str, BaseSchedulerNode],
criteria_cb: Callable[[Any], bool] = lambda snode: False,
) -> None:
if criteria_cb(snode):
return
collected_node_set.add(snode)
for o in snode.get_outputs():
for user in o.users:
assert user.node is not None
if user.node.get_name() == "OUTPUT":
continue
if user.node.get_name() not in name_to_fused_node:
continue
user_op = name_to_fused_node[user.node.get_name()]
if user_op in collected_node_set:
continue
find_recursive_users_of_node(
user_op,
collected_node_set,
name_to_buf,
name_to_fused_node,
criteria_cb=criteria_cb,
)
def num_fw_fixed_arguments(dynamo_gm_num_inputs: int, aot_fw_gm_num_inputs: int) -> int:
"Computes the number of inputs to the aot fw graph which have fixed addresses (params and buffers)"
num_rng_seed_offset_inputs = (
2 if torch._functorch.config.functionalize_rng_ops else 0
)
# AOT won't lift any parameters if we're inlining NN Modules
# however desugaring subclasses will still add arguments
# resulted in extra fixed inputs https://github.com/pytorch/pytorch/issues/130502
return aot_fw_gm_num_inputs - dynamo_gm_num_inputs - num_rng_seed_offset_inputs
def count_tangents(fx_g: torch.fx.GraphModule) -> int:
"""
Infers which inputs are static for a backwards graph
"""
def is_saved_tensor(x: Node) -> bool:
return (
"tangents" not in x.name
and "bwd_seed" not in x.name
and "bwd_base_offset" not in x.name
and "bwd_rng_state" not in x.name
)
arg_count = 0
static_arg_idxs = []
for n in fx_g.graph.nodes:
if n.op == "placeholder":
if is_saved_tensor(n):
static_arg_idxs.append(arg_count)
arg_count += 1
assert static_arg_idxs == list(range(len(static_arg_idxs)))
return len(static_arg_idxs)
@dataclasses.dataclass
| Placeholder |
python | Netflix__metaflow | metaflow/plugins/events_decorator.py | {
"start": 396,
"end": 11145
} | class ____(FlowDecorator):
"""
Specifies the event(s) that this flow depends on.
```
@trigger(event='foo')
```
or
```
@trigger(events=['foo', 'bar'])
```
Additionally, you can specify the parameter mappings
to map event payload to Metaflow parameters for the flow.
```
@trigger(event={'name':'foo', 'parameters':{'flow_param': 'event_field'}})
```
or
```
@trigger(events=[{'name':'foo', 'parameters':{'flow_param_1': 'event_field_1'},
{'name':'bar', 'parameters':{'flow_param_2': 'event_field_2'}])
```
'parameters' can also be a list of strings and tuples like so:
```
@trigger(event={'name':'foo', 'parameters':['common_name', ('flow_param', 'event_field')]})
```
This is equivalent to:
```
@trigger(event={'name':'foo', 'parameters':{'common_name': 'common_name', 'flow_param': 'event_field'}})
```
Parameters
----------
event : Union[str, Dict[str, Any]], optional, default None
Event dependency for this flow.
events : List[Union[str, Dict[str, Any]]], default []
Events dependency for this flow.
options : Dict[str, Any], default {}
Backend-specific configuration for tuning eventing behavior.
MF Add To Current
-----------------
trigger -> metaflow.events.Trigger
Returns `Trigger` if the current run is triggered by an event
@@ Returns
-------
Trigger
`Trigger` if triggered by an event
"""
name = "trigger"
defaults = {
"event": None,
"events": [],
"options": {},
}
def process_event(self, event):
"""
Process a single event and return a dictionary if static trigger and a function
if deploy-time trigger.
Parameters
----------
event : Union[str, Dict[str, Any], Callable]
Event to process
Returns
-------
Union[Dict[str, Union[str, Callable]], Callable]
Processed event
Raises
------
MetaflowException
If the event is not in the correct format
"""
if is_stringish(event):
return {"name": str(event)}
elif isinstance(event, dict):
if "name" not in event:
raise MetaflowException(
"The *event* attribute for *@trigger* is missing the *name* key."
)
if callable(event["name"]) and not isinstance(
event["name"], DeployTimeField
):
event["name"] = DeployTimeField(
"event_name",
str,
None,
event["name"],
False,
print_representation=str(event["name"]),
)
event["parameters"] = self.process_parameters(
event.get("parameters", {}), event["name"]
)
return event
elif callable(event) and not isinstance(event, DeployTimeField):
return DeployTimeField(
"event",
[str, dict],
None,
event,
False,
print_representation=str(event),
)
else:
raise MetaflowException(
"Incorrect format for *event* attribute in *@trigger* decorator. "
"Supported formats are string and dictionary - \n"
"@trigger(event='foo') or @trigger(event={'name': 'foo', "
"'parameters': {'alpha': 'beta'}})"
)
def process_parameters(self, parameters, event_name):
"""
Process the parameters for an event and return a dictionary of parameter mappings if
parameters was statically defined or a function if deploy-time trigger.
Parameters
----------
Parameters : Union[Dict[str, str], List[Union[str, Tuple[str, str]]], Callable]
Parameters to process
event_name : Union[str, callable]
Name of the event
Returns
-------
Union[Dict[str, str], Callable]
Processed parameters
Raises
------
MetaflowException
If the parameters are not in the correct format
"""
new_param_values = {}
if isinstance(parameters, list):
for mapping in parameters:
if is_stringish(mapping):
# param_name
new_param_values[mapping] = mapping
elif isinstance(mapping, tuple) and len(mapping) == 2:
# (param_name, field_name)
param_name, field_name = mapping
if not is_stringish(param_name) or not is_stringish(field_name):
raise MetaflowException(
f"The *parameters* attribute for event {event_name} is invalid. "
"It should be a list/tuple of strings and lists/tuples of size 2."
)
new_param_values[param_name] = field_name
else:
raise MetaflowException(
"The *parameters* attribute for event is invalid. "
"It should be a list/tuple of strings and lists/tuples of size 2"
)
elif isinstance(parameters, dict):
for key, value in parameters.items():
if not is_stringish(key) or not is_stringish(value):
raise MetaflowException(
f"The *parameters* attribute for event {event_name} is invalid. "
"It should be a dictionary of string keys and string values."
)
new_param_values[key] = value
elif callable(parameters) and not isinstance(parameters, DeployTimeField):
# func
return DeployTimeField(
"parameters",
[list, dict, tuple],
None,
parameters,
False,
print_representation=str(parameters),
)
return new_param_values
def flow_init(
self,
flow_name,
graph,
environment,
flow_datastore,
metadata,
logger,
echo,
options,
):
self.triggers = []
if sum(map(bool, (self.attributes["event"], self.attributes["events"]))) > 1:
raise MetaflowException(
"Specify only one of *event* or *events* "
"attributes in *@trigger* decorator."
)
elif self.attributes["event"]:
event = self.attributes["event"]
processed_event = self.process_event(event)
self.triggers.append(processed_event)
elif self.attributes["events"]:
# events attribute supports the following formats -
# 1. events=[{'name': 'table.prod_db.members',
# 'parameters': {'alpha': 'member_weight'}},
# {'name': 'table.prod_db.metadata',
# 'parameters': {'beta': 'grade'}}]
if isinstance(self.attributes["events"], list):
# process every event in events
for event in self.attributes["events"]:
processed_event = self.process_event(event)
self.triggers.append(processed_event)
elif callable(self.attributes["events"]) and not isinstance(
self.attributes["events"], DeployTimeField
):
trig = DeployTimeField(
"events",
list,
None,
self.attributes["events"],
False,
print_representation=str(self.attributes["events"]),
)
self.triggers.append(trig)
else:
raise MetaflowException(
"Incorrect format for *events* attribute in *@trigger* decorator. "
"Supported format is list - \n"
"@trigger(events=[{'name': 'foo', 'parameters': {'alpha': "
"'beta'}}, {'name': 'bar', 'parameters': "
"{'gamma': 'kappa'}}])"
)
if not self.triggers:
raise MetaflowException("No event(s) specified in *@trigger* decorator.")
# same event shouldn't occur more than once
names = [
x["name"]
for x in self.triggers
if not isinstance(x, DeployTimeField)
and not isinstance(x["name"], DeployTimeField)
]
if len(names) != len(set(names)):
raise MetaflowException(
"Duplicate event names defined in *@trigger* decorator."
)
self.options = self.attributes["options"]
# TODO: Handle scenario for local testing using --trigger.
def format_deploytime_value(self):
new_triggers = []
# First pass to evaluate DeployTimeFields
for trigger in self.triggers:
# Case where trigger is a function that returns a list of events
# Need to do this bc we need to iterate over list later
if isinstance(trigger, DeployTimeField):
evaluated_trigger = deploy_time_eval(trigger)
if isinstance(evaluated_trigger, list):
for event in evaluated_trigger:
new_triggers.append(self.process_event(event))
else:
new_triggers.append(self.process_event(evaluated_trigger))
else:
new_triggers.append(trigger)
# Second pass to evaluate names
for trigger in new_triggers:
name = trigger.get("name")
if isinstance(name, DeployTimeField):
trigger["name"] = deploy_time_eval(name)
if not is_stringish(trigger["name"]):
raise MetaflowException(
f"The *name* attribute for event {trigger} is not a valid string"
)
# third pass to evaluate parameters
for trigger in new_triggers:
parameters = trigger.get("parameters", {})
if isinstance(parameters, DeployTimeField):
parameters_eval = deploy_time_eval(parameters)
parameters = self.process_parameters(parameters_eval, trigger["name"])
trigger["parameters"] = parameters
self.triggers = new_triggers
| TriggerDecorator |
python | matplotlib__matplotlib | lib/matplotlib/projections/polar.py | {
"start": 10302,
"end": 14131
class ____(maxis.XTick):
    """
    A theta-axis tick.

    This subclass of `.XTick` rotates each tick to match its angular
    location, so tick marks stay perpendicular to the arc spine.  With
    'auto' label rotation the labels are rotated parallel to the spine,
    and the label padding is applied here as well, because no generic
    axes transform can express tick-specific padding.
    """

    def __init__(self, axes, *args, **kwargs):
        dpi_trans = axes.get_figure(root=False).dpi_scale_trans
        # Per-label translations implementing angle-dependent padding.
        self._text1_translate = mtransforms.ScaledTranslation(0, 0, dpi_trans)
        self._text2_translate = mtransforms.ScaledTranslation(0, 0, dpi_trans)
        super().__init__(axes, *args, **kwargs)
        for label, translate in [(self.label1, self._text1_translate),
                                 (self.label2, self._text2_translate)]:
            label.set(rotation_mode='anchor',
                      transform=label.get_transform() + translate)

    def _apply_params(self, **kwargs):
        super()._apply_params(**kwargs)
        # Re-attach the padding translation; the label transforms sometimes
        # get reset by the base class.
        for label, translate in [(self.label1, self._text1_translate),
                                 (self.label2, self._text2_translate)]:
            trans = label.get_transform()
            if not trans.contains_branch(translate):
                label.set_transform(trans + translate)

    def _update_padding(self, pad, angle):
        # Convert the pad (in points) into an (x, y) offset in inches.
        dx = pad * np.cos(angle) / 72
        dy = pad * np.sin(angle) / 72
        self._text1_translate._t = (dx, dy)
        self._text1_translate.invalidate()
        self._text2_translate._t = (-dx, -dy)
        self._text2_translate.invalidate()

    def update_position(self, loc):
        super().update_position(loc)
        axes = self.axes
        angle = loc * axes.get_theta_direction() + axes.get_theta_offset()
        text_angle = np.rad2deg(angle) % 360 - 90
        angle -= np.pi / 2

        # Rotate the standard tick markers so they stay perpendicular to the
        # spine; custom tick line markers are left untouched.
        for tickline in (self.tick1line, self.tick2line):
            marker = tickline.get_marker()
            if marker in (mmarkers.TICKUP, '|'):
                new_trans = mtransforms.Affine2D().scale(1, 1).rotate(angle)
            elif marker == mmarkers.TICKDOWN:
                new_trans = mtransforms.Affine2D().scale(1, -1).rotate(angle)
            else:
                new_trans = tickline._marker._transform
            tickline._marker._transform = new_trans

        mode, user_angle = self._labelrotation
        if mode == 'default':
            text_angle = user_angle
        else:
            # Keep labels upright by flipping angles outside (-90, 90].
            if text_angle > 90:
                text_angle -= 180
            elif text_angle < -90:
                text_angle += 180
            text_angle += user_angle
        self.label1.set_rotation(text_angle)
        self.label2.set_rotation(text_angle)

        # The extra padding preserves the look from previous releases and
        # compensates for labels being anchored at their center.
        self._update_padding(self._pad + 7,
                             self._loc * axes.get_theta_direction() +
                             axes.get_theta_offset())
| ThetaTick |
python | pydata__xarray | xarray/tests/test_merge.py | {
"start": 408,
"end": 994
class ____:
    def test_broadcast_dimension_size(self):
        """broadcast_dimension_size maps dim names to sizes, rejecting conflicts."""
        sizes = merge.broadcast_dimension_size(
            [xr.Variable("x", [1]), xr.Variable("y", [2, 1])]
        )
        assert sizes == {"x": 1, "y": 2}

        sizes = merge.broadcast_dimension_size(
            [xr.Variable(("x", "y"), [[1, 2]]), xr.Variable("y", [2, 1])]
        )
        assert sizes == {"x": 1, "y": 2}

        # Conflicting sizes for dimension "y" must raise.
        with pytest.raises(ValueError):
            merge.broadcast_dimension_size(
                [xr.Variable(("x", "y"), [[1, 2]]), xr.Variable("y", [2])]
            )
| TestMergeInternals |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py | {
"start": 31989,
"end": 33116
class ____(TypeDefinition):
    """AST node for a GraphQL input object type definition."""

    __slots__ = ('loc', 'name', 'fields', 'directives',)
    _fields = ('name', 'fields',)

    def __init__(self, name, fields, loc=None, directives=None):
        self.loc = loc
        self.name = name
        self.fields = fields
        self.directives = directives

    def __eq__(self, other):
        if self is other:
            return True
        # The source location is deliberately excluded from equality.
        return (isinstance(other, InputObjectTypeDefinition)
                and self.name == other.name
                and self.fields == other.fields
                and self.directives == other.directives)

    def __repr__(self):
        return ('InputObjectTypeDefinition('
                'name={0.name!r}'
                ', fields={0.fields!r}'
                ', directives={0.directives!r}'
                ')').format(self)

    def __copy__(self):
        return type(self)(self.name, self.fields, self.loc, self.directives)

    def __hash__(self):
        # Identity hash pairs with the value-based __eq__ above; this matches
        # the convention used by the sibling AST node classes.
        return id(self)
| InputObjectTypeDefinition |
python | django-import-export__django-import-export | import_export/instance_loaders.py | {
"start": 956,
"end": 2089
class ____(ModelInstanceLoader):
    """
    Instance loader that prefetches every candidate model instance for the
    dataset in a single query, avoiding one database hit per
    ``get_instance`` call.

    Only usable when there is exactly one ``import_id_fields`` field.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        pk_field_name = self.resource.get_import_id_fields()[0]
        self.pk_field = self.resource.fields[pk_field_name]

        # When the pk column is absent from the dataset, every row is new
        # and the cache stays empty.
        self.all_instances = {}
        dataset_rows = self.dataset.dict
        if dataset_rows and self.pk_field.column_name in dataset_rows[0]:
            ids = [self.pk_field.clean(row) for row in dataset_rows]
            queryset = self.get_queryset().filter(
                **{"%s__in" % self.pk_field.attribute: ids}
            )
            self.all_instances = {
                self.pk_field.get_value(instance): instance
                for instance in queryset
            }

    def get_instance(self, row):
        """Return the cached instance matching *row*'s pk value, or None."""
        if not self.all_instances:
            return None
        return self.all_instances.get(self.pk_field.clean(row))
| CachedInstanceLoader |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.